
src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp





  55                                                                          size_t actual_free) {
  56   size_t garbage_threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;
  57 
  58   // The logic for cset selection in adaptive is as follows:
  59   //
  60   //   1. We cannot get a cset larger than the available free space. Otherwise we guarantee OOME
  61   //      during evacuation, and thus guarantee a full GC. In practice, we also want to let the
  62   //      application allocate something. This is why we limit the cset to some fraction of the
  63   //      available space. In a non-overloaded heap, max_cset would contain all plausible candidates
  64   //      over the garbage threshold.
  65   //
  66   //   2. We should not make the cset too small, or the free threshold would not be met right
  67   //      after the cycle, and we would get back-to-back cycles for no reason if the heap is
  68   //      too fragmented. In a non-overloaded, non-fragmented heap min_garbage would be around zero.
  69   //
  70   // Therefore, we start by sorting the regions by garbage. Then we unconditionally add the best candidates
  71   // until we meet min_garbage. After that, we add all candidates that pass the garbage threshold, until
  72   // we hit max_cset. When max_cset is hit, we terminate the cset selection. Note that in this scheme,
  73   // ShenandoahGarbageThreshold is a soft threshold which is ignored until min_garbage is met.
  74 
  75   size_t capacity    = ShenandoahHeap::heap()->max_capacity();
  76   size_t free_target = ShenandoahMinFreeThreshold * capacity / 100;
  77   size_t min_garbage = free_target > actual_free ? (free_target - actual_free) : 0;
  78   size_t max_cset    = (size_t)(1.0 * ShenandoahEvacReserve * capacity / 100 / ShenandoahEvacWaste);
  79 
  80   log_info(gc, ergo)("Adaptive CSet Selection. Target Free: " SIZE_FORMAT "M, Actual Free: "
  81                      SIZE_FORMAT "M, Max CSet: " SIZE_FORMAT "M, Min Garbage: " SIZE_FORMAT "M",
  82                      free_target / M, actual_free / M, max_cset / M, min_garbage / M);
  83 
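For a rough sense of scale, here is a hypothetical worked example of the thresholds computed above. The numbers (1024M capacity, 80M actually free, ShenandoahMinFreeThreshold = 10, ShenandoahEvacReserve = 5, ShenandoahEvacWaste = 1.5) are illustrative assumptions, not values taken from this change:

  // Illustrative arithmetic only, not part of the patch:
  //   free_target = 10 * 1024M / 100             = ~102M
  //   min_garbage = 102M - 80M                   = ~22M   (free_target > actual_free)
  //   max_cset    = 1.0 * 5 * 1024M / 100 / 1.5  = ~34M
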
  84   // Better select garbage-first regions
  85   QuickSort::sort<RegionData>(data, (int)size, compare_by_garbage, false);
  86 
  87   size_t cur_cset = 0;
  88   size_t cur_garbage = 0;
  89   _bytes_in_cset = 0;
  90 
  91   for (size_t idx = 0; idx < size; idx++) {
  92     ShenandoahHeapRegion* r = data[idx]._region;
  93 
  94     size_t new_cset    = cur_cset + r->get_live_data_bytes();
  95     size_t new_garbage = cur_garbage + r->garbage();


 106     }
 107   }
 108 }
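
The body of the per-region selection loop (source lines 96..105) is elided in this print view. As a minimal sketch of the policy described in the comment above -- not the actual elided code -- the decision for each region looks roughly like this:

    // Sketch only; assumes the loop variables shown above, details of the real code may differ.
    bool fits      = new_cset <= max_cset;                // would not overflow the evacuation budget
    bool need_more = cur_garbage < min_garbage;           // still below the min_garbage target
    bool garbagey  = r->garbage() > garbage_threshold;    // passes the soft garbage threshold
    if (fits && (need_more || garbagey)) {
      // ... add r to the collection set (the cset API is not visible in this excerpt) ...
      _bytes_in_cset += r->get_live_data_bytes();
      cur_cset    = new_cset;
      cur_garbage = new_garbage;
    }

Per the comment above, the real selection terminates once max_cset is hit; this sketch merely skips regions that would not fit.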
 109 
 110 void ShenandoahAdaptiveHeuristics::record_cycle_start() {
 111   ShenandoahHeuristics::record_cycle_start();
 112   double last_cycle_gap = (_cycle_start - _last_cycle_end);
 113   _cycle_gap_history->add(last_cycle_gap);
 114 }
 115 
 116 void ShenandoahAdaptiveHeuristics::record_phase_time(ShenandoahPhaseTimings::Phase phase, double secs) {
 117   if (phase == ShenandoahPhaseTimings::conc_mark) {
 118     _conc_mark_duration_history->add(secs);
 119   } else if (phase == ShenandoahPhaseTimings::conc_update_refs) {
 120     _conc_uprefs_duration_history->add(secs);
 121   } // Else ignore
 122 }
 123 
 124 bool ShenandoahAdaptiveHeuristics::should_start_normal_gc() const {
 125   ShenandoahHeap* heap = ShenandoahHeap::heap();
 126   size_t capacity = heap->max_capacity();
 127   size_t available = heap->free_set()->available();
 128 
 129   // Check if we are falling below the worst-case limit; if so, it is time to trigger a GC,
 130   // regardless of anything else.
 131   size_t min_threshold = ShenandoahMinFreeThreshold * heap->max_capacity() / 100;
 132   if (available < min_threshold) {
 133     log_info(gc)("Trigger: Free (" SIZE_FORMAT "M) is below minimum threshold (" SIZE_FORMAT "M)",
 134                  available / M, min_threshold / M);
 135     return true;
 136   }
 137 
 138   // Check if we need to learn a bit about the application
 139   const size_t max_learn = ShenandoahLearningSteps;
 140   if (_gc_times_learned < max_learn) {
 141     size_t init_threshold = ShenandoahInitFreeThreshold * heap->max_capacity() / 100;
 142     if (available < init_threshold) {
 143       log_info(gc)("Trigger: Learning " SIZE_FORMAT " of " SIZE_FORMAT ". Free (" SIZE_FORMAT "M) is below initial threshold (" SIZE_FORMAT "M)",
 144                    _gc_times_learned + 1, max_learn, available / M, init_threshold / M);
 145       return true;
 146     }
 147   }
 148 
 149   // Check if allocation headroom is still okay. This also factors in:
 150   //   1. Some space to absorb allocation spikes
 151   //   2. Accumulated penalties from Degenerated and Full GC
 152 
 153   size_t allocation_headroom = available;
 154 
 155   size_t spike_headroom = ShenandoahAllocSpikeFactor * capacity / 100;
 156   size_t penalties      = _gc_time_penalties         * capacity / 100;
 157 
 158   allocation_headroom -= MIN2(allocation_headroom, spike_headroom);
 159   allocation_headroom -= MIN2(allocation_headroom, penalties);
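
For illustration only, with hypothetical numbers that are not taken from this change (available = 200M, capacity = 1024M, ShenandoahAllocSpikeFactor = 5, _gc_time_penalties = 10), the headroom computation above works out to:

  // Illustrative arithmetic only, not part of the patch:
  //   spike_headroom      = 5  * 1024M / 100   = ~51M
  //   penalties           = 10 * 1024M / 100   = ~102M
  //   allocation_headroom = 200M - 51M - 102M  = ~47M
  // The MIN2 clamps ensure the unsigned subtractions cannot underflow when headroom is already small.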
 160 
 161   // TODO: Allocation rate is way too averaged to be useful during state changes




  55                                                                          size_t actual_free) {
  56   size_t garbage_threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;
  57 
  58   // The logic for cset selection in adaptive is as follows:
  59   //
  60   //   1. We cannot get a cset larger than the available free space. Otherwise we guarantee OOME
  61   //      during evacuation, and thus guarantee a full GC. In practice, we also want to let the
  62   //      application allocate something. This is why we limit the cset to some fraction of the
  63   //      available space. In a non-overloaded heap, max_cset would contain all plausible candidates
  64   //      over the garbage threshold.
  65   //
  66   //   2. We should not make the cset too small, or the free threshold would not be met right
  67   //      after the cycle, and we would get back-to-back cycles for no reason if the heap is
  68   //      too fragmented. In a non-overloaded, non-fragmented heap min_garbage would be around zero.
  69   //
  70   // Therefore, we start by sorting the regions by garbage. Then we unconditionally add the best candidates
  71   // until we meet min_garbage. After that, we add all candidates that pass the garbage threshold, until
  72   // we hit max_cset. When max_cset is hit, we terminate the cset selection. Note that in this scheme,
  73   // ShenandoahGarbageThreshold is a soft threshold which is ignored until min_garbage is met.
  74 
  75   size_t capacity    = ShenandoahHeap::heap()->capacity();
  76   size_t free_target = ShenandoahMinFreeThreshold * capacity / 100;
  77   size_t min_garbage = free_target > actual_free ? (free_target - actual_free) : 0;
  78   size_t max_cset    = (size_t)(1.0 * ShenandoahEvacReserve * capacity / 100 / ShenandoahEvacWaste);
  79 
  80   log_info(gc, ergo)("Adaptive CSet Selection. Target Free: " SIZE_FORMAT "M, Actual Free: "
  81                      SIZE_FORMAT "M, Max CSet: " SIZE_FORMAT "M, Min Garbage: " SIZE_FORMAT "M",
  82                      free_target / M, actual_free / M, max_cset / M, min_garbage / M);
  83 
  84   // Better select garbage-first regions
  85   QuickSort::sort<RegionData>(data, (int)size, compare_by_garbage, false);
  86 
  87   size_t cur_cset = 0;
  88   size_t cur_garbage = 0;
  89   _bytes_in_cset = 0;
  90 
  91   for (size_t idx = 0; idx < size; idx++) {
  92     ShenandoahHeapRegion* r = data[idx]._region;
  93 
  94     size_t new_cset    = cur_cset + r->get_live_data_bytes();
  95     size_t new_garbage = cur_garbage + r->garbage();


 106     }
 107   }
 108 }
 109 
 110 void ShenandoahAdaptiveHeuristics::record_cycle_start() {
 111   ShenandoahHeuristics::record_cycle_start();
 112   double last_cycle_gap = (_cycle_start - _last_cycle_end);
 113   _cycle_gap_history->add(last_cycle_gap);
 114 }
 115 
 116 void ShenandoahAdaptiveHeuristics::record_phase_time(ShenandoahPhaseTimings::Phase phase, double secs) {
 117   if (phase == ShenandoahPhaseTimings::conc_mark) {
 118     _conc_mark_duration_history->add(secs);
 119   } else if (phase == ShenandoahPhaseTimings::conc_update_refs) {
 120     _conc_uprefs_duration_history->add(secs);
 121   } // Else ignore
 122 }
 123 
 124 bool ShenandoahAdaptiveHeuristics::should_start_normal_gc() const {
 125   ShenandoahHeap* heap = ShenandoahHeap::heap();
 126   size_t capacity = heap->capacity();
 127   size_t available = heap->free_set()->available();
 128 
 129   // Check if we are falling below the worst-case limit; if so, it is time to trigger a GC,
 130   // regardless of anything else.
 131   size_t min_threshold = ShenandoahMinFreeThreshold * heap->capacity() / 100;
 132   if (available < min_threshold) {
 133     log_info(gc)("Trigger: Free (" SIZE_FORMAT "M) is below minimum threshold (" SIZE_FORMAT "M)",
 134                  available / M, min_threshold / M);
 135     return true;
 136   }
 137 
 138   // Check if we need to learn a bit about the application
 139   const size_t max_learn = ShenandoahLearningSteps;
 140   if (_gc_times_learned < max_learn) {
 141     size_t init_threshold = ShenandoahInitFreeThreshold * heap->capacity() / 100;
 142     if (available < init_threshold) {
 143       log_info(gc)("Trigger: Learning " SIZE_FORMAT " of " SIZE_FORMAT ". Free (" SIZE_FORMAT "M) is below initial threshold (" SIZE_FORMAT "M)",
 144                    _gc_times_learned + 1, max_learn, available / M, init_threshold / M);
 145       return true;
 146     }
 147   }
 148 
 149   // Check if allocation headroom is still okay. This also factors in:
 150   //   1. Some space to absorb allocation spikes
 151   //   2. Accumulated penalties from Degenerated and Full GC
 152 
 153   size_t allocation_headroom = available;
 154 
 155   size_t spike_headroom = ShenandoahAllocSpikeFactor * capacity / 100;
 156   size_t penalties      = _gc_time_penalties         * capacity / 100;
 157 
 158   allocation_headroom -= MIN2(allocation_headroom, spike_headroom);
 159   allocation_headroom -= MIN2(allocation_headroom, penalties);
 160 
 161   // TODO: Allocation rate is way too averaged to be useful during state changes

