/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "runtime/os.hpp"
#include "utilities/quickSort.hpp"

#define BYTES_FORMAT    SIZE_FORMAT "%s"
#define FORMAT_BYTES(b) byte_size_in_proper_unit(b), proper_unit_for_byte_size(b)
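// Example (illustrative, 'reserve' is a hypothetical variable): log_info(gc)("Reserve: " BYTES_FORMAT, FORMAT_BYTES(reserve))
// prints the value scaled to its proper unit together with the unit string ("B", "K", "M", ...), matching how
// these macros are used in the log messages below.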

uint ShenandoahOldHeuristics::NOT_FOUND = -1U;

// sort by increasing live (so least live comes first)
int ShenandoahOldHeuristics::compare_by_live(RegionData a, RegionData b) {
  if (a._u._live_data < b._u._live_data)
    return -1;
  else if (a._u._live_data > b._u._live_data)
    return 1;
  else return 0;
}

ShenandoahOldHeuristics::ShenandoahOldHeuristics(ShenandoahOldGeneration* generation) :
  ShenandoahHeuristics(generation),
  _first_pinned_candidate(NOT_FOUND),
  _last_old_collection_candidate(0),
  _next_old_collection_candidate(0),
  _last_old_region(0),
  _live_bytes_in_unprocessed_candidates(0),
  _old_generation(generation),
  _cannot_expand_trigger(false),
  _fragmentation_trigger(false),
  _growth_trigger(false) {
}

bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* collection_set) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (unprocessed_old_collection_candidates() == 0) {
    return false;
  }

  _first_pinned_candidate = NOT_FOUND;

  uint included_old_regions = 0;
  size_t evacuated_old_bytes = 0;
  size_t collected_old_bytes = 0;

  // If a region is put into the collection set, then its free (not yet used) bytes are no longer "available"
  // to hold the results of other evacuations, which may reduce how much memory can still be evacuated.  We
  // address this by charging the evacuation budget not only with the live memory in that region, but also
  // with its unallocated memory whenever the budget is constrained by the availability of free memory.
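  // Illustrative example (values are hypothetical, not defaults): with an old evacuation reserve of 60 MB
  // and ShenandoahOldEvacWaste of 1.2, the budget computed below is 50 MB of live data.  If only 32 MB of
  // that can be satisfied by empty (unaffiliated) old regions, the remaining 18 MB must come from free
  // space inside partially used old regions, and any fragmented free memory beyond that 18 MB is treated
  // as "excess".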
  size_t old_evacuation_budget = (size_t) ((double) heap->get_old_evac_reserve() / ShenandoahOldEvacWaste);
  size_t unfragmented_available = _old_generation->free_unaffiliated_regions() * ShenandoahHeapRegion::region_size_bytes();
  size_t fragmented_available;
  size_t excess_fragmented_available = 0;

  if (unfragmented_available > old_evacuation_budget) {
    unfragmented_available = old_evacuation_budget;
    fragmented_available = 0;
    excess_fragmented_available = 0;
  } else {
    assert(_old_generation->available() >= old_evacuation_budget, "Cannot budget more than is available");
    fragmented_available = _old_generation->available() - unfragmented_available;
    assert(fragmented_available + unfragmented_available >= old_evacuation_budget, "Budgets do not add up");
    if (fragmented_available + unfragmented_available > old_evacuation_budget) {
      excess_fragmented_available = (fragmented_available + unfragmented_available) - old_evacuation_budget;
      fragmented_available -= excess_fragmented_available;
    }
  }

  size_t remaining_old_evacuation_budget = old_evacuation_budget;
  log_info(gc)("Choose old regions for mixed collection: old evacuation budget: " SIZE_FORMAT "%s, candidates: %u",
               byte_size_in_proper_unit(old_evacuation_budget), proper_unit_for_byte_size(old_evacuation_budget),
               unprocessed_old_collection_candidates());

  size_t lost_evacuation_capacity = 0;

  // The number of old-gen regions that were selected as candidates for collection at the end of the most recent old-gen
  // concurrent marking phase and have not yet been collected is represented by unprocessed_old_collection_candidates().
  // Candidate regions are ordered according to increasing amount of live data.  If there is not sufficient room to
  // evacuate region N, then there is no need to even consider evacuating region N+1.
  while (unprocessed_old_collection_candidates() > 0) {
    // Old collection candidates are sorted in order of increasing live data (least live first), so the
    // most efficiently evacuated regions come first.
    ShenandoahHeapRegion* r = next_old_collection_candidate();
    if (r == nullptr) {
      break;
    }

    // If region r is evacuated to fragmented memory (to free memory within a partially used region), then we need
    // to decrease the capacity of the fragmented memory by the scaled loss.

    size_t live_data_for_evacuation = r->get_live_data_bytes();
    size_t lost_available = r->free();

    if ((lost_available > 0) && (excess_fragmented_available > 0)) {
      if (lost_available < excess_fragmented_available) {
        excess_fragmented_available -= lost_available;
        lost_evacuation_capacity += lost_available;
        lost_available = 0;
      } else {
        lost_available -= excess_fragmented_available;
        lost_evacuation_capacity += excess_fragmented_available;
        excess_fragmented_available = 0;
      }
    }
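    // The budgets above are expressed in live-data terms (the reserve divided by ShenandoahOldEvacWaste),
    // so convert the lost destination bytes the same way before charging them against the fragmented budget.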
    size_t scaled_loss = (size_t) ((double) lost_available / ShenandoahOldEvacWaste);
    if ((lost_available > 0) && (fragmented_available > 0)) {
      if (scaled_loss + live_data_for_evacuation < fragmented_available) {
        fragmented_available -= scaled_loss;
        scaled_loss = 0;
      } else {
        // We will have to allocate this region's evacuation memory from unfragmented memory, so don't bother
        // to decrement scaled_loss
      }
    }
    if (scaled_loss > 0) {
      // We were not able to account for the lost free memory within the fragmented-memory budget, so we need to
      // take this allocation out of unfragmented memory.  Unfragmented (empty) regions do not need to account
      // for any loss of free memory.
      if (live_data_for_evacuation > unfragmented_available) {
        // There is not room to evacuate this region or any that come after it within the candidates array.
        break;
      } else {
        unfragmented_available -= live_data_for_evacuation;
      }
    } else {
      // Since scaled_loss == 0, we have accounted for the loss of free memory, so we can allocate from either
      // fragmented or unfragmented available memory.  Use up the fragmented memory budget first.
      size_t evacuation_need = live_data_for_evacuation;

      if (evacuation_need > fragmented_available) {
        evacuation_need -= fragmented_available;
        fragmented_available = 0;
      } else {
        fragmented_available -= evacuation_need;
        evacuation_need = 0;
      }
      if (evacuation_need > unfragmented_available) {
        // There is not room to evacuate this region or any that come after it within the candidates array.
        break;
      } else {
        unfragmented_available -= evacuation_need;
        // The assignment "evacuation_need = 0" is omitted here as dead code.
      }
    }
    collection_set->add_region(r);
    included_old_regions++;
    evacuated_old_bytes += live_data_for_evacuation;
    collected_old_bytes += r->garbage();
    consume_old_collection_candidate();
  }

  if (_first_pinned_candidate != NOT_FOUND) {
    // Need to deal with pinned regions
    slide_pinned_regions_to_front();
  }
  decrease_unprocessed_old_collection_candidates_live_memory(evacuated_old_bytes);
  if (included_old_regions > 0) {
    log_info(gc)("Old-gen piggyback evac (" UINT32_FORMAT " regions, evacuating " SIZE_FORMAT "%s, reclaiming: " SIZE_FORMAT "%s)",
                 included_old_regions,
                 byte_size_in_proper_unit(evacuated_old_bytes), proper_unit_for_byte_size(evacuated_old_bytes),
                 byte_size_in_proper_unit(collected_old_bytes), proper_unit_for_byte_size(collected_old_bytes));
  }

  if (unprocessed_old_collection_candidates() == 0) {
    // We have added the last of our collection candidates to a mixed collection.
    // Any triggers that occurred during mixed evacuations may no longer be valid.  They can retrigger if appropriate.
    clear_triggers();
    _old_generation->transition_to(ShenandoahOldGeneration::IDLE);
  } else if (included_old_regions == 0) {
    // We have candidates, but none were included for evacuation.  Are they all pinned,
    // or did we just not have enough room for any of them in this collection set?
    // We don't want a region with a stuck pin to prevent subsequent old collections, so
    // if they are all pinned we transition to a state that will allow us to make these
    // uncollected (pinned) regions parsable.
    if (all_candidates_are_pinned()) {
      log_info(gc)("All " UINT32_FORMAT " candidate regions are pinned", unprocessed_old_collection_candidates());
      _old_generation->transition_to(ShenandoahOldGeneration::WAITING_FOR_FILL);
    } else {
      log_info(gc)("No regions selected for mixed collection. "
                   "Old evacuation budget: " BYTES_FORMAT ", Remaining evacuation budget: " BYTES_FORMAT
                   ", Lost capacity: " BYTES_FORMAT
                   ", Next candidate: " UINT32_FORMAT ", Last candidate: " UINT32_FORMAT,
                   FORMAT_BYTES(heap->get_old_evac_reserve()),
                   FORMAT_BYTES(remaining_old_evacuation_budget),
                   FORMAT_BYTES(lost_evacuation_capacity),
                   _next_old_collection_candidate, _last_old_collection_candidate);
    }
  }

  return (included_old_regions > 0);
}

bool ShenandoahOldHeuristics::all_candidates_are_pinned() {
#ifdef ASSERT
  if (uint(os::random()) % 100 < ShenandoahCoalesceChance) {
    return true;
  }
#endif

  for (uint i = _next_old_collection_candidate; i < _last_old_collection_candidate; ++i) {
    ShenandoahHeapRegion* region = _region_data[i]._region;
    if (!region->is_pinned()) {
      return false;
    }
  }
  return true;
}

void ShenandoahOldHeuristics::slide_pinned_regions_to_front() {
  // Find the first unpinned region to the left of the next region that
  // will be added to the collection set. These regions will have been
  // added to the cset, so we can use them to hold pointers to regions
  // that were pinned when the cset was chosen.
  // [ r p r p p p r r ]
  //     ^         ^ ^
  //     |         | | pointer to next region to add to a mixed collection is here.
  //     |         | first r to the left should be in the collection set now.
  //     | first pinned region, we don't need to look past this
  uint write_index = NOT_FOUND;
  for (uint search = _next_old_collection_candidate - 1; search > _first_pinned_candidate; --search) {
    ShenandoahHeapRegion* region = _region_data[search]._region;
    if (!region->is_pinned()) {
      write_index = search;
      assert(region->is_cset(), "Expected unpinned region to be added to the collection set.");
      break;
    }
  }

  // If we could not find an unpinned region, it means there are no slots available
  // to move up the pinned regions. In this case, we just reset our next index in the
  // hopes that some of these regions will become unpinned before the next mixed
  // collection. We may want to bail out here instead, as it should be quite
  // rare to have so many pinned regions, and it may indicate that something is wrong.
  if (write_index == NOT_FOUND) {
    assert(_first_pinned_candidate != NOT_FOUND, "Should only be here if there are pinned regions.");
    _next_old_collection_candidate = _first_pinned_candidate;
    return;
  }

  // Find pinned regions to the left and move their pointer into a slot
  // that was pointing at a region that has been added to the cset (or was pointing
  // to a pinned region that we've already moved up). We are done when the leftmost
  // pinned region has been slid up.
  // [ r p r x p p p r ]
  //         ^       ^
  //         |       | next region for mixed collections
  //         | Write pointer is here. We know this region is already in the cset
  //         | so we can clobber it with the next pinned region we find.
  for (int32_t search = (int32_t)write_index - 1; search >= (int32_t)_first_pinned_candidate; --search) {
    RegionData& skipped = _region_data[search];
    if (skipped._region->is_pinned()) {
      RegionData& available_slot = _region_data[write_index];
      available_slot._region = skipped._region;
      available_slot._u._live_data = skipped._u._live_data;
      --write_index;
    }
  }

  // Update to read from the leftmost pinned region. Plus one here because we decremented
  // the write index to hold the next found pinned region. We are just moving it back now
  // to point to the first pinned region.
  _next_old_collection_candidate = write_index + 1;
}

void ShenandoahOldHeuristics::prepare_for_old_collections() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  size_t cand_idx = 0;
  size_t total_garbage = 0;
  size_t num_regions = heap->num_regions();
  size_t immediate_garbage = 0;
  size_t immediate_regions = 0;
  size_t live_data = 0;

  RegionData* candidates = _region_data;
  for (size_t i = 0; i < num_regions; i++) {
    ShenandoahHeapRegion* region = heap->get_region(i);
    if (!_old_generation->contains(region)) {
      continue;
    }

    size_t garbage = region->garbage();
    size_t live_bytes = region->get_live_data_bytes();
    total_garbage += garbage;
    live_data += live_bytes;

    if (region->is_regular() || region->is_pinned()) {
      if (!region->has_live()) {
        assert(!region->is_pinned(), "Pinned region should have live (pinned) objects.");
        region->make_trash_immediate();
        immediate_regions++;
        immediate_garbage += garbage;
      } else {
        region->begin_preemptible_coalesce_and_fill();
        candidates[cand_idx]._region = region;
        candidates[cand_idx]._u._live_data = live_bytes;
        cand_idx++;
      }
    } else if (region->is_humongous_start()) {
      if (!region->has_live()) {
        // The humongous object is dead, so we can return this region and its continuations to the
        // freeset immediately - no evacuation is necessary here. The continuations will be made into
        // trash by this call, so they'll be skipped by the 'is_regular' check above, but we still
        // need to count the start region.
        immediate_regions++;
        immediate_garbage += garbage;
        size_t region_count = heap->trash_humongous_region_at(region);
        log_debug(gc)("Trashed " SIZE_FORMAT " regions for humongous object.", region_count);
      }
    } else if (region->is_trash()) {
      // Count humongous objects made into trash here.
      immediate_regions++;
      immediate_garbage += garbage;
    }
  }

  _old_generation->set_live_bytes_after_last_mark(live_data);

  // TODO: Consider not running mixed collects if we recovered some threshold percentage of memory from immediate garbage.
  // This would be similar to young and global collections shortcutting evacuation, though we'd probably want a separate
  // threshold for the old generation.

  // Unlike young, we are more interested in efficiently packing OLD-gen than in reclaiming garbage first.  We sort by live-data.
  // Some regular regions may have been promoted in place with no garbage but also with very little live data.  When we "compact"
  // old-gen, we want to pack these underutilized regions together so we can have more unaffiliated (unfragmented) free regions
  // in old-gen.
  QuickSort::sort<RegionData>(candidates, cand_idx, compare_by_live, false);

  // Any old-gen region that contains at least ShenandoahOldGarbageThreshold (default 25) percent garbage
  // is to be added to the list of candidates for subsequent mixed evacuations.
  //
  // TODO: allow ShenandoahOldGarbageThreshold to be determined adaptively, by heuristics.

  const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();

  // The convention is to collect regions that have more than this amount of garbage.
  const size_t garbage_threshold = region_size_bytes * ShenandoahOldGarbageThreshold / 100;

  // Enlightened interpretation: collect regions that have less than this amount of live.
  const size_t live_threshold = region_size_bytes - garbage_threshold;
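  // Worked example (illustrative; region size depends on heap size): with 4 MB regions and the default
  // ShenandoahOldGarbageThreshold of 25, garbage_threshold is 1 MB and live_threshold is 3 MB, so a region
  // remains a candidate as long as it holds no more than 3 MB of live data, regardless of how the rest of
  // the region is split between garbage and unallocated free space.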

  size_t candidates_garbage = 0;
  _last_old_region = (uint)cand_idx;
  _last_old_collection_candidate = (uint)cand_idx;
  _next_old_collection_candidate = 0;

  size_t unfragmented = 0;

  for (size_t i = 0; i < cand_idx; i++) {
    size_t live = candidates[i]._u._live_data;
    if (live > live_threshold) {
      // Candidates are sorted in increasing order of live data, so no regions after this will be below the threshold.
      _last_old_collection_candidate = (uint)i;
      break;
    }
    size_t region_garbage = candidates[i]._region->garbage();
    size_t region_free = candidates[i]._region->free();
    candidates_garbage += region_garbage;
    unfragmented += region_free;
  }

  // Note that we do not coalesce and fill occupied humongous regions.
  size_t collectable_garbage = immediate_garbage + candidates_garbage;
  size_t old_candidates = _last_old_collection_candidate;
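  // Live memory that mixed collections still need to evacuate: everything in the candidate regions that is
  // neither garbage nor unallocated free space, i.e. candidates * region_size - (garbage + free).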
  size_t mixed_evac_live = old_candidates * region_size_bytes - (candidates_garbage + unfragmented);
  set_unprocessed_old_collection_candidates_live_memory(mixed_evac_live);

  log_info(gc)("Old-Gen Collectable Garbage: " SIZE_FORMAT "%s "
               "consolidated with free: " SIZE_FORMAT "%s, over " SIZE_FORMAT " regions, "
               "Old-Gen Immediate Garbage: " SIZE_FORMAT "%s over " SIZE_FORMAT " regions.",
               byte_size_in_proper_unit(collectable_garbage), proper_unit_for_byte_size(collectable_garbage),
               byte_size_in_proper_unit(unfragmented),        proper_unit_for_byte_size(unfragmented), old_candidates,
               byte_size_in_proper_unit(immediate_garbage),   proper_unit_for_byte_size(immediate_garbage), immediate_regions);

  if (unprocessed_old_collection_candidates() > 0) {
    _old_generation->transition_to(ShenandoahOldGeneration::WAITING_FOR_EVAC);
  } else if (has_coalesce_and_fill_candidates()) {
    _old_generation->transition_to(ShenandoahOldGeneration::WAITING_FOR_FILL);
  } else {
    _old_generation->transition_to(ShenandoahOldGeneration::IDLE);
  }
}

size_t ShenandoahOldHeuristics::unprocessed_old_collection_candidates_live_memory() const {
  return _live_bytes_in_unprocessed_candidates;
}

void ShenandoahOldHeuristics::set_unprocessed_old_collection_candidates_live_memory(size_t initial_live) {
  _live_bytes_in_unprocessed_candidates = initial_live;
}

void ShenandoahOldHeuristics::decrease_unprocessed_old_collection_candidates_live_memory(size_t evacuated_live) {
  assert(evacuated_live <= _live_bytes_in_unprocessed_candidates, "Cannot evacuate more than was present");
  _live_bytes_in_unprocessed_candidates -= evacuated_live;
}

// Used by unit test: test_shenandoahOldHeuristic.cpp
uint ShenandoahOldHeuristics::last_old_collection_candidate_index() const {
  return _last_old_collection_candidate;
}

uint ShenandoahOldHeuristics::unprocessed_old_collection_candidates() const {
  return _last_old_collection_candidate - _next_old_collection_candidate;
}

ShenandoahHeapRegion* ShenandoahOldHeuristics::next_old_collection_candidate() {
  while (_next_old_collection_candidate < _last_old_collection_candidate) {
    ShenandoahHeapRegion* next = _region_data[_next_old_collection_candidate]._region;
    if (!next->is_pinned()) {
      return next;
    } else {
      assert(next->is_pinned(), "sanity");
      if (_first_pinned_candidate == NOT_FOUND) {
        _first_pinned_candidate = _next_old_collection_candidate;
      }
    }

    _next_old_collection_candidate++;
  }
  return nullptr;
}

void ShenandoahOldHeuristics::consume_old_collection_candidate() {
  _next_old_collection_candidate++;
}

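// Copy into 'buffer' all regions from the next unprocessed candidate through the last old region recorded
// at mark time; these are the old regions that have not been claimed for evacuation and so remain
// coalesce-and-fill candidates. The caller is expected to supply a buffer with room for at least
// (_last_old_region - _next_old_collection_candidate) entries; the number of entries copied is returned.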
unsigned int ShenandoahOldHeuristics::get_coalesce_and_fill_candidates(ShenandoahHeapRegion** buffer) {
  uint end = _last_old_region;
  uint index = _next_old_collection_candidate;
  while (index < end) {
    *buffer++ = _region_data[index++]._region;
  }
  return (_last_old_region - _next_old_collection_candidate);
}

void ShenandoahOldHeuristics::abandon_collection_candidates() {
  _last_old_collection_candidate = 0;
  _next_old_collection_candidate = 0;
  _last_old_region = 0;
}

void ShenandoahOldHeuristics::record_cycle_end() {
  this->ShenandoahHeuristics::record_cycle_end();
  clear_triggers();
}

void ShenandoahOldHeuristics::trigger_old_has_grown() {
  _growth_trigger = true;
}

void ShenandoahOldHeuristics::clear_triggers() {
  // Clear any triggers that were set during mixed evacuations.  Conditions may be different now that this phase has finished.
  _cannot_expand_trigger = false;
  _fragmentation_trigger = false;
  _growth_trigger = false;
}

bool ShenandoahOldHeuristics::should_start_gc() {
  // Cannot start a new old-gen GC until previous one has finished.
  //
  // Future refinement: under certain circumstances, we might be more sophisticated about this choice.
  // For example, we could choose to abandon the previous old collection before it has completed evacuations.
  if (!_old_generation->can_start_gc()) {
    return false;
  }

  if (_cannot_expand_trigger) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    size_t old_gen_capacity = _old_generation->max_capacity();
    size_t heap_capacity = heap->capacity();
    double percent = percent_of(old_gen_capacity, heap_capacity);
    log_info(gc)("Trigger (OLD): Expansion failure, current size: " SIZE_FORMAT "%s which is %.1f%% of total heap size",
                 byte_size_in_proper_unit(old_gen_capacity), proper_unit_for_byte_size(old_gen_capacity), percent);
    return true;
  }

  if (_fragmentation_trigger) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    size_t used = _old_generation->used();
    size_t used_regions_size = _old_generation->used_regions_size();
    size_t used_regions = _old_generation->used_regions();
    assert(used_regions_size >= used, "Cannot have more used than the size of used regions");
    size_t fragmented_free = used_regions_size - used;
    double percent = percent_of(fragmented_free, used_regions_size);
    log_info(gc)("Trigger (OLD): Old has become fragmented: "
                 SIZE_FORMAT "%s available bytes spread between " SIZE_FORMAT " regions (%.1f%% free)",
                 byte_size_in_proper_unit(fragmented_free), proper_unit_for_byte_size(fragmented_free), used_regions, percent);
    return true;
  }

  if (_growth_trigger) {
    // Growth may be falsely triggered during mixed evacuations, before the mixed-evacuation candidates have been
    // evacuated.  Before acting on a false trigger, we check to confirm the trigger condition is still satisfied.
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    size_t current_usage = _old_generation->used();
    size_t trigger_threshold = _old_generation->usage_trigger_threshold();
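    // Illustrative numbers (hypothetical): if live at the end of the previous OLD marking was 200 MB and
    // current usage is 260 MB, the growth reported below is 30%.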
    if (current_usage > trigger_threshold) {
      size_t live_at_previous_old = _old_generation->get_live_bytes_after_last_mark();
      double percent_growth = percent_of(current_usage - live_at_previous_old, live_at_previous_old);
      log_info(gc)("Trigger (OLD): Old has overgrown, live at end of previous OLD marking: "
                   SIZE_FORMAT "%s, current usage: " SIZE_FORMAT "%s, percent growth: %.1f%%",
                   byte_size_in_proper_unit(live_at_previous_old), proper_unit_for_byte_size(live_at_previous_old),
                   byte_size_in_proper_unit(current_usage), proper_unit_for_byte_size(current_usage), percent_growth);
      return true;
    } else {
      _growth_trigger = false;
    }
  }

  // Otherwise, defer to inherited heuristic for gc trigger.
  return this->ShenandoahHeuristics::should_start_gc();
}

void ShenandoahOldHeuristics::record_success_concurrent(bool abbreviated) {
  // Forget any triggers that occurred while OLD GC was ongoing.  If we really need to start another, it will retrigger.
  clear_triggers();
  this->ShenandoahHeuristics::record_success_concurrent(abbreviated);
}

void ShenandoahOldHeuristics::record_success_degenerated() {
  // Forget any triggers that occurred while OLD GC was ongoing.  If we really need to start another, it will retrigger.
  clear_triggers();
  this->ShenandoahHeuristics::record_success_degenerated();
}

void ShenandoahOldHeuristics::record_success_full() {
  // Forget any triggers that occurred while OLD GC was ongoing.  If we really need to start another, it will retrigger.
  clear_triggers();
  this->ShenandoahHeuristics::record_success_full();
}

const char* ShenandoahOldHeuristics::name() {
  return "Old";
}

bool ShenandoahOldHeuristics::is_diagnostic() {
  return false;
}

bool ShenandoahOldHeuristics::is_experimental() {
  return true;
}

void ShenandoahOldHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
                                                                    ShenandoahHeuristics::RegionData* data,
                                                                    size_t data_size, size_t free) {
  ShouldNotReachHere();
}

#undef BYTES_FORMAT
#undef FORMAT_BYTES