/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "logging/log.hpp"
#include "utilities/quickSort.hpp"

uint ShenandoahOldHeuristics::NOT_FOUND = -1U;

// sort by increasing live (so least live comes first)
int ShenandoahOldHeuristics::compare_by_live(RegionData a, RegionData b) {
  if (a.get_livedata() < b.get_livedata()) {
    return -1;
  } else if (a.get_livedata() > b.get_livedata()) {
    return 1;
  } else {
    return 0;
  }
}

// sort by increasing index
int ShenandoahOldHeuristics::compare_by_index(RegionData a, RegionData b) {
  if (a.get_region()->index() < b.get_region()->index()) {
    return -1;
  } else if (a.get_region()->index() > b.get_region()->index()) {
    return 1;
  } else {
    // quicksort may compare to self during search for pivot
    return 0;
  }
}

ShenandoahOldHeuristics::ShenandoahOldHeuristics(ShenandoahOldGeneration* generation, ShenandoahGenerationalHeap* gen_heap) :
  ShenandoahHeuristics(generation),
  _heap(gen_heap),
  _old_gen(generation),
  _first_pinned_candidate(NOT_FOUND),
  _last_old_collection_candidate(0),
  _next_old_collection_candidate(0),
  _last_old_region(0),
  _live_bytes_in_unprocessed_candidates(0),
  _old_generation(generation),
  _cannot_expand_trigger(false),
  _fragmentation_trigger(false),
  _growth_trigger(false),
  _fragmentation_density(0.0),
  _fragmentation_first_old_region(0),
  _fragmentation_last_old_region(0)
{
}

bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* collection_set) {
  if (unprocessed_old_collection_candidates() == 0) {
    return false;
  }

  if (_old_generation->is_preparing_for_mark()) {
    // We have unprocessed old collection candidates, but the heuristic has given up on evacuating them.
    // This is most likely because they were _all_ pinned at the time of the last mixed evacuation (and
    // this in turn is most likely because there are just one or two candidate regions remaining).
    log_info(gc, ergo)("Remaining " UINT32_FORMAT " old regions are being coalesced and filled", unprocessed_old_collection_candidates());
    return false;
  }

  _first_pinned_candidate = NOT_FOUND;

  uint included_old_regions = 0;
  size_t evacuated_old_bytes = 0;
  size_t collected_old_bytes = 0;

  // If a region is put into the collection set, then this region's free (not yet used) bytes are no longer
  // "available" to hold the results of other evacuations.  This may cause a decrease in the remaining amount
  // of memory that can still be evacuated.  We address this by reducing the evacuation budget by the amount
  // of live memory in that region and by the amount of unallocated memory in that region if the evacuation
  // budget is constrained by availability of free memory.
  const size_t old_evacuation_reserve = _old_generation->get_evacuation_reserve();
  const size_t old_evacuation_budget = (size_t) ((double) old_evacuation_reserve / ShenandoahOldEvacWaste);
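  // Illustrative arithmetic (example values, not defaults): with an evacuation reserve of 128 MB and
  // ShenandoahOldEvacWaste == 1.25, the budget is 128 MB / 1.25 == ~102 MB of live data that can be
  // evacuated; the remaining portion of the reserve absorbs evacuation waste.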
  size_t unfragmented_available = _old_generation->free_unaffiliated_regions() * ShenandoahHeapRegion::region_size_bytes();
  size_t fragmented_available;
  size_t excess_fragmented_available = 0;  // Stays zero unless available memory exceeds the evacuation budget

  if (unfragmented_available > old_evacuation_budget) {
    unfragmented_available = old_evacuation_budget;
    fragmented_available = 0;
  } else {
    assert(_old_generation->available() >= old_evacuation_budget, "Cannot budget more than is available");
    fragmented_available = _old_generation->available() - unfragmented_available;
    assert(fragmented_available + unfragmented_available >= old_evacuation_budget, "Budgets do not add up");
    if (fragmented_available + unfragmented_available > old_evacuation_budget) {
      excess_fragmented_available = (fragmented_available + unfragmented_available) - old_evacuation_budget;
      fragmented_available -= excess_fragmented_available;
    }
  }
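  // Worked example (illustrative values only): suppose the budget is 100 MB, unaffiliated free regions
  // provide 60 MB (unfragmented_available), and total old available is 150 MB.  Then fragmented_available
  // starts at 90 MB, and since 60 + 90 exceeds the budget by 50 MB, that excess is set aside as
  // excess_fragmented_available, leaving 40 MB of fragmented budget.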

  size_t remaining_old_evacuation_budget = old_evacuation_budget;
  log_debug(gc)("Choose old regions for mixed collection: old evacuation budget: " SIZE_FORMAT "%s, candidates: %u",
                byte_size_in_proper_unit(old_evacuation_budget), proper_unit_for_byte_size(old_evacuation_budget),
                unprocessed_old_collection_candidates());

  size_t lost_evacuation_capacity = 0;

  // The number of old-gen regions that were selected as candidates for collection at the end of the most recent old-gen
  // concurrent marking phase and have not yet been collected is represented by unprocessed_old_collection_candidates().
  // Candidate regions are ordered according to increasing amount of live data.  If there is not sufficient room to
  // evacuate region N, then there is no need to even consider evacuating region N+1.
  while (unprocessed_old_collection_candidates() > 0) {
    // Old collection candidates are sorted in order of increasing live data (least live first).
    ShenandoahHeapRegion* r = next_old_collection_candidate();
    if (r == nullptr) {
      break;
    }
    assert(r->is_regular(), "There should be no humongous regions in the set of mixed-evac candidates");

    // If region r is evacuated to fragmented memory (to free memory within a partially used region), then we need
    // to decrease the capacity of the fragmented memory by the scaled loss.

    size_t live_data_for_evacuation = r->get_live_data_bytes();
    size_t lost_available = r->free();

    if ((lost_available > 0) && (excess_fragmented_available > 0)) {
      // Accumulate the free memory that leaves the evacuation budget when this region is collected.
      if (lost_available < excess_fragmented_available) {
        excess_fragmented_available -= lost_available;
        lost_evacuation_capacity += lost_available;
        lost_available = 0;
      } else {
        lost_available -= excess_fragmented_available;
        lost_evacuation_capacity += excess_fragmented_available;
        excess_fragmented_available = 0;
      }
    }
    size_t scaled_loss = (size_t) ((double) lost_available / ShenandoahOldEvacWaste);
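    // For instance (illustrative): if 512 KB of free memory is lost and ShenandoahOldEvacWaste == 1.25,
    // scaled_loss is ~409 KB, the amount of evacuation budget that lost free memory could have supplied.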
    if ((lost_available > 0) && (fragmented_available > 0)) {
      if (scaled_loss + live_data_for_evacuation < fragmented_available) {
        fragmented_available -= scaled_loss;
        scaled_loss = 0;
      } else {
        // We will have to allocate this region's evacuation memory from unfragmented memory, so don't bother
        // to decrement scaled_loss
      }
    }
    if (scaled_loss > 0) {
      // We were not able to account for the lost free memory within fragmented memory, so we need to take this
      // allocation out of unfragmented memory.  Unfragmented memory does not need to account for loss of free.
      if (live_data_for_evacuation > unfragmented_available) {
        // There is not room to evacuate this region or any that come after it within the candidates array.
        break;
      } else {
        unfragmented_available -= live_data_for_evacuation;
      }
    } else {
      // Since scaled_loss == 0, we have accounted for the loss of free memory, so we can allocate from either
      // fragmented or unfragmented available memory.  Use up the fragmented memory budget first.
      size_t evacuation_need = live_data_for_evacuation;

      if (evacuation_need > fragmented_available) {
        evacuation_need -= fragmented_available;
        fragmented_available = 0;
      } else {
        fragmented_available -= evacuation_need;
        evacuation_need = 0;
      }
      if (evacuation_need > unfragmented_available) {
        // There is not room to evacuate this region or any that come after it within the candidates array.
        break;
      } else {
        unfragmented_available -= evacuation_need;
        // evacuation_need is fully satisfied at this point; setting it to zero would be dead code.
      }
    }
    collection_set->add_region(r);
    included_old_regions++;
    evacuated_old_bytes += live_data_for_evacuation;
    collected_old_bytes += r->garbage();
    consume_old_collection_candidate();
  }

  if (_first_pinned_candidate != NOT_FOUND) {
    // Need to deal with pinned regions
    slide_pinned_regions_to_front();
  }
  decrease_unprocessed_old_collection_candidates_live_memory(evacuated_old_bytes);
  if (included_old_regions > 0) {
    log_info(gc, ergo)("Old-gen piggyback evac (" UINT32_FORMAT " regions, evacuating " PROPERFMT ", reclaiming: " PROPERFMT ")",
                  included_old_regions, PROPERFMTARGS(evacuated_old_bytes), PROPERFMTARGS(collected_old_bytes));
  }

  if (unprocessed_old_collection_candidates() == 0) {
    // We have added the last of our collection candidates to a mixed collection.
    // Any triggers that occurred during mixed evacuations may no longer be valid.  They can retrigger if appropriate.
    clear_triggers();

    _old_generation->complete_mixed_evacuations();
  } else if (included_old_regions == 0) {
    // We have candidates, but none were included for evacuation. Are they all pinned,
    // or did we just not have enough room for any of them in this collection set?
    // We don't want a region with a stuck pin to prevent subsequent old collections, so
    // if they are all pinned we transition to a state that will allow us to make these uncollected
    // (pinned) regions parsable.
    if (all_candidates_are_pinned()) {
      log_info(gc, ergo)("All " UINT32_FORMAT " candidate regions are pinned", unprocessed_old_collection_candidates());
      _old_generation->abandon_mixed_evacuations();
    } else {
      log_info(gc, ergo)("No regions selected for mixed collection. "
                         "Old evacuation budget: " PROPERFMT ", Remaining evacuation budget: " PROPERFMT
                         ", Lost capacity: " PROPERFMT
                         ", Next candidate: " UINT32_FORMAT ", Last candidate: " UINT32_FORMAT,
                         PROPERFMTARGS(old_evacuation_reserve),
                         PROPERFMTARGS(remaining_old_evacuation_budget),
                         PROPERFMTARGS(lost_evacuation_capacity),
                         _next_old_collection_candidate, _last_old_collection_candidate);
    }
  }

  return (included_old_regions > 0);
}

bool ShenandoahOldHeuristics::all_candidates_are_pinned() {
#ifdef ASSERT
  if (uint(os::random()) % 100 < ShenandoahCoalesceChance) {
    return true;
  }
#endif

  for (uint i = _next_old_collection_candidate; i < _last_old_collection_candidate; ++i) {
    ShenandoahHeapRegion* region = _region_data[i].get_region();
    if (!region->is_pinned()) {
      return false;
    }
  }
  return true;
}

void ShenandoahOldHeuristics::slide_pinned_regions_to_front() {
  // Find the first unpinned region to the left of the next region that
  // will be added to the collection set. These regions will have been
  // added to the cset, so we can use them to hold pointers to regions
  // that were pinned when the cset was chosen.
  // [ r p r p p p r r ]
  //     ^         ^ ^
  //     |         | | pointer to next region to add to a mixed collection is here.
  //     |         | first r to the left should be in the collection set now.
  //     | first pinned region, we don't need to look past this
  uint write_index = NOT_FOUND;
  for (uint search = _next_old_collection_candidate - 1; search > _first_pinned_candidate; --search) {
    ShenandoahHeapRegion* region = _region_data[search].get_region();
    if (!region->is_pinned()) {
      write_index = search;
      assert(region->is_cset(), "Expected unpinned region to be added to the collection set.");
      break;
    }
  }

  // If we could not find an unpinned region, it means there are no slots available
  // to move up the pinned regions. In this case, we just reset our next index in the
  // hopes that some of these regions will become unpinned before the next mixed
  // collection. We may want to bail out here instead, as it should be quite
  // rare to have so many pinned regions; it may indicate something is wrong.
  if (write_index == NOT_FOUND) {
    assert(_first_pinned_candidate != NOT_FOUND, "Should only be here if there are pinned regions.");
    _next_old_collection_candidate = _first_pinned_candidate;
    return;
  }

  // Find pinned regions to the left and move their pointer into a slot
  // that was pointing at a region that has been added to the cset (or was pointing
  // to a pinned region that we've already moved up). We are done when the leftmost
  // pinned region has been slid up.
  // [ r p r x p p p r ]
  //         ^       ^
  //         |       | next region for mixed collections
  //         | Write pointer is here. We know this region is already in the cset
  //         | so we can clobber it with the next pinned region we find.
  for (int32_t search = (int32_t)write_index - 1; search >= (int32_t)_first_pinned_candidate; --search) {
    RegionData& skipped = _region_data[search];
    if (skipped.get_region()->is_pinned()) {
      RegionData& available_slot = _region_data[write_index];
      available_slot.set_region_and_livedata(skipped.get_region(), skipped.get_livedata());
      --write_index;
    }
  }

  // Update to read from the leftmost pinned region. Plus one here because we decremented
  // the write index to hold the next found pinned region. We are just moving it back now
  // to point to the first pinned region.
  _next_old_collection_candidate = write_index + 1;
}

void ShenandoahOldHeuristics::prepare_for_old_collections() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  const size_t num_regions = heap->num_regions();
  size_t cand_idx = 0;
  size_t immediate_garbage = 0;
  size_t immediate_regions = 0;
  size_t live_data = 0;

  RegionData* candidates = _region_data;
  for (size_t i = 0; i < num_regions; i++) {
    ShenandoahHeapRegion* region = heap->get_region(i);
    if (!region->is_old()) {
      continue;
    }

    size_t garbage = region->garbage();
    size_t live_bytes = region->get_live_data_bytes();
    live_data += live_bytes;

    if (region->is_regular() || region->is_regular_pinned()) {
      // Only place regular or pinned regions with live data into the candidate set.
      // Pinned regions cannot be evacuated, but we are not actually choosing candidates
      // for the collection set here. That happens later during the next young GC cycle,
      // by which time, the pinned region may no longer be pinned.
      if (!region->has_live()) {
        assert(!region->is_pinned(), "Pinned region should have live (pinned) objects.");
        region->make_trash_immediate();
        immediate_regions++;
        immediate_garbage += garbage;
      } else {
        region->begin_preemptible_coalesce_and_fill();
        candidates[cand_idx].set_region_and_livedata(region, live_bytes);
        cand_idx++;
      }
    } else if (region->is_humongous_start()) {
      // This will handle humongous start regions whether they are also pinned, or not.
      // If they are pinned, we expect them to hold live data, so they will not be
      // turned into immediate garbage.
      if (!region->has_live()) {
        assert(!region->is_pinned(), "Pinned region should have live (pinned) objects.");
        // The humongous object is dead, we can just return this region and the continuations
        // immediately to the freeset - no evacuations are necessary here. The continuations
        // will be made into trash by this method, so they'll be skipped by the 'is_regular'
        // check above, but we still need to count the start region.
        immediate_regions++;
        immediate_garbage += garbage;
        size_t region_count = heap->trash_humongous_region_at(region);
        log_debug(gc)("Trashed " SIZE_FORMAT " regions for humongous object.", region_count);
      }
    } else if (region->is_trash()) {
      // Count humongous objects made into trash here.
      immediate_regions++;
      immediate_garbage += garbage;
    }
  }

  _old_generation->set_live_bytes_after_last_mark(live_data);

  // Unlike young, we are more interested in efficiently packing OLD-gen than in reclaiming garbage first.  We sort by live-data.
  // Some regular regions may have been promoted in place with no garbage but also with very little live data.  When we "compact"
  // old-gen, we want to pack these underutilized regions together so we can have more unaffiliated (unfragmented) free regions
  // in old-gen.

  QuickSort::sort<RegionData>(candidates, cand_idx, compare_by_live);

  const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();

  // The convention is to collect regions that have more than this amount of garbage.
  const size_t garbage_threshold = region_size_bytes * ShenandoahOldGarbageThreshold / 100;

  // Enlightened interpretation: collect regions that have less than this amount of live.
  const size_t live_threshold = region_size_bytes - garbage_threshold;
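  // Illustrative: with 4 MB regions and ShenandoahOldGarbageThreshold == 25 (values chosen only for
  // this example), garbage_threshold is 1 MB, so regions holding less than 3 MB of live data fall
  // below live_threshold and qualify as collection candidates.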

  _last_old_region = (uint)cand_idx;
  _last_old_collection_candidate = (uint)cand_idx;
  _next_old_collection_candidate = 0;

  size_t unfragmented = 0;
  size_t candidates_garbage = 0;

  for (size_t i = 0; i < cand_idx; i++) {
    size_t live = candidates[i].get_livedata();
    if (live > live_threshold) {
      // Candidates are sorted in increasing order of live data, so no regions after this will be below the threshold.
      _last_old_collection_candidate = (uint)i;
      break;
    }
    ShenandoahHeapRegion* r = candidates[i].get_region();
    size_t region_garbage = r->garbage();
    size_t region_free = r->free();
    candidates_garbage += region_garbage;
    unfragmented += region_free;
  }

  // defrag_count represents regions that are placed into the old collection set in order to defragment the memory
  // that we try to "reserve" for humongous allocations.
  size_t defrag_count = 0;
  size_t total_uncollected_old_regions = _last_old_region - _last_old_collection_candidate;

  if (cand_idx > _last_old_collection_candidate) {
    // Above, we have added into the set of mixed-evacuation candidates all old-gen regions for which the live memory
    // that they contain is below a particular old-garbage threshold.  Regions that were not selected for the collection
    // set hold enough live memory that it is not considered efficient (by "garbage-first standards") to compact these
    // at the current time.
    //
    // However, if any of these regions that were rejected from the collection set reside within areas of memory that
    // might interfere with future humongous allocation requests, we will prioritize them for evacuation at this time.
    // Humongous allocations target the bottom of the heap.  We want old-gen regions to congregate at the top of the
    // heap.
    //
    // Sort the regions that were initially rejected from the collection set in order of index.  This allows us to
    // focus our attention on the regions that have low index value (i.e. the old-gen regions at the bottom of the heap).
    QuickSort::sort<RegionData>(candidates + _last_old_collection_candidate, cand_idx - _last_old_collection_candidate,
                                compare_by_index);

    const size_t first_unselected_old_region = candidates[_last_old_collection_candidate].get_region()->index();
    const size_t last_unselected_old_region = candidates[cand_idx - 1].get_region()->index();
    size_t span_of_uncollected_regions = 1 + last_unselected_old_region - first_unselected_old_region;

    // Add no more than 1/8 of the existing old-gen regions to the set of mixed evacuation candidates.
    const int MAX_FRACTION_OF_HUMONGOUS_DEFRAG_REGIONS = 8;
    const size_t bound_on_additional_regions = cand_idx / MAX_FRACTION_OF_HUMONGOUS_DEFRAG_REGIONS;
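    // Illustrative: with 64 uncollected candidates spanning region indices 100..199, the span is 100
    // regions and 7/8 of it is 87.  Since 64 < 87, the loop below keeps promoting low-index regions
    // into the collection set (bounded by cand_idx / 8) until the uncollected regions occupy at
    // least 7/8 of their shrinking span.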

    // The heuristic old_is_fragmented trigger may be seeking to achieve up to 75% density.  Allow ourselves to overshoot
    // that target (at 7/8) so we will not have to do another defragmenting old collection right away.
    while ((defrag_count < bound_on_additional_regions) &&
           (total_uncollected_old_regions < 7 * span_of_uncollected_regions / 8)) {
      ShenandoahHeapRegion* r = candidates[_last_old_collection_candidate].get_region();
      assert(r->is_regular() || r->is_regular_pinned(), "Region " SIZE_FORMAT " has wrong state for collection: %s",
             r->index(), ShenandoahHeapRegion::region_state_to_string(r->state()));
      const size_t region_garbage = r->garbage();
      const size_t region_free = r->free();
      candidates_garbage += region_garbage;
      unfragmented += region_free;
      defrag_count++;
      _last_old_collection_candidate++;

      // We now have one fewer uncollected region, and our uncollected span shrinks because we have removed its first region.
      total_uncollected_old_regions--;
      span_of_uncollected_regions =
        1 + last_unselected_old_region - candidates[_last_old_collection_candidate].get_region()->index();
    }
  }

  // Note that we do not coalesce and fill occupied humongous regions
  // HR: humongous regions, RR: regular regions, CF: coalesce and fill regions
  const size_t collectable_garbage = immediate_garbage + candidates_garbage;
  const size_t old_candidates = _last_old_collection_candidate;
  const size_t mixed_evac_live = old_candidates * region_size_bytes - (candidates_garbage + unfragmented);
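  // Example of the computation above (illustrative): 10 candidate regions of 4 MB each hold 40 MB in
  // total; if 25 MB of that is garbage and 5 MB is unallocated (unfragmented), the candidates contain
  // 10 MB of live data that mixed evacuations will need to copy.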
  set_unprocessed_old_collection_candidates_live_memory(mixed_evac_live);

  log_info(gc, ergo)("Old-Gen Collectable Garbage: " PROPERFMT " consolidated with free: " PROPERFMT ", over " SIZE_FORMAT " regions",
                     PROPERFMTARGS(collectable_garbage), PROPERFMTARGS(unfragmented), old_candidates);
  log_info(gc, ergo)("Old-Gen Immediate Garbage: " PROPERFMT " over " SIZE_FORMAT " regions",
                     PROPERFMTARGS(immediate_garbage), immediate_regions);
  log_info(gc, ergo)("Old regions selected for defragmentation: " SIZE_FORMAT, defrag_count);
  log_info(gc, ergo)("Old regions not selected: " SIZE_FORMAT, total_uncollected_old_regions);

  if (unprocessed_old_collection_candidates() > 0) {
    _old_generation->transition_to(ShenandoahOldGeneration::EVACUATING);
  } else if (has_coalesce_and_fill_candidates()) {
    _old_generation->transition_to(ShenandoahOldGeneration::FILLING);
  } else {
    _old_generation->transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP);
  }
}

size_t ShenandoahOldHeuristics::unprocessed_old_collection_candidates_live_memory() const {
  return _live_bytes_in_unprocessed_candidates;
}

void ShenandoahOldHeuristics::set_unprocessed_old_collection_candidates_live_memory(size_t initial_live) {
  _live_bytes_in_unprocessed_candidates = initial_live;
}

void ShenandoahOldHeuristics::decrease_unprocessed_old_collection_candidates_live_memory(size_t evacuated_live) {
  assert(evacuated_live <= _live_bytes_in_unprocessed_candidates, "Cannot evacuate more than was present");
  _live_bytes_in_unprocessed_candidates -= evacuated_live;
}

// Used by unit test: test_shenandoahOldHeuristic.cpp
uint ShenandoahOldHeuristics::last_old_collection_candidate_index() const {
  return _last_old_collection_candidate;
}

uint ShenandoahOldHeuristics::unprocessed_old_collection_candidates() const {
  return _last_old_collection_candidate - _next_old_collection_candidate;
}

ShenandoahHeapRegion* ShenandoahOldHeuristics::next_old_collection_candidate() {
  while (_next_old_collection_candidate < _last_old_collection_candidate) {
    ShenandoahHeapRegion* next = _region_data[_next_old_collection_candidate].get_region();
    if (!next->is_pinned()) {
      return next;
    } else {
      assert(next->is_pinned(), "sanity");
      if (_first_pinned_candidate == NOT_FOUND) {
        _first_pinned_candidate = _next_old_collection_candidate;
      }
    }

    _next_old_collection_candidate++;
  }
  return nullptr;
}

void ShenandoahOldHeuristics::consume_old_collection_candidate() {
  _next_old_collection_candidate++;
}

unsigned int ShenandoahOldHeuristics::get_coalesce_and_fill_candidates(ShenandoahHeapRegion** buffer) {
  uint end = _last_old_region;
  uint index = _next_old_collection_candidate;
  while (index < end) {
    *buffer++ = _region_data[index++].get_region();
  }
  return (_last_old_region - _next_old_collection_candidate);
}

void ShenandoahOldHeuristics::abandon_collection_candidates() {
  _last_old_collection_candidate = 0;
  _next_old_collection_candidate = 0;
  _last_old_region = 0;
}

void ShenandoahOldHeuristics::record_cycle_end() {
  this->ShenandoahHeuristics::record_cycle_end();
  clear_triggers();
}

void ShenandoahOldHeuristics::clear_triggers() {
  // Clear any triggers that were set during mixed evacuations.  Conditions may be different now that this phase has finished.
  _cannot_expand_trigger = false;
  _fragmentation_trigger = false;
  _growth_trigger = false;
}

// This triggers old-gen collection if the number of regions "dedicated" to old generation is much larger than
// is required to represent the memory currently used within the old generation.  This trigger looks specifically
// at density of the old-gen spanned region.  A different mechanism triggers old-gen GC if the total number of
// old-gen regions (regardless of how close the regions are to one another) grows beyond an anticipated growth target.
void ShenandoahOldHeuristics::set_trigger_if_old_is_fragmented(size_t first_old_region, size_t last_old_region,
                                                               size_t old_region_count, size_t num_regions) {
  if (ShenandoahGenerationalHumongousReserve > 0) {
    // Our intent is to pack old-gen memory into the highest-numbered regions of the heap.  Count all memory
    // above first_old_region as the "span" of old generation.
    size_t old_region_span = (first_old_region <= last_old_region)? (num_regions - first_old_region): 0;
    // Given that memory at the bottom of the heap is reserved to represent humongous objects, the number of
    // regions that old_gen is "allowed" to consume is less than the total heap size.  The restriction on allowed
    // span is not strictly enforced.  This is a heuristic designed to reduce the likelihood that a humongous
    // allocation request will require a STW full GC.
    size_t allowed_old_gen_span = num_regions - (ShenandoahGenerationalHumongousReserve * num_regions) / 100;
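    // Illustrative: with 1000 heap regions and ShenandoahGenerationalHumongousReserve == 10 (an example
    // value, not a default), the allowed old-gen span is 1000 - 100 == 900 regions at the top of the heap.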

    size_t old_available = _old_gen->available() / HeapWordSize;
    size_t region_size_words = ShenandoahHeapRegion::region_size_words();
    size_t old_unaffiliated_available = _old_gen->free_unaffiliated_regions() * region_size_words;
    assert(old_available >= old_unaffiliated_available, "sanity");
    size_t old_fragmented_available = old_available - old_unaffiliated_available;

    size_t old_words_consumed = old_region_count * region_size_words - old_fragmented_available;
    size_t old_words_spanned = old_region_span * region_size_words;
    double old_density = ((double) old_words_consumed) / old_words_spanned;

    double old_span_percent = ((double) old_region_span) / allowed_old_gen_span;
    if (old_span_percent > 0.50) {
      // Squaring old_span_percent in the denominator below allows more aggressive triggering when we are
      // above desired maximum span and less aggressive triggering when we are far below the desired maximum span.
      double old_span_percent_squared = old_span_percent * old_span_percent;
      if (old_density / old_span_percent_squared < 0.75) {
        // We trigger old defragmentation, for example, if:
        //  old_span_percent is 110% and old_density is below 90.8%, or
        //  old_span_percent is 100% and old_density is below 75.0%, or
        //  old_span_percent is  90% and old_density is below 60.8%, or
        //  old_span_percent is  80% and old_density is below 48.0%, or
        //  old_span_percent is  70% and old_density is below 36.8%, or
        //  old_span_percent is  60% and old_density is below 27.0%, or
        //  old_span_percent is  50% and old_density is below 18.8%.

        // Set the fragmentation trigger and related attributes
        _fragmentation_trigger = true;
        _fragmentation_density = old_density;
        _fragmentation_first_old_region = first_old_region;
        _fragmentation_last_old_region = last_old_region;
      }
    }
  }
}

void ShenandoahOldHeuristics::set_trigger_if_old_is_overgrown() {
  size_t old_used = _old_gen->used() + _old_gen->get_humongous_waste();
  size_t trigger_threshold = _old_gen->usage_trigger_threshold();
  // Detects unsigned arithmetic underflow
  assert(old_used <= _heap->capacity(),
         "Old used (" SIZE_FORMAT ", " SIZE_FORMAT ") must not be more than heap capacity (" SIZE_FORMAT ")",
         _old_gen->used(), _old_gen->get_humongous_waste(), _heap->capacity());
  if (old_used > trigger_threshold) {
    _growth_trigger = true;
  }
}

void ShenandoahOldHeuristics::evaluate_triggers(size_t first_old_region, size_t last_old_region,
                                                size_t old_region_count, size_t num_regions) {
  set_trigger_if_old_is_fragmented(first_old_region, last_old_region, old_region_count, num_regions);
  set_trigger_if_old_is_overgrown();
}

bool ShenandoahOldHeuristics::should_start_gc() {
  // Cannot start a new old-gen GC until previous one has finished.
  //
  // Future refinement: under certain circumstances, we might be more sophisticated about this choice.
  // For example, we could choose to abandon the previous old collection before it has completed evacuations.
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (!_old_generation->can_start_gc() || heap->collection_set()->has_old_regions()) {
    return false;
  }

  if (_cannot_expand_trigger) {
    const size_t old_gen_capacity = _old_generation->max_capacity();
    const size_t heap_capacity = heap->capacity();
    const double percent = percent_of(old_gen_capacity, heap_capacity);
    log_trigger("Expansion failure, current size: " SIZE_FORMAT "%s which is %.1f%% of total heap size",
                 byte_size_in_proper_unit(old_gen_capacity), proper_unit_for_byte_size(old_gen_capacity), percent);
    return true;
  }

  if (_fragmentation_trigger) {
    const size_t used = _old_generation->used();
    const size_t used_regions_size = _old_generation->used_regions_size();

    // used_regions includes humongous regions
    const size_t used_regions = _old_generation->used_regions();
    assert(used_regions_size >= used, "Cannot have more used than the size of used regions");

    size_t first_old_region, last_old_region;
    double density;
    get_fragmentation_trigger_reason_for_log_message(density, first_old_region, last_old_region);
    const size_t span_of_old_regions = (last_old_region >= first_old_region)? last_old_region + 1 - first_old_region: 0;
    const size_t fragmented_free = used_regions_size - used;

    log_trigger("Old has become fragmented: "
                SIZE_FORMAT "%s available bytes spread across the range of regions spanning from "
                SIZE_FORMAT " to " SIZE_FORMAT " (" SIZE_FORMAT " regions), density: %.1f%%",
                byte_size_in_proper_unit(fragmented_free), proper_unit_for_byte_size(fragmented_free),
                first_old_region, last_old_region, span_of_old_regions, density * 100);
    return true;
  }

  if (_growth_trigger) {
    // Growth may be falsely triggered during mixed evacuations, before the mixed-evacuation candidates have been
    // evacuated.  Before acting on a false trigger, we check to confirm the trigger condition is still satisfied.
    const size_t current_usage = _old_generation->used() + _old_generation->get_humongous_waste();
    const size_t trigger_threshold = _old_generation->usage_trigger_threshold();
    const size_t heap_size = heap->capacity();
    const size_t ignore_threshold = (ShenandoahIgnoreOldGrowthBelowPercentage * heap_size) / 100;
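    // Illustrative: with a 1 GB heap and ShenandoahIgnoreOldGrowthBelowPercentage == 5 (an example value,
    // not a default), growth triggers are ignored while old usage stays below ~51 MB, unless enough
    // consecutive young cycles have completed.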
    size_t consecutive_young_cycles;
    if ((current_usage < ignore_threshold) &&
        ((consecutive_young_cycles = heap->shenandoah_policy()->consecutive_young_gc_count())
         < ShenandoahDoNotIgnoreGrowthAfterYoungCycles)) {
      log_debug(gc)("Ignoring Trigger: Old has overgrown: usage (" SIZE_FORMAT "%s) is below threshold ("
                    SIZE_FORMAT "%s) after " SIZE_FORMAT " consecutive completed young GCs",
                    byte_size_in_proper_unit(current_usage), proper_unit_for_byte_size(current_usage),
                    byte_size_in_proper_unit(ignore_threshold), proper_unit_for_byte_size(ignore_threshold),
                    consecutive_young_cycles);
      _growth_trigger = false;
    } else if (current_usage > trigger_threshold) {
      const size_t live_at_previous_old = _old_generation->get_live_bytes_after_last_mark();
      const double percent_growth = percent_of(current_usage - live_at_previous_old, live_at_previous_old);
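      // Illustrative: if 100 MB was live at the end of the previous old marking and current usage is
      // 150 MB, percent_growth is 50%.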
      log_trigger("Old has overgrown, live at end of previous OLD marking: "
                  SIZE_FORMAT "%s, current usage: " SIZE_FORMAT "%s, percent growth: %.1f%%",
                  byte_size_in_proper_unit(live_at_previous_old), proper_unit_for_byte_size(live_at_previous_old),
                  byte_size_in_proper_unit(current_usage), proper_unit_for_byte_size(current_usage), percent_growth);
      return true;
    } else {
      // Mixed evacuations have decreased current_usage such that old-growth trigger is no longer relevant.
      _growth_trigger = false;
    }
  }

  // Otherwise, defer to inherited heuristic for gc trigger.
  return this->ShenandoahHeuristics::should_start_gc();
}

void ShenandoahOldHeuristics::record_success_concurrent() {
  // Forget any triggers that occurred while OLD GC was ongoing.  If we really need to start another, it will retrigger.
  clear_triggers();
  this->ShenandoahHeuristics::record_success_concurrent();
}

void ShenandoahOldHeuristics::record_success_degenerated() {
  // Forget any triggers that occurred while OLD GC was ongoing.  If we really need to start another, it will retrigger.
  clear_triggers();
  this->ShenandoahHeuristics::record_success_degenerated();
}

void ShenandoahOldHeuristics::record_success_full() {
  // Forget any triggers that occurred while OLD GC was ongoing.  If we really need to start another, it will retrigger.
  clear_triggers();
  this->ShenandoahHeuristics::record_success_full();
}

const char* ShenandoahOldHeuristics::name() {
  return "Old";
}

bool ShenandoahOldHeuristics::is_diagnostic() {
  return false;
}

bool ShenandoahOldHeuristics::is_experimental() {
  return true;
}

void ShenandoahOldHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
                                                                    ShenandoahHeuristics::RegionData* data,
                                                                    size_t data_size, size_t free) {
  ShouldNotReachHere();
}