/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "logging/log.hpp"
#include "utilities/quickSort.hpp"

uint ShenandoahOldHeuristics::NOT_FOUND = -1U;

// sort by increasing live (so least live comes first)
int ShenandoahOldHeuristics::compare_by_live(RegionData a, RegionData b) {
  if (a._u._live_data < b._u._live_data)
    return -1;
  else if (a._u._live_data > b._u._live_data)
    return 1;
  else return 0;
}

// sort by increasing index
int ShenandoahOldHeuristics::compare_by_index(RegionData a, RegionData b) {
  if (a._region->index() < b._region->index()) {
    return -1;
  } else if (a._region->index() > b._region->index()) {
    return 1;
  } else {
    // quicksort may compare to self during search for pivot
    return 0;
  }
}

ShenandoahOldHeuristics::ShenandoahOldHeuristics(ShenandoahOldGeneration* generation, ShenandoahGenerationalHeap* gen_heap) :
  ShenandoahHeuristics(generation),
  _heap(gen_heap),
  _old_gen(generation),
  _first_pinned_candidate(NOT_FOUND),
  _last_old_collection_candidate(0),
  _next_old_collection_candidate(0),
  _last_old_region(0),
  _live_bytes_in_unprocessed_candidates(0),
  _old_generation(generation),
  _cannot_expand_trigger(false),
  _fragmentation_trigger(false),
  _growth_trigger(false),
  _fragmentation_density(0.0),
  _fragmentation_first_old_region(0),
  _fragmentation_last_old_region(0)
{
}

bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* collection_set) {
  if (unprocessed_old_collection_candidates() == 0) {
    return false;
  }

  _first_pinned_candidate = NOT_FOUND;

  uint included_old_regions = 0;
  size_t evacuated_old_bytes = 0;
  size_t collected_old_bytes = 0;

  // If a region is put into the collection set, then this region's free (not yet used) bytes are no longer
  // "available" to hold the results of other evacuations.  This may cause a decrease in the remaining amount
  // of memory that can still be evacuated.  We address this by reducing the evacuation budget by the amount
  // of live memory in that region and by the amount of unallocated memory in that region if the evacuation
  // budget is constrained by availability of free memory.
  const size_t old_evacuation_reserve = _old_generation->get_evacuation_reserve();
  const size_t old_evacuation_budget = (size_t) ((double) old_evacuation_reserve / ShenandoahOldEvacWaste);
  size_t unfragmented_available = _old_generation->free_unaffiliated_regions() * ShenandoahHeapRegion::region_size_bytes();
  size_t fragmented_available;
  size_t excess_fragmented_available = 0;
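
  // As an illustration (numbers are made up): suppose the evacuation reserve is 140 MB and ShenandoahOldEvacWaste
  // is 1.4, so the budget computed above allows roughly 100 MB of live old data to be evacuated.  If empty
  // (unaffiliated) old regions can absorb only 60 MB of that budget, the branch below directs the remaining 40 MB
  // toward the free space within partially used regions ("fragmented" memory).  Any fragmented free space beyond
  // that 40 MB is recorded as "excess" and is not part of this cycle's evacuation budget.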
  if (unfragmented_available > old_evacuation_budget) {
    unfragmented_available = old_evacuation_budget;
    fragmented_available = 0;
    excess_fragmented_available = 0;
  } else {
    assert(_old_generation->available() >= old_evacuation_budget, "Cannot budget more than is available");
    fragmented_available = _old_generation->available() - unfragmented_available;
    assert(fragmented_available + unfragmented_available >= old_evacuation_budget, "Budgets do not add up");
    if (fragmented_available + unfragmented_available > old_evacuation_budget) {
      excess_fragmented_available = (fragmented_available + unfragmented_available) - old_evacuation_budget;
      fragmented_available -= excess_fragmented_available;
    }
  }

  size_t remaining_old_evacuation_budget = old_evacuation_budget;
  log_info(gc)("Choose old regions for mixed collection: old evacuation budget: " SIZE_FORMAT "%s, candidates: %u",
               byte_size_in_proper_unit(old_evacuation_budget), proper_unit_for_byte_size(old_evacuation_budget),
               unprocessed_old_collection_candidates());

  size_t lost_evacuation_capacity = 0;

  // The number of old-gen regions that were selected as candidates for collection at the end of the most recent old-gen
  // concurrent marking phase and have not yet been collected is represented by unprocessed_old_collection_candidates().
  // Candidate regions are ordered according to increasing amount of live data.  If there is not sufficient room to
  // evacuate region N, then there is no need to even consider evacuating region N+1.
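  // For example, if the remaining budget can absorb only 2 MB of live data and candidate N holds 3 MB of live data,
  // the loop below stops at N: every subsequent candidate holds at least as much live data, so it cannot fit either.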
  while (unprocessed_old_collection_candidates() > 0) {
    // Old collection candidates are sorted in order of decreasing garbage contained therein.
    ShenandoahHeapRegion* r = next_old_collection_candidate();
    if (r == nullptr) {
      break;
    }
    assert(r->is_regular(), "There should be no humongous regions in the set of mixed-evac candidates");

    // If region r is evacuated to fragmented memory (to free memory within a partially used region), then we need
    // to decrease the capacity of the fragmented memory by the scaled loss.

    size_t live_data_for_evacuation = r->get_live_data_bytes();
    size_t lost_available = r->free();

    if ((lost_available > 0) && (excess_fragmented_available > 0)) {
      if (lost_available < excess_fragmented_available) {
        excess_fragmented_available -= lost_available;
        lost_evacuation_capacity += lost_available;
        lost_available = 0;
      } else {
        lost_available -= excess_fragmented_available;
        lost_evacuation_capacity += excess_fragmented_available;
        excess_fragmented_available = 0;
      }
    }
    size_t scaled_loss = (size_t) ((double) lost_available / ShenandoahOldEvacWaste);
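    // scaled_loss expresses the lost free memory in evacuation-budget terms: the budget was derived by dividing the
    // reserve by ShenandoahOldEvacWaste, so losing (for example) 14 KB of free memory shrinks the budget that memory
    // could have supplied by 10 KB when ShenandoahOldEvacWaste is 1.4.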
    if ((lost_available > 0) && (fragmented_available > 0)) {
      if (scaled_loss + live_data_for_evacuation < fragmented_available) {
        fragmented_available -= scaled_loss;
        scaled_loss = 0;
      } else {
        // We will have to allocate this region's evacuation memory from unfragmented memory, so don't bother
        // to decrement scaled_loss
      }
    }
    if (scaled_loss > 0) {
      // We were not able to account for the lost free memory within fragmented memory, so we need to take this
      // allocation out of unfragmented memory.  Unfragmented memory does not need to account for loss of free.
      if (live_data_for_evacuation > unfragmented_available) {
        // There is not room to evacuate this region or any that come after it within the candidates array.
        break;
      } else {
        unfragmented_available -= live_data_for_evacuation;
      }
    } else {
      // Since scaled_loss == 0, we have accounted for the loss of free memory, so we can allocate from either
      // fragmented or unfragmented available memory.  Use up the fragmented memory budget first.
      size_t evacuation_need = live_data_for_evacuation;

      if (evacuation_need > fragmented_available) {
        evacuation_need -= fragmented_available;
        fragmented_available = 0;
      } else {
        fragmented_available -= evacuation_need;
        evacuation_need = 0;
      }
      if (evacuation_need > unfragmented_available) {
        // There is not room to evacuate this region or any that come after it within the candidates array.
        break;
      } else {
        unfragmented_available -= evacuation_need;
        // dead code: evacuation_need == 0;
      }
    }
    collection_set->add_region(r);
    included_old_regions++;
    evacuated_old_bytes += live_data_for_evacuation;
    collected_old_bytes += r->garbage();
    consume_old_collection_candidate();
  }

  if (_first_pinned_candidate != NOT_FOUND) {
    // Need to deal with pinned regions
    slide_pinned_regions_to_front();
  }
  decrease_unprocessed_old_collection_candidates_live_memory(evacuated_old_bytes);
  if (included_old_regions > 0) {
    log_info(gc)("Old-gen piggyback evac (" UINT32_FORMAT " regions, evacuating " SIZE_FORMAT "%s, reclaiming: " SIZE_FORMAT "%s)",
                 included_old_regions,
                 byte_size_in_proper_unit(evacuated_old_bytes), proper_unit_for_byte_size(evacuated_old_bytes),
                 byte_size_in_proper_unit(collected_old_bytes), proper_unit_for_byte_size(collected_old_bytes));
  }

  if (unprocessed_old_collection_candidates() == 0) {
    // We have added the last of our collection candidates to a mixed collection.
    // Any triggers that occurred during mixed evacuations may no longer be valid.  They can retrigger if appropriate.
    clear_triggers();

    _old_generation->complete_mixed_evacuations();
  } else if (included_old_regions == 0) {
    // We have candidates, but none were included for evacuation.  Are they all pinned, or did we
    // just not have enough room for any of them in this collection set?
    // We don't want a region with a stuck pin to prevent subsequent old collections, so
    // if they are all pinned we transition to a state that will allow us to make these uncollected
    // (pinned) regions parsable.
    if (all_candidates_are_pinned()) {
      log_info(gc)("All " UINT32_FORMAT " candidate regions are pinned", unprocessed_old_collection_candidates());
      _old_generation->abandon_mixed_evacuations();
    } else {
      log_info(gc)("No regions selected for mixed collection. "
                   "Old evacuation budget: " PROPERFMT ", Remaining evacuation budget: " PROPERFMT
                   ", Lost capacity: " PROPERFMT
                   ", Next candidate: " UINT32_FORMAT ", Last candidate: " UINT32_FORMAT,
                   PROPERFMTARGS(old_evacuation_reserve),
                   PROPERFMTARGS(remaining_old_evacuation_budget),
                   PROPERFMTARGS(lost_evacuation_capacity),
                   _next_old_collection_candidate, _last_old_collection_candidate);
    }
  }

  return (included_old_regions > 0);
}

bool ShenandoahOldHeuristics::all_candidates_are_pinned() {
#ifdef ASSERT
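  // In debug builds, randomly report that all candidates are pinned (with probability ShenandoahCoalesceChance
  // percent), which makes it possible to exercise the abandon/coalesce-and-fill path in testing even when no
  // region is actually pinned.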
  if (uint(os::random()) % 100 < ShenandoahCoalesceChance) {
    return true;
  }
#endif

  for (uint i = _next_old_collection_candidate; i < _last_old_collection_candidate; ++i) {
    ShenandoahHeapRegion* region = _region_data[i]._region;
    if (!region->is_pinned()) {
      return false;
    }
  }
  return true;
}

void ShenandoahOldHeuristics::slide_pinned_regions_to_front() {
  // Find the first unpinned region to the left of the next region that
  // will be added to the collection set. These regions will have been
  // added to the cset, so we can use them to hold pointers to regions
  // that were pinned when the cset was chosen.
  // [ r p r p p p r r ]
  //     ^         ^ ^
  //     |         | | pointer to next region to add to a mixed collection is here.
  //     |         | first r to the left should be in the collection set now.
  //     | first pinned region, we don't need to look past this
  uint write_index = NOT_FOUND;
  for (uint search = _next_old_collection_candidate - 1; search > _first_pinned_candidate; --search) {
    ShenandoahHeapRegion* region = _region_data[search]._region;
    if (!region->is_pinned()) {
      write_index = search;
      assert(region->is_cset(), "Expected unpinned region to be added to the collection set.");
      break;
    }
  }

  // If we could not find an unpinned region, it means there are no slots available
  // to move up the pinned regions. In this case, we just reset our next index in the
  // hope that some of these regions will become unpinned before the next mixed
  // collection. We may want to bail out here instead, as it should be quite
  // rare to have so many pinned regions and may indicate something is wrong.
  if (write_index == NOT_FOUND) {
    assert(_first_pinned_candidate != NOT_FOUND, "Should only be here if there are pinned regions.");
    _next_old_collection_candidate = _first_pinned_candidate;
    return;
  }

  // Find pinned regions to the left and move their pointer into a slot
  // that was pointing at a region that has been added to the cset (or was pointing
  // to a pinned region that we've already moved up). We are done when the leftmost
  // pinned region has been slid up.
  // [ r p r x p p p r ]
  //         ^       ^
  //         |       | next region for mixed collections
  //         | Write pointer is here. We know this region is already in the cset
  //         | so we can clobber it with the next pinned region we find.
  for (int32_t search = (int32_t)write_index - 1; search >= (int32_t)_first_pinned_candidate; --search) {
    RegionData& skipped = _region_data[search];
    if (skipped._region->is_pinned()) {
      RegionData& available_slot = _region_data[write_index];
      available_slot._region = skipped._region;
      available_slot._u._live_data = skipped._u._live_data;
      --write_index;
    }
  }

  // Update to read from the leftmost pinned region. Plus one here because we decremented
  // the write index to hold the next found pinned region. We are just moving it back now
  // to point to the first pinned region.
  _next_old_collection_candidate = write_index + 1;
}

void ShenandoahOldHeuristics::prepare_for_old_collections() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  const size_t num_regions = heap->num_regions();
  size_t cand_idx = 0;
  size_t immediate_garbage = 0;
  size_t immediate_regions = 0;
  size_t live_data = 0;

  RegionData* candidates = _region_data;
  for (size_t i = 0; i < num_regions; i++) {
    ShenandoahHeapRegion* region = heap->get_region(i);
    if (!region->is_old()) {
      continue;
    }

    size_t garbage = region->garbage();
    size_t live_bytes = region->get_live_data_bytes();
    live_data += live_bytes;

    if (region->is_regular() || region->is_regular_pinned()) {
      // Only place regular or pinned regions with live data into the candidate set.
      // Pinned regions cannot be evacuated, but we are not actually choosing candidates
      // for the collection set here. That happens later during the next young GC cycle,
      // by which time the pinned region may no longer be pinned.
      if (!region->has_live()) {
        assert(!region->is_pinned(), "Pinned region should have live (pinned) objects.");
        region->make_trash_immediate();
        immediate_regions++;
        immediate_garbage += garbage;
      } else {
        region->begin_preemptible_coalesce_and_fill();
        candidates[cand_idx]._region = region;
        candidates[cand_idx]._u._live_data = live_bytes;
        cand_idx++;
      }
    } else if (region->is_humongous_start()) {
      // This will handle humongous start regions whether they are also pinned, or not.
      // If they are pinned, we expect them to hold live data, so they will not be
      // turned into immediate garbage.
      if (!region->has_live()) {
        assert(!region->is_pinned(), "Pinned region should have live (pinned) objects.");
        // The humongous object is dead, we can just return this region and the continuations
        // immediately to the freeset - no evacuations are necessary here. The continuations
        // will be made into trash by this method, so they'll be skipped by the 'is_regular'
        // check above, but we still need to count the start region.
        immediate_regions++;
        immediate_garbage += garbage;
        size_t region_count = heap->trash_humongous_region_at(region);
        log_debug(gc)("Trashed " SIZE_FORMAT " regions for humongous object.", region_count);
      }
    } else if (region->is_trash()) {
      // Count humongous objects made into trash here.
      immediate_regions++;
      immediate_garbage += garbage;
    }
  }

  _old_generation->set_live_bytes_after_last_mark(live_data);

  // TODO: Consider not running mixed collects if we recovered some threshold percentage of memory from immediate garbage.
  // This would be similar to young and global collections shortcutting evacuation, though we'd probably want a separate
  // threshold for the old generation.

  // Unlike young, we are more interested in efficiently packing OLD-gen than in reclaiming garbage first.  We sort by live-data.
  // Some regular regions may have been promoted in place with no garbage but also with very little live data.  When we "compact"
  // old-gen, we want to pack these underutilized regions together so we can have more unaffiliated (unfragmented) free regions
  // in old-gen.

  QuickSort::sort<RegionData>(candidates, cand_idx, compare_by_live, false);

  // Any old-gen region that contains ShenandoahOldGarbageThreshold (default value 25)% garbage or more is to be
  // added to the list of candidates for subsequent mixed evacuations.
  //
  // TODO: allow ShenandoahOldGarbageThreshold to be determined adaptively, by heuristics.

  const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();

  // The convention is to collect regions that have more than this amount of garbage.
  const size_t garbage_threshold = region_size_bytes * ShenandoahOldGarbageThreshold / 100;

  // Enlightened interpretation: collect regions that have less than this amount of live.
  const size_t live_threshold = region_size_bytes - garbage_threshold;
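  // Illustrative example: with 4 MB regions and ShenandoahOldGarbageThreshold at 25, garbage_threshold is 1 MB and
  // live_threshold is 3 MB, so the selection loop below keeps only regions holding at most 3 MB of live data as
  // mixed-evacuation candidates.  Note that a region promoted in place while mostly empty (little live data and
  // little garbage) still qualifies, which serves the packing goal described above.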

  _last_old_region = (uint)cand_idx;
  _last_old_collection_candidate = (uint)cand_idx;
  _next_old_collection_candidate = 0;

  size_t unfragmented = 0;
  size_t candidates_garbage = 0;

  for (size_t i = 0; i < cand_idx; i++) {
    size_t live = candidates[i]._u._live_data;
    if (live > live_threshold) {
      // Candidates are sorted in increasing order of live data, so no regions after this will be below the threshold.
      _last_old_collection_candidate = (uint)i;
      break;
    }
    size_t region_garbage = candidates[i]._region->garbage();
    size_t region_free = candidates[i]._region->free();
    candidates_garbage += region_garbage;
    unfragmented += region_free;
  }

  // defrag_count represents regions that are placed into the old collection set in order to defragment the memory
  // that we try to "reserve" for humongous allocations.
  size_t defrag_count = 0;
  size_t total_uncollected_old_regions = _last_old_region - _last_old_collection_candidate;

  if (cand_idx > _last_old_collection_candidate) {
    // Above, we have added into the set of mixed-evacuation candidates all old-gen regions for which the live memory
    // that they contain is below a particular old-garbage threshold.  Regions that were not selected for the collection
    // set hold enough live memory that it is not considered efficient (by "garbage-first standards") to compact these
    // at the current time.
    //
    // However, if any of these regions that were rejected from the collection set reside within areas of memory that
    // might interfere with future humongous allocation requests, we will prioritize them for evacuation at this time.
    // Humongous allocations target the bottom of the heap.  We want old-gen regions to congregate at the top of the
    // heap.
    //
    // Sort the regions that were initially rejected from the collection set in order of index.  This allows us to
    // focus our attention on the regions that have low index value (i.e. the old-gen regions at the bottom of the heap).
    QuickSort::sort<RegionData>(candidates + _last_old_collection_candidate, cand_idx - _last_old_collection_candidate,
                                compare_by_index, false);

    const size_t first_unselected_old_region = candidates[_last_old_collection_candidate]._region->index();
    const size_t last_unselected_old_region = candidates[cand_idx - 1]._region->index();
    size_t span_of_uncollected_regions = 1 + last_unselected_old_region - first_unselected_old_region;

    // Add no more than 1/8 of the existing old-gen regions to the set of mixed evacuation candidates.
    const int MAX_FRACTION_OF_HUMONGOUS_DEFRAG_REGIONS = 8;
    const size_t bound_on_additional_regions = cand_idx / MAX_FRACTION_OF_HUMONGOUS_DEFRAG_REGIONS;

    // The heuristic old_is_fragmented trigger may be seeking to achieve up to 7/8 density.  Allow ourselves to overshoot
    // that target (at 15/16) so we will not have to do another defragmenting old collection right away.
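    // Illustrative example: with 80 candidate regions in total (cand_idx == 80), at most 10 additional regions can be
    // appended here for defragmentation.  Each appended region is the lowest-indexed uncollected region, so the span of
    // uncollected regions shrinks from the bottom until the remaining uncollected regions occupy at least 15/16 of the
    // span they cover, or until the bound on additional regions is reached.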
    while ((defrag_count < bound_on_additional_regions) &&
           (total_uncollected_old_regions < 15 * span_of_uncollected_regions / 16)) {
      ShenandoahHeapRegion* r = candidates[_last_old_collection_candidate]._region;
      assert(r->is_regular() || r->is_regular_pinned(), "Region " SIZE_FORMAT " has wrong state for collection: %s",
             r->index(), ShenandoahHeapRegion::region_state_to_string(r->state()));
      const size_t region_garbage = candidates[_last_old_collection_candidate]._region->garbage();
      const size_t region_free = r->free();
      candidates_garbage += region_garbage;
      unfragmented += region_free;
      defrag_count++;
      _last_old_collection_candidate++;

      // We now have one fewer uncollected regions, and our uncollected span shrinks because we have removed its first region.
      total_uncollected_old_regions--;
      span_of_uncollected_regions = 1 + last_unselected_old_region - candidates[_last_old_collection_candidate]._region->index();
    }
  }

  // Note that we do not coalesce and fill occupied humongous regions
  // HR: humongous regions, RR: regular regions, CF: coalesce and fill regions
  const size_t collectable_garbage = immediate_garbage + candidates_garbage;
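
  // Estimate the live data that mixed evacuations still have to move: for each selected candidate, live bytes are
  // approximately the region size minus the region's garbage and minus its untouched free space.  For example, 10
  // candidate regions of 4 MB each that together hold 25 MB of garbage and 5 MB of free space leave roughly 10 MB
  // of live data to evacuate.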
  const size_t old_candidates = _last_old_collection_candidate;
  const size_t mixed_evac_live = old_candidates * region_size_bytes - (candidates_garbage + unfragmented);
  set_unprocessed_old_collection_candidates_live_memory(mixed_evac_live);

  log_info(gc)("Old-Gen Collectable Garbage: " PROPERFMT " consolidated with free: " PROPERFMT ", over " SIZE_FORMAT " regions",
               PROPERFMTARGS(collectable_garbage), PROPERFMTARGS(unfragmented), old_candidates);
  log_info(gc)("Old-Gen Immediate Garbage: " PROPERFMT " over " SIZE_FORMAT " regions",
               PROPERFMTARGS(immediate_garbage), immediate_regions);
  log_info(gc)("Old regions selected for defragmentation: " SIZE_FORMAT, defrag_count);
  log_info(gc)("Old regions not selected: " SIZE_FORMAT, total_uncollected_old_regions);

  if (unprocessed_old_collection_candidates() > 0) {
    _old_generation->transition_to(ShenandoahOldGeneration::EVACUATING);
  } else if (has_coalesce_and_fill_candidates()) {
    _old_generation->transition_to(ShenandoahOldGeneration::FILLING);
  } else {
    _old_generation->transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP);
  }
}

size_t ShenandoahOldHeuristics::unprocessed_old_collection_candidates_live_memory() const {
  return _live_bytes_in_unprocessed_candidates;
}

void ShenandoahOldHeuristics::set_unprocessed_old_collection_candidates_live_memory(size_t initial_live) {
  _live_bytes_in_unprocessed_candidates = initial_live;
}

void ShenandoahOldHeuristics::decrease_unprocessed_old_collection_candidates_live_memory(size_t evacuated_live) {
  assert(evacuated_live <= _live_bytes_in_unprocessed_candidates, "Cannot evacuate more than was present");
  _live_bytes_in_unprocessed_candidates -= evacuated_live;
}

// Used by unit test: test_shenandoahOldHeuristic.cpp
uint ShenandoahOldHeuristics::last_old_collection_candidate_index() const {
  return _last_old_collection_candidate;
}

uint ShenandoahOldHeuristics::unprocessed_old_collection_candidates() const {
  return _last_old_collection_candidate - _next_old_collection_candidate;
}

ShenandoahHeapRegion* ShenandoahOldHeuristics::next_old_collection_candidate() {
  while (_next_old_collection_candidate < _last_old_collection_candidate) {
    ShenandoahHeapRegion* next = _region_data[_next_old_collection_candidate]._region;
    if (!next->is_pinned()) {
      return next;
    } else {
      assert(next->is_pinned(), "sanity");
      if (_first_pinned_candidate == NOT_FOUND) {
        _first_pinned_candidate = _next_old_collection_candidate;
      }
    }

    _next_old_collection_candidate++;
  }
  return nullptr;
}

void ShenandoahOldHeuristics::consume_old_collection_candidate() {
  _next_old_collection_candidate++;
}
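
// Copies into the caller-supplied buffer a pointer to every old region between the next unprocessed collection
// candidate and the last old region recorded by this heuristic; these are the regions that remain subject to
// coalesce-and-fill.  Returns the number of regions copied.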
unsigned int ShenandoahOldHeuristics::get_coalesce_and_fill_candidates(ShenandoahHeapRegion** buffer) {
  uint end = _last_old_region;
  uint index = _next_old_collection_candidate;
  while (index < end) {
    *buffer++ = _region_data[index++]._region;
  }
  return (_last_old_region - _next_old_collection_candidate);
}

void ShenandoahOldHeuristics::abandon_collection_candidates() {
  _last_old_collection_candidate = 0;
  _next_old_collection_candidate = 0;
  _last_old_region = 0;
}

void ShenandoahOldHeuristics::record_cycle_end() {
  this->ShenandoahHeuristics::record_cycle_end();
  clear_triggers();
}

void ShenandoahOldHeuristics::clear_triggers() {
  // Clear any triggers that were set during mixed evacuations.  Conditions may be different now that this phase has finished.
  _cannot_expand_trigger = false;
  _fragmentation_trigger = false;
  _growth_trigger = false;
}

void ShenandoahOldHeuristics::trigger_collection_if_fragmented(size_t first_old_region, size_t last_old_region, size_t old_region_count, size_t num_regions) {
  if (ShenandoahGenerationalHumongousReserve > 0) {
    size_t old_region_span = (first_old_region <= last_old_region) ? (last_old_region + 1 - first_old_region) : 0;
    size_t allowed_old_gen_span = num_regions - (ShenandoahGenerationalHumongousReserve * num_regions) / 100;

    // Tolerate lower density if total span is small.  Here's the implementation:
    //   if old_gen spans more than 100% and density < 75%, trigger old-defrag
    //   else if old_gen spans more than 87.5% and density < 62.5%, trigger old-defrag
    //   else if old_gen spans more than 75% and density < 50%, trigger old-defrag
    //   else if old_gen spans more than 62.5% and density < 37.5%, trigger old-defrag
    //   else if old_gen spans more than 50% and density < 25%, trigger old-defrag
    //
    // A previous implementation was more aggressive in triggering, resulting in degraded throughput when
    // humongous allocation was not required.

    size_t old_available = _old_gen->available();
    size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
    size_t old_unaffiliated_available = _old_gen->free_unaffiliated_regions() * region_size_bytes;
    assert(old_available >= old_unaffiliated_available, "sanity");
    size_t old_fragmented_available = old_available - old_unaffiliated_available;

    size_t old_bytes_consumed = old_region_count * region_size_bytes - old_fragmented_available;
    size_t old_bytes_spanned = old_region_span * region_size_bytes;
    double old_density = ((double) old_bytes_consumed) / old_bytes_spanned;
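
    // Illustrative example: in a heap of 1024 regions with ShenandoahGenerationalHumongousReserve at 10 (percent),
    // allowed_old_gen_span is 922 regions.  The loop below then triggers defragmentation if, for instance, old regions
    // span at least 922 regions at a density below 75%, or at least 806 regions (7/8 of 922) at a density below 62.5%,
    // and so on down to a span of 461 regions at a density below 25%.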
    uint eighths = 8;
    for (uint i = 0; i < 5; i++) {
      size_t span_threshold = eighths * allowed_old_gen_span / 8;
      double density_threshold = (eighths - 2) / 8.0;
      if ((old_region_span >= span_threshold) && (old_density < density_threshold)) {
        trigger_old_is_fragmented(old_density, first_old_region, last_old_region);
        return;
      }
      eighths--;
    }
  }
}

void ShenandoahOldHeuristics::trigger_collection_if_overgrown() {
  size_t old_used = _old_gen->used() + _old_gen->get_humongous_waste();
  size_t trigger_threshold = _old_gen->usage_trigger_threshold();
  // Detects unsigned arithmetic underflow
  assert(old_used <= _heap->capacity(),
         "Old used (" SIZE_FORMAT ", " SIZE_FORMAT ") must not be more than heap capacity (" SIZE_FORMAT ")",
         _old_gen->used(), _old_gen->get_humongous_waste(), _heap->capacity());
  if (old_used > trigger_threshold) {
    trigger_old_has_grown();
  }
}

void ShenandoahOldHeuristics::trigger_maybe(size_t first_old_region, size_t last_old_region,
                                            size_t old_region_count, size_t num_regions) {
  trigger_collection_if_fragmented(first_old_region, last_old_region, old_region_count, num_regions);
  trigger_collection_if_overgrown();
}

bool ShenandoahOldHeuristics::should_start_gc() {
  // Cannot start a new old-gen GC until previous one has finished.
  //
  // Future refinement: under certain circumstances, we might be more sophisticated about this choice.
  // For example, we could choose to abandon the previous old collection before it has completed evacuations.
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (!_old_generation->can_start_gc() || heap->collection_set()->has_old_regions()) {
    return false;
  }

  if (_cannot_expand_trigger) {
    const size_t old_gen_capacity = _old_generation->max_capacity();
    const size_t heap_capacity = heap->capacity();
    const double percent = percent_of(old_gen_capacity, heap_capacity);
    log_info(gc)("Trigger (OLD): Expansion failure, current size: " SIZE_FORMAT "%s which is %.1f%% of total heap size",
                 byte_size_in_proper_unit(old_gen_capacity), proper_unit_for_byte_size(old_gen_capacity), percent);
    return true;
  }

  if (_fragmentation_trigger) {
    const size_t used = _old_generation->used();
    const size_t used_regions_size = _old_generation->used_regions_size();

    // used_regions includes humongous regions
    const size_t used_regions = _old_generation->used_regions();
    assert(used_regions_size > used_regions, "Cannot have more used than used regions");

    size_t first_old_region, last_old_region;
    double density;
    get_fragmentation_trigger_reason_for_log_message(density, first_old_region, last_old_region);
    const size_t span_of_old_regions = (last_old_region >= first_old_region) ? last_old_region + 1 - first_old_region : 0;
    const size_t fragmented_free = used_regions_size - used;

    log_info(gc)("Trigger (OLD): Old has become fragmented: "
                 SIZE_FORMAT "%s available bytes spread across the range of regions spanning from "
                 SIZE_FORMAT " to " SIZE_FORMAT " (" SIZE_FORMAT " regions), density: %.1f%%",
                 byte_size_in_proper_unit(fragmented_free), proper_unit_for_byte_size(fragmented_free),
                 first_old_region, last_old_region, span_of_old_regions, density * 100);
    return true;
  }

  if (_growth_trigger) {
    // Growth may be falsely triggered during mixed evacuations, before the mixed-evacuation candidates have been
    // evacuated.  Before acting on a false trigger, we check to confirm the trigger condition is still satisfied.
    const size_t current_usage = _old_generation->used() + _old_generation->get_humongous_waste();
    const size_t trigger_threshold = _old_generation->usage_trigger_threshold();
    const size_t heap_size = heap->capacity();
    const size_t ignore_threshold = (ShenandoahIgnoreOldGrowthBelowPercentage * heap_size) / 100;
    size_t consecutive_young_cycles;
    if ((current_usage < ignore_threshold) &&
        ((consecutive_young_cycles = heap->shenandoah_policy()->consecutive_young_gc_count())
         < ShenandoahDoNotIgnoreGrowthAfterYoungCycles)) {
      log_debug(gc)("Ignoring Trigger (OLD): Old has overgrown: usage (" SIZE_FORMAT "%s) is below threshold ("
                    SIZE_FORMAT "%s) after " SIZE_FORMAT " consecutive completed young GCs",
                    byte_size_in_proper_unit(current_usage), proper_unit_for_byte_size(current_usage),
                    byte_size_in_proper_unit(ignore_threshold), proper_unit_for_byte_size(ignore_threshold),
                    consecutive_young_cycles);
      _growth_trigger = false;
    } else if (current_usage > trigger_threshold) {
      const size_t live_at_previous_old = _old_generation->get_live_bytes_after_last_mark();
      const double percent_growth = percent_of(current_usage - live_at_previous_old, live_at_previous_old);
      log_info(gc)("Trigger (OLD): Old has overgrown, live at end of previous OLD marking: "
                   SIZE_FORMAT "%s, current usage: " SIZE_FORMAT "%s, percent growth: %.1f%%",
                   byte_size_in_proper_unit(live_at_previous_old), proper_unit_for_byte_size(live_at_previous_old),
                   byte_size_in_proper_unit(current_usage), proper_unit_for_byte_size(current_usage), percent_growth);
      return true;
    } else {
      // Mixed evacuations have decreased current_usage such that the old-growth trigger is no longer relevant.
      _growth_trigger = false;
    }
  }

  // Otherwise, defer to the inherited heuristic for the GC trigger.
  return this->ShenandoahHeuristics::should_start_gc();
}

void ShenandoahOldHeuristics::record_success_concurrent() {
  // Forget any triggers that occurred while OLD GC was ongoing.  If we really need to start another, it will retrigger.
  clear_triggers();
  this->ShenandoahHeuristics::record_success_concurrent();
}

void ShenandoahOldHeuristics::record_success_degenerated() {
  // Forget any triggers that occurred while OLD GC was ongoing.  If we really need to start another, it will retrigger.
  clear_triggers();
  this->ShenandoahHeuristics::record_success_degenerated();
}

void ShenandoahOldHeuristics::record_success_full() {
  // Forget any triggers that occurred while OLD GC was ongoing.  If we really need to start another, it will retrigger.
  clear_triggers();
  this->ShenandoahHeuristics::record_success_full();
}

const char* ShenandoahOldHeuristics::name() {
  return "Old";
}

bool ShenandoahOldHeuristics::is_diagnostic() {
  return false;
}

bool ShenandoahOldHeuristics::is_experimental() {
  return true;
}

void ShenandoahOldHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
                                                                    ShenandoahHeuristics::RegionData* data,
                                                                    size_t data_size, size_t free) {
  ShouldNotReachHere();
}