/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "utilities/quickSort.hpp"

#define BYTES_FORMAT    SIZE_FORMAT "%s"
#define FORMAT_BYTES(b) byte_size_in_proper_unit(b), proper_unit_for_byte_size(b)

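// NOT_FOUND is -1U (i.e. UINT_MAX), a sentinel index meaning "no such candidate",
// used for example when no pinned candidate has been recorded yet.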
uint ShenandoahOldHeuristics::NOT_FOUND = -1U;

// sort by increasing live (so least live comes first)
int ShenandoahOldHeuristics::compare_by_live(RegionData a, RegionData b) {
  if (a._u._live_data < b._u._live_data) {
    return -1;
  } else if (a._u._live_data > b._u._live_data) {
    return 1;
  } else {
    return 0;
  }
}

// sort by increasing index
int ShenandoahOldHeuristics::compare_by_index(RegionData a, RegionData b) {
  if (a._region->index() < b._region->index()) {
    return -1;
  } else if (a._region->index() > b._region->index()) {
    return 1;
  } else {
    // quicksort may compare to self during search for pivot
    return 0;
  }
}

ShenandoahOldHeuristics::ShenandoahOldHeuristics(ShenandoahOldGeneration* generation) :
  ShenandoahHeuristics(generation),
  _first_pinned_candidate(NOT_FOUND),
  _last_old_collection_candidate(0),
  _next_old_collection_candidate(0),
  _last_old_region(0),
  _live_bytes_in_unprocessed_candidates(0),
  _old_generation(generation),
  _cannot_expand_trigger(false),
  _fragmentation_trigger(false),
  _growth_trigger(false) {
}

bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* collection_set) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (unprocessed_old_collection_candidates() == 0) {
    return false;
  }

  _first_pinned_candidate = NOT_FOUND;

  uint included_old_regions = 0;
  size_t evacuated_old_bytes = 0;
  size_t collected_old_bytes = 0;

  // If a region is put into the collection set, then this region's free (not yet used) bytes are no longer
  // "available" to hold the results of other evacuations.  This may cause a decrease in the remaining amount
  // of memory that can still be evacuated.  We address this by reducing the evacuation budget by the amount
  // of live memory in that region and by the amount of unallocated memory in that region if the evacuation
  // budget is constrained by availability of free memory.
  size_t old_evacuation_budget = (size_t) ((double) heap->get_old_evac_reserve() / ShenandoahOldEvacWaste);
  size_t unfragmented_available = _old_generation->free_unaffiliated_regions() * ShenandoahHeapRegion::region_size_bytes();
  size_t fragmented_available;
  // Initialize to zero so the value is well defined when fragmented plus unfragmented availability exactly equals the budget.
  size_t excess_fragmented_available = 0;

  if (unfragmented_available > old_evacuation_budget) {
    unfragmented_available = old_evacuation_budget;
    fragmented_available = 0;
    excess_fragmented_available = 0;
  } else {
    assert(_old_generation->available() >= old_evacuation_budget, "Cannot budget more than is available");
    fragmented_available = _old_generation->available() - unfragmented_available;
    assert(fragmented_available + unfragmented_available >= old_evacuation_budget, "Budgets do not add up");
    if (fragmented_available + unfragmented_available > old_evacuation_budget) {
      excess_fragmented_available = (fragmented_available + unfragmented_available) - old_evacuation_budget;
      fragmented_available -= excess_fragmented_available;
    }
  }
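  // Illustrative example (all numbers are hypothetical, not defaults): suppose the waste-adjusted
  // old_evacuation_budget is 40 MB, old-gen has six empty (unaffiliated) regions of 4 MB each
  // (unfragmented_available == 24 MB), and _old_generation->available() is 50 MB.  Then
  // fragmented_available starts at 26 MB; since 24 MB + 26 MB exceeds the 40 MB budget,
  // excess_fragmented_available becomes 10 MB and fragmented_available is reduced to 16 MB.
  // Evacuations are charged against fragmented_available and unfragmented_available, while the
  // 10 MB excess absorbs the free() space of regions added to the collection set before the
  // working budgets themselves have to shrink.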

  size_t remaining_old_evacuation_budget = old_evacuation_budget;
  log_info(gc)("Choose old regions for mixed collection: old evacuation budget: " SIZE_FORMAT "%s, candidates: %u",
               byte_size_in_proper_unit(old_evacuation_budget), proper_unit_for_byte_size(old_evacuation_budget),
               unprocessed_old_collection_candidates());

  size_t lost_evacuation_capacity = 0;

  // The number of old-gen regions that were selected as candidates for collection at the end of the most recent old-gen
  // concurrent marking phase and have not yet been collected is represented by unprocessed_old_collection_candidates().
  // Candidate regions are ordered according to increasing amount of live data.  If there is not sufficient room to
  // evacuate region N, then there is no need to even consider evacuating region N+1.
  while (unprocessed_old_collection_candidates() > 0) {
    // Old collection candidates are sorted in order of increasing live data (least live data first).
    ShenandoahHeapRegion* r = next_old_collection_candidate();
    if (r == nullptr) {
      break;
    }
    assert(r->is_regular(), "There should be no humongous regions in the set of mixed-evac candidates");

    // If region r is evacuated to fragmented memory (to free memory within a partially used region), then we need
    // to decrease the capacity of the fragmented memory by the scaled loss.

    size_t live_data_for_evacuation = r->get_live_data_bytes();
    size_t lost_available = r->free();

    if ((lost_available > 0) && (excess_fragmented_available > 0)) {
      if (lost_available < excess_fragmented_available) {
        excess_fragmented_available -= lost_available;
        lost_evacuation_capacity += lost_available;
        lost_available = 0;
      } else {
        lost_available -= excess_fragmented_available;
        lost_evacuation_capacity += excess_fragmented_available;
        excess_fragmented_available = 0;
      }
    }
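    // The remaining lost free space is converted into the same units as the evacuation budgets (live data that
    // can be evacuated, i.e. free memory divided by the waste factor).  Illustrative (value hypothetical): with
    // ShenandoahOldEvacWaste == 1.25, 1 MB of lost free space in r charges 0.8 MB against fragmented_available.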
    size_t scaled_loss = (size_t) ((double) lost_available / ShenandoahOldEvacWaste);
    if ((lost_available > 0) && (fragmented_available > 0)) {
      if (scaled_loss + live_data_for_evacuation < fragmented_available) {
        fragmented_available -= scaled_loss;
        scaled_loss = 0;
      } else {
        // We will have to allocate this region's evacuation memory from unfragmented memory, so don't bother
        // to decrement scaled_loss
      }
    }
    if (scaled_loss > 0) {
      // We were not able to account for the lost free memory within the fragmented budget, so we take this
      // region's evacuation need out of unfragmented memory.  Unfragmented memory does not need to account
      // for the loss of free memory.
      if (live_data_for_evacuation > unfragmented_available) {
        // There is not room to evacuate this region or any that come after it within the candidates array.
        break;
      } else {
        unfragmented_available -= live_data_for_evacuation;
      }
    } else {
      // Since scaled_loss == 0, we have accounted for the loss of free memory, so we can allocate from either
      // fragmented or unfragmented available memory.  Use up the fragmented memory budget first.
      size_t evacuation_need = live_data_for_evacuation;

      if (evacuation_need > fragmented_available) {
        evacuation_need -= fragmented_available;
        fragmented_available = 0;
      } else {
        fragmented_available -= evacuation_need;
        evacuation_need = 0;
      }
      if (evacuation_need > unfragmented_available) {
        // There is not room to evacuate this region or any that come after it within the candidates array.
        break;
      } else {
        unfragmented_available -= evacuation_need;
        // dead code: evacuation_need == 0;
      }
    }
    collection_set->add_region(r);
    included_old_regions++;
    evacuated_old_bytes += live_data_for_evacuation;
    collected_old_bytes += r->garbage();
    consume_old_collection_candidate();
  }

  if (_first_pinned_candidate != NOT_FOUND) {
    // Need to deal with pinned regions
    slide_pinned_regions_to_front();
  }
  decrease_unprocessed_old_collection_candidates_live_memory(evacuated_old_bytes);
  if (included_old_regions > 0) {
    log_info(gc)("Old-gen piggyback evac (" UINT32_FORMAT " regions, evacuating " SIZE_FORMAT "%s, reclaiming: " SIZE_FORMAT "%s)",
                 included_old_regions,
                 byte_size_in_proper_unit(evacuated_old_bytes), proper_unit_for_byte_size(evacuated_old_bytes),
                 byte_size_in_proper_unit(collected_old_bytes), proper_unit_for_byte_size(collected_old_bytes));
  }

  if (unprocessed_old_collection_candidates() == 0) {
    // We have added the last of our collection candidates to a mixed collection.
    // Any triggers that occurred during mixed evacuations may no longer be valid.  They can retrigger if appropriate.
    clear_triggers();
    if (has_coalesce_and_fill_candidates()) {
      _old_generation->transition_to(ShenandoahOldGeneration::FILLING);
    } else {
      _old_generation->transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP);
    }
  } else if (included_old_regions == 0) {
    // We have candidates, but none were included for evacuation.  Are they all pinned, or did we just not
    // have enough room for any of them in this collection set?  We don't want a region with a stuck pin to
    // prevent subsequent old collections, so if they are all pinned we transition to a state that will allow
    // us to make these uncollected (pinned) regions parsable.
    if (all_candidates_are_pinned()) {
      log_info(gc)("All " UINT32_FORMAT " candidate regions are pinned", unprocessed_old_collection_candidates());
      _old_generation->transition_to(ShenandoahOldGeneration::FILLING);
    } else {
      log_info(gc)("No regions selected for mixed collection. "
                   "Old evacuation budget: " BYTES_FORMAT ", Remaining evacuation budget: " BYTES_FORMAT
                   ", Lost capacity: " BYTES_FORMAT
                   ", Next candidate: " UINT32_FORMAT ", Last candidate: " UINT32_FORMAT,
                   FORMAT_BYTES(heap->get_old_evac_reserve()),
                   FORMAT_BYTES(remaining_old_evacuation_budget),
                   FORMAT_BYTES(lost_evacuation_capacity),
                   _next_old_collection_candidate, _last_old_collection_candidate);
    }
  }

  return (included_old_regions > 0);
}

bool ShenandoahOldHeuristics::all_candidates_are_pinned() {
#ifdef ASSERT
  if (uint(os::random()) % 100 < ShenandoahCoalesceChance) {
    return true;
  }
#endif

  for (uint i = _next_old_collection_candidate; i < _last_old_collection_candidate; ++i) {
    ShenandoahHeapRegion* region = _region_data[i]._region;
    if (!region->is_pinned()) {
      return false;
    }
  }
  return true;
}

void ShenandoahOldHeuristics::slide_pinned_regions_to_front() {
  // Find the first unpinned region to the left of the next region that
  // will be added to the collection set. These regions will have been
  // added to the cset, so we can use them to hold pointers to regions
  // that were pinned when the cset was chosen.
  // [ r p r p p p r r ]
  //     ^         ^ ^
  //     |         | | pointer to next region to add to a mixed collection is here.
  //     |         | first r to the left should be in the collection set now.
  //     | first pinned region, we don't need to look past this
  uint write_index = NOT_FOUND;
  for (uint search = _next_old_collection_candidate - 1; search > _first_pinned_candidate; --search) {
    ShenandoahHeapRegion* region = _region_data[search]._region;
    if (!region->is_pinned()) {
      write_index = search;
      assert(region->is_cset(), "Expected unpinned region to be added to the collection set.");
      break;
    }
  }

  // If we could not find an unpinned region, it means there are no slots available
  // to move up the pinned regions. In this case, we just reset our next index in the
  // hopes that some of these regions will become unpinned before the next mixed
  // collection. We may want to bail out here instead, as it should be quite
  // rare to have so many pinned regions, and it may indicate something is wrong.
  if (write_index == NOT_FOUND) {
    assert(_first_pinned_candidate != NOT_FOUND, "Should only be here if there are pinned regions.");
    _next_old_collection_candidate = _first_pinned_candidate;
    return;
  }

  // Find pinned regions to the left and move their pointer into a slot
  // that was pointing at a region that has been added to the cset (or was pointing
  // to a pinned region that we've already moved up). We are done when the leftmost
  // pinned region has been slid up.
  // [ r p r x p p p r ]
  //         ^       ^
  //         |       | next region for mixed collections
  //         | Write pointer is here. We know this region is already in the cset
  //         | so we can clobber it with the next pinned region we find.
  for (int32_t search = (int32_t)write_index - 1; search >= (int32_t)_first_pinned_candidate; --search) {
    RegionData& skipped = _region_data[search];
    if (skipped._region->is_pinned()) {
      RegionData& available_slot = _region_data[write_index];
      available_slot._region = skipped._region;
      available_slot._u._live_data = skipped._u._live_data;
      --write_index;
    }
  }

  // Update to read from the leftmost pinned region. Plus one here because we decremented
  // the write index to hold the next found pinned region. We are just moving it back now
  // to point to the first pinned region.
  _next_old_collection_candidate = write_index + 1;
}

void ShenandoahOldHeuristics::prepare_for_old_collections() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  size_t cand_idx = 0;
  size_t total_garbage = 0;
  size_t num_regions = heap->num_regions();
  size_t immediate_garbage = 0;
  size_t immediate_regions = 0;
  size_t live_data = 0;

  RegionData* candidates = _region_data;
  for (size_t i = 0; i < num_regions; i++) {
    ShenandoahHeapRegion* region = heap->get_region(i);
    if (!_old_generation->contains(region)) {
      continue;
    }

    size_t garbage = region->garbage();
    size_t live_bytes = region->get_live_data_bytes();
    total_garbage += garbage;
    live_data += live_bytes;

    // Only place regular regions into the candidate set
    if (region->is_regular()) {
      if (!region->has_live()) {
        assert(!region->is_pinned(), "Pinned region should have live (pinned) objects.");
        region->make_trash_immediate();
        immediate_regions++;
        immediate_garbage += garbage;
      } else {
        region->begin_preemptible_coalesce_and_fill();
        candidates[cand_idx]._region = region;
        candidates[cand_idx]._u._live_data = live_bytes;
        cand_idx++;
      }
    } else if (region->is_humongous_start()) {
      if (!region->has_live()) {
        assert(!region->is_pinned(), "Pinned region should have live (pinned) objects.");
        // The humongous object is dead, we can just return this region and the continuations
        // immediately to the freeset - no evacuations are necessary here. The continuations
        // will be made into trash by this method, so they'll be skipped by the 'is_regular'
        // check above, but we still need to count the start region.
        immediate_regions++;
        immediate_garbage += garbage;
        size_t region_count = heap->trash_humongous_region_at(region);
        log_debug(gc)("Trashed " SIZE_FORMAT " regions for humongous object.", region_count);
      }
    } else if (region->is_trash()) {
      // Count humongous objects made into trash here.
      immediate_regions++;
      immediate_garbage += garbage;
    }
  }

  _old_generation->set_live_bytes_after_last_mark(live_data);

  // TODO: Consider not running mixed collections if we recovered some threshold percentage of memory from immediate garbage.
  // This would be similar to young and global collections shortcutting evacuation, though we'd probably want a separate
  // threshold for the old generation.

  // Unlike young, we are more interested in efficiently packing OLD-gen than in reclaiming garbage first.  We sort by live-data.
  // Some regular regions may have been promoted in place with no garbage but also with very little live data.  When we "compact"
  // old-gen, we want to pack these underutilized regions together so we can have more unaffiliated (unfragmented) free regions
  // in old-gen.

  QuickSort::sort<RegionData>(candidates, cand_idx, compare_by_live, false);

  // Any old-gen region that contains at least ShenandoahOldGarbageThreshold (default value 25) percent garbage is to be
  // added to the list of candidates for subsequent mixed evacuations.
  //
  // TODO: allow ShenandoahOldGarbageThreshold to be determined adaptively, by heuristics.

  const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();

  // The convention is to collect regions that have more than this amount of garbage.
  const size_t garbage_threshold = region_size_bytes * ShenandoahOldGarbageThreshold / 100;

  // Enlightened interpretation: collect regions that have less than this amount of live.
  const size_t live_threshold = region_size_bytes - garbage_threshold;
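  // Illustrative example (numbers are hypothetical, not defaults): with a 4 MB region size and
  // ShenandoahOldGarbageThreshold == 25, garbage_threshold is 1 MB and live_threshold is 3 MB,
  // so a region is accepted as a mixed-evacuation candidate when it holds at most 3 MB of live data.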

  size_t candidates_garbage = 0;
  _last_old_region = (uint)cand_idx;
  _last_old_collection_candidate = (uint)cand_idx;
  _next_old_collection_candidate = 0;

  size_t unfragmented = 0;

  for (size_t i = 0; i < cand_idx; i++) {
    size_t live = candidates[i]._u._live_data;
    if (live > live_threshold) {
      // Candidates are sorted in increasing order of live data, so no regions after this will be below the threshold.
      _last_old_collection_candidate = (uint)i;
      break;
    }
    size_t region_garbage = candidates[i]._region->garbage();
    size_t region_free = candidates[i]._region->free();
    candidates_garbage += region_garbage;
    unfragmented += region_free;
  }

  size_t defrag_count = 0;
  if (cand_idx > _last_old_collection_candidate) {
    // Above, we have added into the set of mixed-evacuation candidates all old-gen regions for which the live memory
    // that they contain is below a particular old-garbage threshold.  Regions that were not selected for the collection
    // set hold enough live memory that it is not considered efficient (by "garbage-first standards") to compact these
    // at the current time.
    //
    // However, if any of these regions that were rejected from the collection set reside within areas of memory that
    // might interfere with future humongous allocation requests, we will prioritize them for evacuation at this time.
    // Humongous allocations target the bottom of the heap.  We want old-gen regions to congregate at the top of the
    // heap.
    //
    // Sort the regions that were initially rejected from the collection set in order of index.  This allows us to
    // focus our attention on the regions that have low index value (i.e. the old-gen regions at the bottom of the heap).
    QuickSort::sort<RegionData>(candidates + _last_old_collection_candidate, cand_idx - _last_old_collection_candidate,
                                compare_by_index, false);

    size_t first_unselected_old_region = candidates[_last_old_collection_candidate]._region->index();
    size_t last_unselected_old_region = candidates[cand_idx - 1]._region->index();
    size_t span_of_uncollected_regions = 1 + last_unselected_old_region - first_unselected_old_region;
    size_t total_uncollected_old_regions = cand_idx - _last_old_collection_candidate;

    // Add no more than 1/8 of the existing old-gen regions to the set of mixed evacuation candidates.
    const int MAX_FRACTION_OF_HUMONGOUS_DEFRAG_REGIONS = 8;
    size_t bound_on_additional_regions = cand_idx / MAX_FRACTION_OF_HUMONGOUS_DEFRAG_REGIONS;

    // The heuristic old_is_fragmented trigger may be seeking to achieve up to 7/8 density.  Allow ourselves to overshoot
    // that target (at 15/16) so we will not have to do another defragmenting old collection right away.
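    // Illustrative example (numbers hypothetical): if 40 uncollected candidate regions are spread across a span
    // of 80 region indices (50% density), we keep promoting the lowest-index rejected regions into the mixed
    // evacuation candidate set until either cand_idx / 8 extra regions have been added or the remaining
    // uncollected regions occupy at least 15/16 of their (shrinking) span.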
    while ((defrag_count < bound_on_additional_regions) &&
           (total_uncollected_old_regions < 15 * span_of_uncollected_regions / 16)) {
      ShenandoahHeapRegion* r = candidates[_last_old_collection_candidate]._region;
      assert (r->is_regular(), "Only regular regions are in the candidate set");
      size_t region_garbage = candidates[_last_old_collection_candidate]._region->garbage();
      size_t region_free = r->free();
      candidates_garbage += region_garbage;
      unfragmented += region_free;
      defrag_count++;
      _last_old_collection_candidate++;

      // We now have one fewer uncollected region, and our uncollected span shrinks because we have removed its first region.
      total_uncollected_old_regions--;
      span_of_uncollected_regions = 1 + last_unselected_old_region - candidates[_last_old_collection_candidate]._region->index();
    }
  }

  // Note that we do not coalesce and fill occupied humongous regions
  // HR: humongous regions, RR: regular regions, CF: coalesce and fill regions
  size_t collectable_garbage = immediate_garbage + candidates_garbage;
  size_t old_candidates = _last_old_collection_candidate;
  size_t mixed_evac_live = old_candidates * region_size_bytes - (candidates_garbage + unfragmented);
  set_unprocessed_old_collection_candidates_live_memory(mixed_evac_live);

  log_info(gc)("Old-Gen Collectable Garbage: " SIZE_FORMAT "%s "
               "consolidated with free: " SIZE_FORMAT "%s, over " SIZE_FORMAT " regions (humongous defragmentation: "
               SIZE_FORMAT " regions), Old-Gen Immediate Garbage: " SIZE_FORMAT "%s over " SIZE_FORMAT " regions.",
               byte_size_in_proper_unit(collectable_garbage), proper_unit_for_byte_size(collectable_garbage),
               byte_size_in_proper_unit(unfragmented),        proper_unit_for_byte_size(unfragmented),
               old_candidates, defrag_count,
               byte_size_in_proper_unit(immediate_garbage),   proper_unit_for_byte_size(immediate_garbage), immediate_regions);

  if (unprocessed_old_collection_candidates() > 0) {
    _old_generation->transition_to(ShenandoahOldGeneration::EVACUATING);
  } else if (has_coalesce_and_fill_candidates()) {
    _old_generation->transition_to(ShenandoahOldGeneration::FILLING);
  } else {
    _old_generation->transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP);
  }
}

size_t ShenandoahOldHeuristics::unprocessed_old_collection_candidates_live_memory() const {
  return _live_bytes_in_unprocessed_candidates;
}

void ShenandoahOldHeuristics::set_unprocessed_old_collection_candidates_live_memory(size_t initial_live) {
  _live_bytes_in_unprocessed_candidates = initial_live;
}

void ShenandoahOldHeuristics::decrease_unprocessed_old_collection_candidates_live_memory(size_t evacuated_live) {
  assert(evacuated_live <= _live_bytes_in_unprocessed_candidates, "Cannot evacuate more than was present");
  _live_bytes_in_unprocessed_candidates -= evacuated_live;
}

// Used by unit test: test_shenandoahOldHeuristic.cpp
uint ShenandoahOldHeuristics::last_old_collection_candidate_index() const {
  return _last_old_collection_candidate;
}

uint ShenandoahOldHeuristics::unprocessed_old_collection_candidates() const {
  return _last_old_collection_candidate - _next_old_collection_candidate;
}

ShenandoahHeapRegion* ShenandoahOldHeuristics::next_old_collection_candidate() {
  while (_next_old_collection_candidate < _last_old_collection_candidate) {
    ShenandoahHeapRegion* next = _region_data[_next_old_collection_candidate]._region;
    if (!next->is_pinned()) {
      return next;
    } else {
      assert(next->is_pinned(), "sanity");
      if (_first_pinned_candidate == NOT_FOUND) {
        _first_pinned_candidate = _next_old_collection_candidate;
      }
    }

    _next_old_collection_candidate++;
  }
  return nullptr;
}

void ShenandoahOldHeuristics::consume_old_collection_candidate() {
  _next_old_collection_candidate++;
}

unsigned int ShenandoahOldHeuristics::get_coalesce_and_fill_candidates(ShenandoahHeapRegion** buffer) {
  uint end = _last_old_region;
  uint index = _next_old_collection_candidate;
  while (index < end) {
    *buffer++ = _region_data[index++]._region;
  }
  return (_last_old_region - _next_old_collection_candidate);
}

void ShenandoahOldHeuristics::abandon_collection_candidates() {
  _last_old_collection_candidate = 0;
  _next_old_collection_candidate = 0;
  _last_old_region = 0;
}

void ShenandoahOldHeuristics::record_cycle_end() {
  this->ShenandoahHeuristics::record_cycle_end();
  clear_triggers();
}

void ShenandoahOldHeuristics::clear_triggers() {
  // Clear any triggers that were set during mixed evacuations.  Conditions may be different now that this phase has finished.
  _cannot_expand_trigger = false;
  _fragmentation_trigger = false;
  _growth_trigger = false;
}

bool ShenandoahOldHeuristics::should_start_gc() {
  // Cannot start a new old-gen GC until previous one has finished.
  //
  // Future refinement: under certain circumstances, we might be more sophisticated about this choice.
  // For example, we could choose to abandon the previous old collection before it has completed evacuations.
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (!_old_generation->can_start_gc() || heap->collection_set()->has_old_regions()) {
    return false;
  }

  if (_cannot_expand_trigger) {
    size_t old_gen_capacity = _old_generation->max_capacity();
    size_t heap_capacity = heap->capacity();
    double percent = percent_of(old_gen_capacity, heap_capacity);
    log_info(gc)("Trigger (OLD): Expansion failure, current size: " SIZE_FORMAT "%s which is %.1f%% of total heap size",
                 byte_size_in_proper_unit(old_gen_capacity), proper_unit_for_byte_size(old_gen_capacity), percent);
    return true;
  }

  if (_fragmentation_trigger) {
    size_t used = _old_generation->used();
    size_t used_regions_size = _old_generation->used_regions_size();

    // used_regions includes humongous regions
    size_t used_regions = _old_generation->used_regions();
    assert(used_regions_size > used_regions, "Cannot have more used than used regions");

    size_t first_old_region, last_old_region;
    double density;
    get_fragmentation_trigger_reason_for_log_message(density, first_old_region, last_old_region);
    size_t span_of_old_regions = (last_old_region >= first_old_region)? last_old_region + 1 - first_old_region: 0;
    size_t fragmented_free = used_regions_size - used;

    log_info(gc)("Trigger (OLD): Old has become fragmented: "
                 SIZE_FORMAT "%s available bytes spread across the range of regions " SIZE_FORMAT " to " SIZE_FORMAT
                 " (" SIZE_FORMAT " regions), density: %.1f%%",
                 byte_size_in_proper_unit(fragmented_free), proper_unit_for_byte_size(fragmented_free),
                 first_old_region, last_old_region, span_of_old_regions, density * 100);
    return true;
  }

  if (_growth_trigger) {
    // Growth may be falsely triggered during mixed evacuations, before the mixed-evacuation candidates have been
    // evacuated.  Before acting on a false trigger, we check to confirm the trigger condition is still satisfied.
    size_t current_usage = _old_generation->used();
    size_t trigger_threshold = _old_generation->usage_trigger_threshold();
    size_t heap_size = heap->capacity();
    size_t consecutive_young_cycles;
    size_t ignore_threshold = (ShenandoahIgnoreOldGrowthBelowPercentage * heap_size) / 100;
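    // Illustrative example (numbers hypothetical): with a 10 GB heap and ShenandoahIgnoreOldGrowthBelowPercentage == 10,
    // the growth trigger is ignored while old usage stays below 1 GB, unless ShenandoahDoNotIgnoreGrowthAfterYoungCycles
    // or more consecutive young GCs have completed, in which case the usual trigger_threshold comparison applies.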
    if ((current_usage < ignore_threshold) &&
        ((consecutive_young_cycles = heap->shenandoah_policy()->consecutive_young_gc_count())
         < ShenandoahDoNotIgnoreGrowthAfterYoungCycles)) {
      log_debug(gc)("Ignoring Trigger (OLD): Old has overgrown: usage (" SIZE_FORMAT "%s) is below threshold ("
                    SIZE_FORMAT "%s) after " SIZE_FORMAT " consecutive completed young GCs",
                    byte_size_in_proper_unit(current_usage), proper_unit_for_byte_size(current_usage),
                    byte_size_in_proper_unit(ignore_threshold), proper_unit_for_byte_size(ignore_threshold),
                    consecutive_young_cycles);
      _growth_trigger = false;
    } else if (current_usage > trigger_threshold) {
      size_t live_at_previous_old = _old_generation->get_live_bytes_after_last_mark();
      double percent_growth = percent_of(current_usage - live_at_previous_old, live_at_previous_old);
      log_info(gc)("Trigger (OLD): Old has overgrown, live at end of previous OLD marking: "
                   SIZE_FORMAT "%s, current usage: " SIZE_FORMAT "%s, percent growth: %.1f%%",
                   byte_size_in_proper_unit(live_at_previous_old), proper_unit_for_byte_size(live_at_previous_old),
                   byte_size_in_proper_unit(current_usage), proper_unit_for_byte_size(current_usage), percent_growth);
      return true;
    } else {
      _growth_trigger = false;
    }
  }

  // Otherwise, defer to inherited heuristic for gc trigger.
  return this->ShenandoahHeuristics::should_start_gc();
}

void ShenandoahOldHeuristics::record_success_concurrent(bool abbreviated) {
  // Forget any triggers that occurred while OLD GC was ongoing.  If we really need to start another, it will retrigger.
  clear_triggers();
  this->ShenandoahHeuristics::record_success_concurrent(abbreviated);
}

void ShenandoahOldHeuristics::record_success_degenerated() {
  // Forget any triggers that occurred while OLD GC was ongoing.  If we really need to start another, it will retrigger.
  clear_triggers();
  this->ShenandoahHeuristics::record_success_degenerated();
}

void ShenandoahOldHeuristics::record_success_full() {
  // Forget any triggers that occurred while OLD GC was ongoing.  If we really need to start another, it will retrigger.
  clear_triggers();
  this->ShenandoahHeuristics::record_success_full();
}

const char* ShenandoahOldHeuristics::name() {
  return "Old";
}

bool ShenandoahOldHeuristics::is_diagnostic() {
  return false;
}

bool ShenandoahOldHeuristics::is_experimental() {
  return true;
}

void ShenandoahOldHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
                                                                    ShenandoahHeuristics::RegionData* data,
                                                                    size_t data_size, size_t free) {
  ShouldNotReachHere();
}


#undef BYTES_FORMAT
#undef FORMAT_BYTES