/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "logging/log.hpp"
#include "utilities/quickSort.hpp"

uint ShenandoahOldHeuristics::NOT_FOUND = -1U;

// sort by increasing live (so least live comes first)
int ShenandoahOldHeuristics::compare_by_live(RegionData a, RegionData b) {
  if (a._u._live_data < b._u._live_data) {
    return -1;
  } else if (a._u._live_data > b._u._live_data) {
    return 1;
  } else {
    return 0;
  }
}

// sort by increasing index
int ShenandoahOldHeuristics::compare_by_index(RegionData a, RegionData b) {
  if (a._region->index() < b._region->index()) {
    return -1;
  } else if (a._region->index() > b._region->index()) {
    return 1;
  } else {
    // quicksort may compare to self during search for pivot
    return 0;
  }
}

ShenandoahOldHeuristics::ShenandoahOldHeuristics(ShenandoahOldGeneration* generation) :
  ShenandoahHeuristics(generation),
  _first_pinned_candidate(NOT_FOUND),
  _last_old_collection_candidate(0),
  _next_old_collection_candidate(0),
  _last_old_region(0),
  _live_bytes_in_unprocessed_candidates(0),
  _old_generation(generation),
  _cannot_expand_trigger(false),
  _fragmentation_trigger(false),
  _growth_trigger(false) {
}

bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* collection_set) {
  auto heap = ShenandoahGenerationalHeap::heap();
  if (unprocessed_old_collection_candidates() == 0) {
    return false;
  }

  _first_pinned_candidate = NOT_FOUND;

  uint included_old_regions = 0;
  size_t evacuated_old_bytes = 0;
  size_t collected_old_bytes = 0;

  // If a region is put into the collection set, then this region's free (not yet used) bytes are no longer
  // "available" to hold the results of other evacuations. This may cause a decrease in the remaining amount
  // of memory that can still be evacuated. We address this by reducing the evacuation budget by the amount
  // of live memory in that region and by the amount of unallocated memory in that region if the evacuation
  // budget is constrained by availability of free memory.
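  //
  // Worked example (hypothetical numbers; actual defaults and sizes vary): with an old evacuation
  // reserve of 140 MB and ShenandoahOldEvacWaste at 1.4, the budget for live data is
  // 140 MB / 1.4 == 100 MB. If old-gen has 110 MB available in total, of which 96 MB sits in
  // twelve unaffiliated 8 MB regions, the computation below clamps fragmented_available to 4 MB
  // (the part of the budget not covered by whole free regions) and treats the remaining 10 MB of
  // fragmented free memory as excess_fragmented_available, to be consumed only when absorbing the
  // free space "lost" by adding a partially used region to the collection set.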
  const size_t old_evacuation_reserve = heap->old_generation()->get_evacuation_reserve();
  const size_t old_evacuation_budget = (size_t) ((double) old_evacuation_reserve / ShenandoahOldEvacWaste);
  size_t unfragmented_available = _old_generation->free_unaffiliated_regions() * ShenandoahHeapRegion::region_size_bytes();
  size_t fragmented_available;
  // Initialize to zero so the value is well-defined when the budgets below balance exactly.
  size_t excess_fragmented_available = 0;

  if (unfragmented_available > old_evacuation_budget) {
    unfragmented_available = old_evacuation_budget;
    fragmented_available = 0;
  } else {
    assert(_old_generation->available() >= old_evacuation_budget, "Cannot budget more than is available");
    fragmented_available = _old_generation->available() - unfragmented_available;
    assert(fragmented_available + unfragmented_available >= old_evacuation_budget, "Budgets do not add up");
    if (fragmented_available + unfragmented_available > old_evacuation_budget) {
      excess_fragmented_available = (fragmented_available + unfragmented_available) - old_evacuation_budget;
      fragmented_available -= excess_fragmented_available;
    }
  }

  size_t remaining_old_evacuation_budget = old_evacuation_budget;
  log_info(gc)("Choose old regions for mixed collection: old evacuation budget: " SIZE_FORMAT "%s, candidates: %u",
               byte_size_in_proper_unit(old_evacuation_budget), proper_unit_for_byte_size(old_evacuation_budget),
               unprocessed_old_collection_candidates());

  size_t lost_evacuation_capacity = 0;

  // The number of old-gen regions that were selected as candidates for collection at the end of the most recent
  // old-gen concurrent marking phase and have not yet been collected is represented by
  // unprocessed_old_collection_candidates(). Candidate regions are ordered according to increasing amount of live
  // data. If there is not sufficient room to evacuate region N, then there is no need to even consider evacuating
  // region N+1.
  while (unprocessed_old_collection_candidates() > 0) {
    // Old collection candidates are sorted in increasing order of live data, so the cheapest evacuations come first.
    ShenandoahHeapRegion* r = next_old_collection_candidate();
    if (r == nullptr) {
      break;
    }
    assert(r->is_regular(), "There should be no humongous regions in the set of mixed-evac candidates");

    // If region r is evacuated to fragmented memory (to free memory within a partially used region), then we need
    // to decrease the capacity of the fragmented memory by the scaled loss.
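    //
    // Worked example (hypothetical numbers): if r has 1 MB of free space and excess_fragmented_available
    // holds at least 1 MB, the excess absorbs the entire loss and no budget adjustment is needed. Once the
    // excess is exhausted, the remaining loss is scaled by ShenandoahOldEvacWaste (1 MB / 1.4 is roughly
    // 731 KB) and charged against fragmented_available; only if that, too, cannot cover the loss do we
    // require this region's evacuation to be satisfied entirely from unfragmented memory.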

    size_t live_data_for_evacuation = r->get_live_data_bytes();
    size_t lost_available = r->free();

    if ((lost_available > 0) && (excess_fragmented_available > 0)) {
      if (lost_available < excess_fragmented_available) {
        excess_fragmented_available -= lost_available;
        lost_evacuation_capacity += lost_available;
        lost_available = 0;
      } else {
        lost_available -= excess_fragmented_available;
        lost_evacuation_capacity += excess_fragmented_available;
        excess_fragmented_available = 0;
      }
    }
    size_t scaled_loss = (size_t) ((double) lost_available / ShenandoahOldEvacWaste);
    if ((lost_available > 0) && (fragmented_available > 0)) {
      if (scaled_loss + live_data_for_evacuation < fragmented_available) {
        fragmented_available -= scaled_loss;
        scaled_loss = 0;
      } else {
        // We will have to allocate this region's evacuation memory from unfragmented memory, so don't bother
        // to decrement scaled_loss
      }
    }
    if (scaled_loss > 0) {
      // We were not able to account for the lost free memory within fragmented memory, so we need to take this
      // allocation out of unfragmented memory. Unfragmented memory does not need to account for loss of free.
      if (live_data_for_evacuation > unfragmented_available) {
        // There is not room to evacuate this region or any that come after it within the candidates array.
        break;
      } else {
        unfragmented_available -= live_data_for_evacuation;
      }
    } else {
      // Since scaled_loss == 0, we have accounted for the loss of free memory, so we can allocate from either
      // fragmented or unfragmented available memory. Use up the fragmented memory budget first.
      size_t evacuation_need = live_data_for_evacuation;

      if (evacuation_need > fragmented_available) {
        evacuation_need -= fragmented_available;
        fragmented_available = 0;
      } else {
        fragmented_available -= evacuation_need;
        evacuation_need = 0;
      }
      if (evacuation_need > unfragmented_available) {
        // There is not room to evacuate this region or any that come after it within the candidates array.
        break;
      } else {
        unfragmented_available -= evacuation_need;
        // evacuation_need is now fully charged; zeroing the local here would be dead code.
      }
    }
    collection_set->add_region(r);
    included_old_regions++;
    evacuated_old_bytes += live_data_for_evacuation;
    collected_old_bytes += r->garbage();
    consume_old_collection_candidate();
  }

  if (_first_pinned_candidate != NOT_FOUND) {
    // Need to deal with pinned regions
    slide_pinned_regions_to_front();
  }
  decrease_unprocessed_old_collection_candidates_live_memory(evacuated_old_bytes);
  if (included_old_regions > 0) {
    log_info(gc)("Old-gen piggyback evac (" UINT32_FORMAT " regions, evacuating " SIZE_FORMAT "%s, reclaiming: " SIZE_FORMAT "%s)",
                 included_old_regions,
                 byte_size_in_proper_unit(evacuated_old_bytes), proper_unit_for_byte_size(evacuated_old_bytes),
                 byte_size_in_proper_unit(collected_old_bytes), proper_unit_for_byte_size(collected_old_bytes));
  }

  if (unprocessed_old_collection_candidates() == 0) {
    // We have added the last of our collection candidates to a mixed collection.
    // Any triggers that occurred during mixed evacuations may no longer be valid. They can retrigger if appropriate.
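    // For example, the growth trigger may fire again once old usage exceeds the refreshed usage
    // threshold, and the fragmentation trigger may fire again if the remaining old regions are
    // still spread too thinly across the heap.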
    clear_triggers();
    if (has_coalesce_and_fill_candidates()) {
      _old_generation->transition_to(ShenandoahOldGeneration::FILLING);
    } else {
      _old_generation->transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP);
    }
  } else if (included_old_regions == 0) {
    // We have candidates, but none were included for evacuation. Are they all pinned,
    // or did we just not have enough room for any of them in this collection set?
    // We don't want a region with a stuck pin to prevent subsequent old collections, so
    // if they are all pinned we transition to a state that will allow us to make these uncollected
    // (pinned) regions parsable.
    if (all_candidates_are_pinned()) {
      log_info(gc)("All " UINT32_FORMAT " candidate regions are pinned", unprocessed_old_collection_candidates());
      _old_generation->transition_to(ShenandoahOldGeneration::FILLING);
    } else {
      log_info(gc)("No regions selected for mixed collection. "
                   "Old evacuation budget: " PROPERFMT ", Remaining evacuation budget: " PROPERFMT
                   ", Lost capacity: " PROPERFMT
                   ", Next candidate: " UINT32_FORMAT ", Last candidate: " UINT32_FORMAT,
                   PROPERFMTARGS(old_evacuation_reserve),
                   PROPERFMTARGS(remaining_old_evacuation_budget),
                   PROPERFMTARGS(lost_evacuation_capacity),
                   _next_old_collection_candidate, _last_old_collection_candidate);
    }
  }

  return (included_old_regions > 0);
}

bool ShenandoahOldHeuristics::all_candidates_are_pinned() {
#ifdef ASSERT
  if (uint(os::random()) % 100 < ShenandoahCoalesceChance) {
    return true;
  }
#endif

  for (uint i = _next_old_collection_candidate; i < _last_old_collection_candidate; ++i) {
    ShenandoahHeapRegion* region = _region_data[i]._region;
    if (!region->is_pinned()) {
      return false;
    }
  }
  return true;
}

void ShenandoahOldHeuristics::slide_pinned_regions_to_front() {
  // Find the first unpinned region to the left of the next region that
  // will be added to the collection set. These regions will have been
  // added to the cset, so we can use them to hold pointers to regions
  // that were pinned when the cset was chosen.
  // [ r p r p p p r r ]
  //     ^         ^ ^
  //     |         | | pointer to next region to add to a mixed collection is here.
  //     |         | first r to the left should be in the collection set now.
  //     | first pinned region, we don't need to look past this
  uint write_index = NOT_FOUND;
  for (uint search = _next_old_collection_candidate - 1; search > _first_pinned_candidate; --search) {
    ShenandoahHeapRegion* region = _region_data[search]._region;
    if (!region->is_pinned()) {
      write_index = search;
      assert(region->is_cset(), "Expected unpinned region to be added to the collection set.");
      break;
    }
  }

  // If we could not find an unpinned region, it means there are no slots available
  // to move up the pinned regions. In this case, we just reset our next index in the
  // hope that some of these regions will become unpinned before the next mixed
  // collection. We may want to bail out here instead, as it should be quite
  // rare to have so many pinned regions, and that may indicate something is wrong.
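  // (In the worst case, every candidate from _first_pinned_candidate onward is pinned, so
  // write_index remains NOT_FOUND and we simply rewind _next_old_collection_candidate below.)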
  if (write_index == NOT_FOUND) {
    assert(_first_pinned_candidate != NOT_FOUND, "Should only be here if there are pinned regions.");
    _next_old_collection_candidate = _first_pinned_candidate;
    return;
  }

  // Find pinned regions to the left and move their pointer into a slot
  // that was pointing at a region that has been added to the cset (or was pointing
  // to a pinned region that we've already moved up). We are done when the leftmost
  // pinned region has been slid up.
  // [ r p r x p p p r ]
  //       ^         ^
  //       |         | next region for mixed collections
  //       | Write pointer is here. We know this region is already in the cset
  //       | so we can clobber it with the next pinned region we find.
  for (int32_t search = (int32_t)write_index - 1; search >= (int32_t)_first_pinned_candidate; --search) {
    RegionData& skipped = _region_data[search];
    if (skipped._region->is_pinned()) {
      RegionData& available_slot = _region_data[write_index];
      available_slot._region = skipped._region;
      available_slot._u._live_data = skipped._u._live_data;
      --write_index;
    }
  }

  // Update to read from the leftmost pinned region. Plus one here because we decremented
  // the write index to hold the next found pinned region. We are just moving it back now
  // to point to the first pinned region.
  _next_old_collection_candidate = write_index + 1;
}

void ShenandoahOldHeuristics::prepare_for_old_collections() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  const size_t num_regions = heap->num_regions();
  size_t cand_idx = 0;
  size_t total_garbage = 0;
  size_t immediate_garbage = 0;
  size_t immediate_regions = 0;
  size_t live_data = 0;

  RegionData* candidates = _region_data;
  for (size_t i = 0; i < num_regions; i++) {
    ShenandoahHeapRegion* region = heap->get_region(i);
    if (!_old_generation->contains(region)) {
      continue;
    }

    size_t garbage = region->garbage();
    size_t live_bytes = region->get_live_data_bytes();
    total_garbage += garbage;
    live_data += live_bytes;

    // Only place regular regions into the candidate set
    if (region->is_regular()) {
      if (!region->has_live()) {
        assert(!region->is_pinned(), "Pinned region should have live (pinned) objects.");
        region->make_trash_immediate();
        immediate_regions++;
        immediate_garbage += garbage;
      } else {
        region->begin_preemptible_coalesce_and_fill();
        candidates[cand_idx]._region = region;
        candidates[cand_idx]._u._live_data = live_bytes;
        cand_idx++;
      }
    } else if (region->is_humongous_start()) {
      if (!region->has_live()) {
        assert(!region->is_pinned(), "Pinned region should have live (pinned) objects.");
        // The humongous object is dead, we can just return this region and the continuations
        // immediately to the freeset - no evacuations are necessary here. The continuations
        // will be made into trash by this method, so they'll be skipped by the 'is_regular'
        // check above, but we still need to count the start region.
        immediate_regions++;
        immediate_garbage += garbage;
        size_t region_count = heap->trash_humongous_region_at(region);
        log_debug(gc)("Trashed " SIZE_FORMAT " regions for humongous object.", region_count);
      }
    } else if (region->is_trash()) {
      // Count humongous objects made into trash here.
      immediate_regions++;
      immediate_garbage += garbage;
    }
  }

  _old_generation->set_live_bytes_after_last_mark(live_data);

  // TODO: Consider not running mixed collects if we recovered some threshold percentage of memory from immediate garbage.
  // This would be similar to young and global collections shortcutting evacuation, though we'd probably want a separate
  // threshold for the old generation.

  // Unlike young, we are more interested in efficiently packing OLD-gen than in reclaiming garbage first. We sort by live-data.
  // Some regular regions may have been promoted in place with no garbage but also with very little live data. When we "compact"
  // old-gen, we want to pack these underutilized regions together so we can have more unaffiliated (unfragmented) free regions
  // in old-gen.

  QuickSort::sort<RegionData>(candidates, cand_idx, compare_by_live, false);

  // Any old-gen region that contains ShenandoahOldGarbageThreshold (default value 25) percent garbage or more is to be
  // added to the list of candidates for subsequent mixed evacuations.
  //
  // TODO: allow ShenandoahOldGarbageThreshold to be determined adaptively, by heuristics.

  const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();

  // The convention is to collect regions that have more than this amount of garbage.
  const size_t garbage_threshold = region_size_bytes * ShenandoahOldGarbageThreshold / 100;

  // Enlightened interpretation: collect regions that have less than this amount of live.
  const size_t live_threshold = region_size_bytes - garbage_threshold;

  size_t candidates_garbage = 0;
  _last_old_region = (uint)cand_idx;
  _last_old_collection_candidate = (uint)cand_idx;
  _next_old_collection_candidate = 0;

  size_t unfragmented = 0;

  for (size_t i = 0; i < cand_idx; i++) {
    size_t live = candidates[i]._u._live_data;
    if (live > live_threshold) {
      // Candidates are sorted in increasing order of live data, so no regions after this will be below the threshold.
      _last_old_collection_candidate = (uint)i;
      break;
    }
    size_t region_garbage = candidates[i]._region->garbage();
    size_t region_free = candidates[i]._region->free();
    candidates_garbage += region_garbage;
    unfragmented += region_free;
  }

  size_t defrag_count = 0;
  if (cand_idx > _last_old_collection_candidate) {
    // Above, we have added into the set of mixed-evacuation candidates all old-gen regions for which the live memory
    // that they contain is below a particular old-garbage threshold. Regions that were not selected for the collection
    // set hold enough live memory that it is not considered efficient (by "garbage-first" standards) to compact these
    // at the current time.
    //
    // However, if any of these regions that were rejected from the collection set reside within areas of memory that
    // might interfere with future humongous allocation requests, we will prioritize them for evacuation at this time.
    // Humongous allocations target the bottom of the heap. We want old-gen regions to congregate at the top of the
    // heap.
    //
    // Sort the regions that were initially rejected from the collection set in order of index. This allows us to
    // focus our attention on the regions that have low index values (i.e. the old-gen regions at the bottom of the heap).
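    //
    // Worked example (hypothetical numbers): suppose 96 rejected regions span indices 40 through 199,
    // a span of 160 regions. Since 96 < 150 (15/16 of 160), the loop below keeps pulling the
    // lowest-indexed rejected regions into the collection set, shrinking the span from its bottom,
    // until either the 15/16 density target is met or 1/8 of all candidate regions have been added.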
    QuickSort::sort<RegionData>(candidates + _last_old_collection_candidate, cand_idx - _last_old_collection_candidate,
                                compare_by_index, false);

    const size_t first_unselected_old_region = candidates[_last_old_collection_candidate]._region->index();
    const size_t last_unselected_old_region = candidates[cand_idx - 1]._region->index();
    size_t span_of_uncollected_regions = 1 + last_unselected_old_region - first_unselected_old_region;
    size_t total_uncollected_old_regions = cand_idx - _last_old_collection_candidate;

    // Add no more than 1/8 of the existing old-gen regions to the set of mixed evacuation candidates.
    const int MAX_FRACTION_OF_HUMONGOUS_DEFRAG_REGIONS = 8;
    const size_t bound_on_additional_regions = cand_idx / MAX_FRACTION_OF_HUMONGOUS_DEFRAG_REGIONS;

    // The heuristic old_is_fragmented trigger may be seeking to achieve up to 7/8 density. Allow ourselves to overshoot
    // that target (at 15/16) so we will not have to do another defragmenting old collection right away.
    while ((defrag_count < bound_on_additional_regions) &&
           (total_uncollected_old_regions < 15 * span_of_uncollected_regions / 16)) {
      ShenandoahHeapRegion* r = candidates[_last_old_collection_candidate]._region;
      assert(r->is_regular(), "Only regular regions are in the candidate set");
      const size_t region_garbage = r->garbage();
      const size_t region_free = r->free();
      candidates_garbage += region_garbage;
      unfragmented += region_free;
      defrag_count++;
      _last_old_collection_candidate++;

      // We now have one fewer uncollected region, and our uncollected span shrinks because we have removed its first region.
      total_uncollected_old_regions--;
      if (total_uncollected_old_regions == 0) {
        // All rejected regions have been consumed; do not read past the end of the candidates array.
        break;
      }
      span_of_uncollected_regions = 1 + last_unselected_old_region - candidates[_last_old_collection_candidate]._region->index();
    }
  }

  // Note that we do not coalesce and fill occupied humongous regions.
  // HR: humongous regions, RR: regular regions, CF: coalesce and fill regions
  const size_t collectable_garbage = immediate_garbage + candidates_garbage;
  const size_t old_candidates = _last_old_collection_candidate;
  const size_t mixed_evac_live = old_candidates * region_size_bytes - (candidates_garbage + unfragmented);
  set_unprocessed_old_collection_candidates_live_memory(mixed_evac_live);

  log_info(gc)("Old-Gen Collectable Garbage: " SIZE_FORMAT "%s "
               "consolidated with free: " SIZE_FORMAT "%s, over " SIZE_FORMAT " regions (humongous defragmentation: "
               SIZE_FORMAT " regions), Old-Gen Immediate Garbage: " SIZE_FORMAT "%s over " SIZE_FORMAT " regions.",
               byte_size_in_proper_unit(collectable_garbage), proper_unit_for_byte_size(collectable_garbage),
               byte_size_in_proper_unit(unfragmented), proper_unit_for_byte_size(unfragmented),
               old_candidates, defrag_count,
               byte_size_in_proper_unit(immediate_garbage), proper_unit_for_byte_size(immediate_garbage), immediate_regions);

  if (unprocessed_old_collection_candidates() > 0) {
    _old_generation->transition_to(ShenandoahOldGeneration::EVACUATING);
  } else if (has_coalesce_and_fill_candidates()) {
    _old_generation->transition_to(ShenandoahOldGeneration::FILLING);
  } else {
    _old_generation->transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP);
  }
}

size_t ShenandoahOldHeuristics::unprocessed_old_collection_candidates_live_memory() const {
  return _live_bytes_in_unprocessed_candidates;
}

void ShenandoahOldHeuristics::set_unprocessed_old_collection_candidates_live_memory(size_t initial_live) {
  _live_bytes_in_unprocessed_candidates = initial_live;
}

void ShenandoahOldHeuristics::decrease_unprocessed_old_collection_candidates_live_memory(size_t evacuated_live) {
  assert(evacuated_live <= _live_bytes_in_unprocessed_candidates, "Cannot evacuate more than was present");
  _live_bytes_in_unprocessed_candidates -= evacuated_live;
}

// Used by unit test: test_shenandoahOldHeuristic.cpp
uint ShenandoahOldHeuristics::last_old_collection_candidate_index() const {
  return _last_old_collection_candidate;
}

uint ShenandoahOldHeuristics::unprocessed_old_collection_candidates() const {
  return _last_old_collection_candidate - _next_old_collection_candidate;
}

ShenandoahHeapRegion* ShenandoahOldHeuristics::next_old_collection_candidate() {
  while (_next_old_collection_candidate < _last_old_collection_candidate) {
    ShenandoahHeapRegion* next = _region_data[_next_old_collection_candidate]._region;
    if (!next->is_pinned()) {
      return next;
    }
    // Skip over pinned candidates, remembering where the first one was found.
    if (_first_pinned_candidate == NOT_FOUND) {
      _first_pinned_candidate = _next_old_collection_candidate;
    }
    _next_old_collection_candidate++;
  }
  return nullptr;
}

void ShenandoahOldHeuristics::consume_old_collection_candidate() {
  _next_old_collection_candidate++;
}

uint ShenandoahOldHeuristics::get_coalesce_and_fill_candidates(ShenandoahHeapRegion** buffer) {
  uint end = _last_old_region;
  uint index = _next_old_collection_candidate;
  while (index < end) {
    *buffer++ = _region_data[index++]._region;
  }
  return (_last_old_region - _next_old_collection_candidate);
}

void ShenandoahOldHeuristics::abandon_collection_candidates() {
  _last_old_collection_candidate = 0;
  _next_old_collection_candidate = 0;
  _last_old_region = 0;
}

void ShenandoahOldHeuristics::record_cycle_end() {
  this->ShenandoahHeuristics::record_cycle_end();
  clear_triggers();
}

void ShenandoahOldHeuristics::clear_triggers() {
  // Clear any triggers that were set during mixed evacuations. Conditions may be different now that this phase has finished.
  _cannot_expand_trigger = false;
  _fragmentation_trigger = false;
  _growth_trigger = false;
}

bool ShenandoahOldHeuristics::should_start_gc() {
  // Cannot start a new old-gen GC until the previous one has finished.
  //
  // Future refinement: under certain circumstances, we might be more sophisticated about this choice.
  // For example, we could choose to abandon the previous old collection before it has completed evacuations.
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (!_old_generation->can_start_gc() || heap->collection_set()->has_old_regions()) {
    return false;
  }

  if (_cannot_expand_trigger) {
    const size_t old_gen_capacity = _old_generation->max_capacity();
    const size_t heap_capacity = heap->capacity();
    const double percent = percent_of(old_gen_capacity, heap_capacity);
    log_info(gc)("Trigger (OLD): Expansion failure, current size: " SIZE_FORMAT "%s which is %.1f%% of total heap size",
                 byte_size_in_proper_unit(old_gen_capacity), proper_unit_for_byte_size(old_gen_capacity), percent);
    return true;
  }

  if (_fragmentation_trigger) {
    const size_t used = _old_generation->used();
    // used_regions_size() includes humongous regions.
    const size_t used_regions_size = _old_generation->used_regions_size();
    assert(used_regions_size >= used, "Cannot have more used bytes than the size of used regions");

    size_t first_old_region, last_old_region;
    double density;
    get_fragmentation_trigger_reason_for_log_message(density, first_old_region, last_old_region);
    const size_t span_of_old_regions = (last_old_region >= first_old_region) ? last_old_region + 1 - first_old_region : 0;
    const size_t fragmented_free = used_regions_size - used;

    log_info(gc)("Trigger (OLD): Old has become fragmented: "
                 SIZE_FORMAT "%s available bytes spread across the range of regions " SIZE_FORMAT
                 " to " SIZE_FORMAT " (span: " SIZE_FORMAT "), density: %.1f%%",
                 byte_size_in_proper_unit(fragmented_free), proper_unit_for_byte_size(fragmented_free),
                 first_old_region, last_old_region, span_of_old_regions, density * 100);
    return true;
  }

  if (_growth_trigger) {
    // Growth may be falsely triggered during mixed evacuations, before the mixed-evacuation candidates have been
    // evacuated. Before acting on a false trigger, we check to confirm the trigger condition is still satisfied.
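    //
    // Worked example (hypothetical numbers): on an 8 GB heap with ShenandoahIgnoreOldGrowthBelowPercentage
    // set to 10, growth is ignored while old usage stays below roughly 819 MB, unless at least
    // ShenandoahDoNotIgnoreGrowthAfterYoungCycles consecutive young GCs have completed.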
    const size_t current_usage = _old_generation->used();
    const size_t trigger_threshold = _old_generation->usage_trigger_threshold();
    const size_t heap_size = heap->capacity();
    const size_t ignore_threshold = (ShenandoahIgnoreOldGrowthBelowPercentage * heap_size) / 100;
    size_t consecutive_young_cycles;
    if ((current_usage < ignore_threshold) &&
        ((consecutive_young_cycles = heap->shenandoah_policy()->consecutive_young_gc_count())
         < ShenandoahDoNotIgnoreGrowthAfterYoungCycles)) {
      log_debug(gc)("Ignoring Trigger (OLD): Old has overgrown: usage (" SIZE_FORMAT "%s) is below threshold ("
                    SIZE_FORMAT "%s) after " SIZE_FORMAT " consecutive completed young GCs",
                    byte_size_in_proper_unit(current_usage), proper_unit_for_byte_size(current_usage),
                    byte_size_in_proper_unit(ignore_threshold), proper_unit_for_byte_size(ignore_threshold),
                    consecutive_young_cycles);
      _growth_trigger = false;
    } else if (current_usage > trigger_threshold) {
      const size_t live_at_previous_old = _old_generation->get_live_bytes_after_last_mark();
      const double percent_growth = percent_of(current_usage - live_at_previous_old, live_at_previous_old);
      log_info(gc)("Trigger (OLD): Old has overgrown, live at end of previous OLD marking: "
                   SIZE_FORMAT "%s, current usage: " SIZE_FORMAT "%s, percent growth: %.1f%%",
                   byte_size_in_proper_unit(live_at_previous_old), proper_unit_for_byte_size(live_at_previous_old),
                   byte_size_in_proper_unit(current_usage), proper_unit_for_byte_size(current_usage), percent_growth);
      return true;
    } else {
      _growth_trigger = false;
    }
  }

  // Otherwise, defer to the inherited heuristic for the GC trigger.
  return this->ShenandoahHeuristics::should_start_gc();
}

void ShenandoahOldHeuristics::record_success_concurrent(bool abbreviated) {
  // Forget any triggers that occurred while OLD GC was ongoing. If we really need to start another, it will retrigger.
  clear_triggers();
  this->ShenandoahHeuristics::record_success_concurrent(abbreviated);
}

void ShenandoahOldHeuristics::record_success_degenerated() {
  // Forget any triggers that occurred while OLD GC was ongoing. If we really need to start another, it will retrigger.
  clear_triggers();
  this->ShenandoahHeuristics::record_success_degenerated();
}

void ShenandoahOldHeuristics::record_success_full() {
  // Forget any triggers that occurred while OLD GC was ongoing. If we really need to start another, it will retrigger.
  clear_triggers();
  this->ShenandoahHeuristics::record_success_full();
}

const char* ShenandoahOldHeuristics::name() {
  return "Old";
}

bool ShenandoahOldHeuristics::is_diagnostic() {
  return false;
}

bool ShenandoahOldHeuristics::is_experimental() {
  return true;
}

void ShenandoahOldHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
                                                                    ShenandoahHeuristics::RegionData* data,
                                                                    size_t data_size, size_t free) {
  ShouldNotReachHere();
}