/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "logging/log.hpp"
#include "utilities/quickSort.hpp"

uint ShenandoahOldHeuristics::NOT_FOUND = -1U;

// Sort by increasing live data (so the least live region comes first).
int ShenandoahOldHeuristics::compare_by_live(RegionData a, RegionData b) {
  if (a._u._live_data < b._u._live_data) {
    return -1;
  } else if (a._u._live_data > b._u._live_data) {
    return 1;
  } else {
    return 0;
  }
}

// Sort by increasing region index.
int ShenandoahOldHeuristics::compare_by_index(RegionData a, RegionData b) {
  if (a._region->index() < b._region->index()) {
    return -1;
  } else if (a._region->index() > b._region->index()) {
    return 1;
  } else {
    // quicksort may compare an element to itself while searching for the pivot
    return 0;
  }
}

ShenandoahOldHeuristics::ShenandoahOldHeuristics(ShenandoahOldGeneration* generation, ShenandoahGenerationalHeap* gen_heap) :
  ShenandoahHeuristics(generation),
  _heap(gen_heap),
  _old_gen(generation),
  _first_pinned_candidate(NOT_FOUND),
  _last_old_collection_candidate(0),
  _next_old_collection_candidate(0),
  _last_old_region(0),
  _live_bytes_in_unprocessed_candidates(0),
  _old_generation(generation),
  _cannot_expand_trigger(false),
  _fragmentation_trigger(false),
  _growth_trigger(false),
  _fragmentation_density(0.0),
  _fragmentation_first_old_region(0),
  _fragmentation_last_old_region(0)
{
}
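
// Summary comment (added; the contract is inferred from the code below): choose old-gen regions
// for the next mixed collection, subject to the old evacuation budget.  Returns true iff at
// least one old region was added to the collection set.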
bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* collection_set) {
  if (unprocessed_old_collection_candidates() == 0) {
    return false;
  }

  _first_pinned_candidate = NOT_FOUND;

  uint included_old_regions = 0;
  size_t evacuated_old_bytes = 0;
  size_t collected_old_bytes = 0;

  // If a region is put into the collection set, then this region's free (not yet used) bytes are no longer
  // "available" to hold the results of other evacuations.  This may cause a decrease in the remaining amount
  // of memory that can still be evacuated.  We address this by reducing the evacuation budget by the amount
  // of live memory in that region and by the amount of unallocated memory in that region if the evacuation
  // budget is constrained by availability of free memory.
  const size_t old_evacuation_reserve = _old_generation->get_evacuation_reserve();
  const size_t old_evacuation_budget = (size_t) ((double) old_evacuation_reserve / ShenandoahOldEvacWaste);
  size_t unfragmented_available = _old_generation->free_unaffiliated_regions() * ShenandoahHeapRegion::region_size_bytes();
  // Initialize both to zero: the else-branch below leaves excess_fragmented_available unchanged
  // when the budgets balance exactly.
  size_t fragmented_available = 0;
  size_t excess_fragmented_available = 0;

  if (unfragmented_available > old_evacuation_budget) {
    unfragmented_available = old_evacuation_budget;
  } else {
    assert(_old_generation->available() >= old_evacuation_budget, "Cannot budget more than is available");
    fragmented_available = _old_generation->available() - unfragmented_available;
    assert(fragmented_available + unfragmented_available >= old_evacuation_budget, "Budgets do not add up");
    if (fragmented_available + unfragmented_available > old_evacuation_budget) {
      excess_fragmented_available = (fragmented_available + unfragmented_available) - old_evacuation_budget;
      fragmented_available -= excess_fragmented_available;
    }
  }
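
  // Illustrative arithmetic for the partition above (hypothetical numbers, assuming 4 MB regions
  // and ShenandoahOldEvacWaste at 1.4): an evacuation reserve of 28 MB yields a budget of 20 MB.
  // With 3 unaffiliated old regions, unfragmented_available is 12 MB, below the budget.  If old-gen
  // has 26 MB available in total, fragmented_available starts at 14 MB; since 12 MB + 14 MB exceeds
  // the 20 MB budget by 6 MB, that 6 MB becomes excess_fragmented_available and
  // fragmented_available is reduced to 8 MB.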

  size_t remaining_old_evacuation_budget = old_evacuation_budget;
  log_info(gc)("Choose old regions for mixed collection: old evacuation budget: " SIZE_FORMAT "%s, candidates: %u",
               byte_size_in_proper_unit(old_evacuation_budget), proper_unit_for_byte_size(old_evacuation_budget),
               unprocessed_old_collection_candidates());

  size_t lost_evacuation_capacity = 0;

  // The number of old-gen regions that were selected as candidates for collection at the end of the most recent old-gen
  // concurrent marking phase and have not yet been collected is represented by unprocessed_old_collection_candidates().
  // Candidate regions are ordered according to increasing amount of live data.  If there is not sufficient room to
  // evacuate region N, then there is no need to even consider evacuating region N+1.
  while (unprocessed_old_collection_candidates() > 0) {
    // Candidates are sorted in order of increasing live data.
    ShenandoahHeapRegion* r = next_old_collection_candidate();
    if (r == nullptr) {
      break;
    }
    assert(r->is_regular(), "There should be no humongous regions in the set of mixed-evac candidates");

    // If region r is evacuated to fragmented memory (to free memory within a partially used region), then we need
    // to decrease the capacity of the fragmented memory by the scaled loss.

    size_t live_data_for_evacuation = r->get_live_data_bytes();
    size_t lost_available = r->free();

    if ((lost_available > 0) && (excess_fragmented_available > 0)) {
      if (lost_available < excess_fragmented_available) {
        excess_fragmented_available -= lost_available;
        lost_evacuation_capacity += lost_available;
        lost_available = 0;
      } else {
        lost_available -= excess_fragmented_available;
        lost_evacuation_capacity += excess_fragmented_available;
        excess_fragmented_available = 0;
      }
    }
    size_t scaled_loss = (size_t) ((double) lost_available / ShenandoahOldEvacWaste);
    if ((lost_available > 0) && (fragmented_available > 0)) {
      if (scaled_loss + live_data_for_evacuation < fragmented_available) {
        fragmented_available -= scaled_loss;
        scaled_loss = 0;
      } else {
        // We will have to allocate this region's evacuation memory from unfragmented memory, so don't bother
        // to decrement scaled_loss
      }
    }
    if (scaled_loss > 0) {
      // We were not able to account for the lost free memory within fragmented memory, so we need to take this
      // allocation out of unfragmented memory.  Unfragmented memory does not need to account for loss of free memory.
      if (live_data_for_evacuation > unfragmented_available) {
        // There is no room to evacuate this region or any that come after it within the candidates array.
        break;
      } else {
        unfragmented_available -= live_data_for_evacuation;
      }
    } else {
      // Since scaled_loss == 0, we have accounted for the loss of free memory, so we can allocate from either
      // fragmented or unfragmented available memory.  Use up the fragmented memory budget first.
      size_t evacuation_need = live_data_for_evacuation;

      if (evacuation_need > fragmented_available) {
        evacuation_need -= fragmented_available;
        fragmented_available = 0;
      } else {
        fragmented_available -= evacuation_need;
        evacuation_need = 0;
      }
      if (evacuation_need > unfragmented_available) {
        // There is no room to evacuate this region or any that come after it within the candidates array.
        break;
      } else {
        unfragmented_available -= evacuation_need;
      }
    }
    collection_set->add_region(r);
    included_old_regions++;
    evacuated_old_bytes += live_data_for_evacuation;
    collected_old_bytes += r->garbage();
    consume_old_collection_candidate();
  }

  if (_first_pinned_candidate != NOT_FOUND) {
    // Need to deal with pinned regions
    slide_pinned_regions_to_front();
  }
  decrease_unprocessed_old_collection_candidates_live_memory(evacuated_old_bytes);
  if (included_old_regions > 0) {
    log_info(gc)("Old-gen piggyback evac (" UINT32_FORMAT " regions, evacuating " SIZE_FORMAT "%s, reclaiming: " SIZE_FORMAT "%s)",
                 included_old_regions,
                 byte_size_in_proper_unit(evacuated_old_bytes), proper_unit_for_byte_size(evacuated_old_bytes),
                 byte_size_in_proper_unit(collected_old_bytes), proper_unit_for_byte_size(collected_old_bytes));
  }

  if (unprocessed_old_collection_candidates() == 0) {
    // We have added the last of our collection candidates to a mixed collection.
    // Any triggers that occurred during mixed evacuations may no longer be valid.  They can retrigger if appropriate.
    clear_triggers();

    _old_generation->complete_mixed_evacuations();
  } else if (included_old_regions == 0) {
    // We have candidates, but none were included for evacuation.  Are they all pinned,
    // or did we just not have enough room for any of them in this collection set?
    // We don't want a region with a stuck pin to prevent subsequent old collections, so
    // if they are all pinned, we transition to a state that will allow us to make these
    // uncollected (pinned) regions parsable.
    if (all_candidates_are_pinned()) {
      log_info(gc)("All " UINT32_FORMAT " candidate regions are pinned", unprocessed_old_collection_candidates());
      _old_generation->abandon_mixed_evacuations();
    } else {
      log_info(gc)("No regions selected for mixed collection. "
                   "Old evacuation budget: " PROPERFMT ", Remaining evacuation budget: " PROPERFMT
                   ", Lost capacity: " PROPERFMT
                   ", Next candidate: " UINT32_FORMAT ", Last candidate: " UINT32_FORMAT,
                   PROPERFMTARGS(old_evacuation_reserve),
                   PROPERFMTARGS(remaining_old_evacuation_budget),
                   PROPERFMTARGS(lost_evacuation_capacity),
                   _next_old_collection_candidate, _last_old_collection_candidate);
    }
  }

  return (included_old_regions > 0);
}
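
// Returns true if every remaining candidate region is pinned.  (In debug builds this occasionally
// pretends that all candidates are pinned, with probability ShenandoahCoalesceChance, so the
// coalesce-and-fill path gets exercised even when stuck pins are rare in testing.)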
bool ShenandoahOldHeuristics::all_candidates_are_pinned() {
#ifdef ASSERT
  if (uint(os::random()) % 100 < ShenandoahCoalesceChance) {
    return true;
  }
#endif

  for (uint i = _next_old_collection_candidate; i < _last_old_collection_candidate; ++i) {
    ShenandoahHeapRegion* region = _region_data[i]._region;
    if (!region->is_pinned()) {
      return false;
    }
  }
  return true;
}

void ShenandoahOldHeuristics::slide_pinned_regions_to_front() {
  // Find the first unpinned region to the left of the next region that
  // will be added to the collection set.  These regions will have been
  // added to the cset, so we can use them to hold pointers to regions
  // that were pinned when the cset was chosen.
  // [ r p r p p p r r ]
  //     ^         ^ ^
  //     |         | | pointer to next region to add to a mixed collection is here.
  //     |         | first r to the left should be in the collection set now.
  //     | first pinned region, we don't need to look past this
  uint write_index = NOT_FOUND;
  for (uint search = _next_old_collection_candidate - 1; search > _first_pinned_candidate; --search) {
    ShenandoahHeapRegion* region = _region_data[search]._region;
    if (!region->is_pinned()) {
      write_index = search;
      assert(region->is_cset(), "Expected unpinned region to be added to the collection set.");
      break;
    }
  }

  // If we could not find an unpinned region, it means there are no slots available
  // to move up the pinned regions.  In this case, we just reset our next index in the
  // hope that some of these regions will become unpinned before the next mixed
  // collection.  We may want to bail out here instead, as it should be quite
  // rare to have so many pinned regions, and it may indicate something is wrong.
  if (write_index == NOT_FOUND) {
    assert(_first_pinned_candidate != NOT_FOUND, "Should only be here if there are pinned regions.");
    _next_old_collection_candidate = _first_pinned_candidate;
    return;
  }

  // Find pinned regions to the left and move their pointer into a slot
  // that was pointing at a region that has been added to the cset (or was pointing
  // to a pinned region that we've already moved up).  We are done when the leftmost
  // pinned region has been slid up.
  // [ r p r x p p p r ]
  //         ^       ^
  //         |       | next region for mixed collections
  //         | Write pointer is here.  We know this region is already in the cset
  //         | so we can clobber it with the next pinned region we find.
  for (int32_t search = (int32_t)write_index - 1; search >= (int32_t)_first_pinned_candidate; --search) {
    RegionData& skipped = _region_data[search];
    if (skipped._region->is_pinned()) {
      RegionData& available_slot = _region_data[write_index];
      available_slot._region = skipped._region;
      available_slot._u._live_data = skipped._u._live_data;
      --write_index;
    }
  }

  // Update to read from the leftmost pinned region.  Plus one here because we decremented
  // the write index to hold the next found pinned region.  We are just moving it back now
  // to point to the first pinned region.
  _next_old_collection_candidate = write_index + 1;
}
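
// Runs at the end of old-gen concurrent marking: reclaims immediately-trashable old regions,
// records live data per region, sorts the remaining regions by increasing live data, and selects
// the candidates for subsequent mixed evacuations (possibly adding extra low-index regions to
// defragment the range reserved for humongous allocations).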
void ShenandoahOldHeuristics::prepare_for_old_collections() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  const size_t num_regions = heap->num_regions();
  size_t cand_idx = 0;
  size_t immediate_garbage = 0;
  size_t immediate_regions = 0;
  size_t live_data = 0;

  RegionData* candidates = _region_data;
  for (size_t i = 0; i < num_regions; i++) {
    ShenandoahHeapRegion* region = heap->get_region(i);
    if (!region->is_old()) {
      continue;
    }

    size_t garbage = region->garbage();
    size_t live_bytes = region->get_live_data_bytes();
    live_data += live_bytes;

    if (region->is_regular() || region->is_regular_pinned()) {
      // Only place regular or pinned regions with live data into the candidate set.
      // Pinned regions cannot be evacuated, but we are not actually choosing candidates
      // for the collection set here.  That happens later during the next young GC cycle,
      // by which time the pinned region may no longer be pinned.
      if (!region->has_live()) {
        assert(!region->is_pinned(), "Pinned region should have live (pinned) objects.");
        region->make_trash_immediate();
        immediate_regions++;
        immediate_garbage += garbage;
      } else {
        region->begin_preemptible_coalesce_and_fill();
        candidates[cand_idx]._region = region;
        candidates[cand_idx]._u._live_data = live_bytes;
        cand_idx++;
      }
    } else if (region->is_humongous_start()) {
      // This will handle humongous start regions whether they are also pinned, or not.
      // If they are pinned, we expect them to hold live data, so they will not be
      // turned into immediate garbage.
      if (!region->has_live()) {
        assert(!region->is_pinned(), "Pinned region should have live (pinned) objects.");
        // The humongous object is dead; we can just return this region and the continuations
        // immediately to the freeset - no evacuations are necessary here.  The continuations
        // will be made into trash by this method, so they'll be skipped by the 'is_regular'
        // check above, but we still need to count the start region.
        immediate_regions++;
        immediate_garbage += garbage;
        size_t region_count = heap->trash_humongous_region_at(region);
        log_debug(gc)("Trashed " SIZE_FORMAT " regions for humongous object.", region_count);
      }
    } else if (region->is_trash()) {
      // Count humongous objects made into trash here.
      immediate_regions++;
      immediate_garbage += garbage;
    }
  }

  _old_generation->set_live_bytes_after_last_mark(live_data);

  // TODO: Consider not running mixed collections if we recovered some threshold percentage of memory from immediate
  // garbage.  This would be similar to young and global collections shortcutting evacuation, though we'd probably
  // want a separate threshold for the old generation.

  // Unlike young-gen, we are more interested in efficiently packing old-gen than in reclaiming garbage first.  We
  // sort by live data.  Some regular regions may have been promoted in place with no garbage but also with very
  // little live data.  When we "compact" old-gen, we want to pack these underutilized regions together so we can
  // have more unaffiliated (unfragmented) free regions in old-gen.

  QuickSort::sort<RegionData>(candidates, cand_idx, compare_by_live);

  // Any old-gen region that contains ShenandoahOldGarbageThreshold (default value 25) percent garbage or more is
  // to be added to the list of candidates for subsequent mixed evacuations.
  //
  // TODO: allow ShenandoahOldGarbageThreshold to be determined adaptively, by heuristics.

  const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();

  // The convention is to collect regions that have more than this amount of garbage.
  const size_t garbage_threshold = region_size_bytes * ShenandoahOldGarbageThreshold / 100;

  // Equivalent interpretation: collect regions that have less than this amount of live data.
  const size_t live_threshold = region_size_bytes - garbage_threshold;
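
  // Worked example (illustrative region size): with 4 MB regions and the default
  // ShenandoahOldGarbageThreshold of 25, garbage_threshold is 1 MB, so any candidate holding
  // less than 3 MB of live data qualifies for mixed evacuation.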

  _last_old_region = (uint)cand_idx;
  _last_old_collection_candidate = (uint)cand_idx;
  _next_old_collection_candidate = 0;

  size_t unfragmented = 0;
  size_t candidates_garbage = 0;

  for (size_t i = 0; i < cand_idx; i++) {
    size_t live = candidates[i]._u._live_data;
    if (live > live_threshold) {
      // Candidates are sorted in increasing order of live data, so no regions after this will be below the threshold.
      _last_old_collection_candidate = (uint)i;
      break;
    }
    size_t region_garbage = candidates[i]._region->garbage();
    size_t region_free = candidates[i]._region->free();
    candidates_garbage += region_garbage;
    unfragmented += region_free;
  }

  // defrag_count represents regions that are placed into the old collection set in order to defragment the memory
  // that we try to "reserve" for humongous allocations.
  size_t defrag_count = 0;
  size_t total_uncollected_old_regions = _last_old_region - _last_old_collection_candidate;

  if (cand_idx > _last_old_collection_candidate) {
    // Above, we have added into the set of mixed-evacuation candidates all old-gen regions for which the live memory
    // that they contain is below a particular old-garbage threshold.  Regions that were not selected for the collection
    // set hold enough live memory that it is not considered efficient (by "garbage-first" standards) to compact these
    // at the current time.
    //
    // However, if any of these regions that were rejected from the collection set reside within areas of memory that
    // might interfere with future humongous allocation requests, we will prioritize them for evacuation at this time.
    // Humongous allocations target the bottom of the heap.  We want old-gen regions to congregate at the top of the
    // heap.
    //
    // Sort the regions that were initially rejected from the collection set in order of index.  This allows us to
    // focus our attention on the regions that have low index values (i.e. the old-gen regions at the bottom of the heap).
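    //
    // Illustrative example: if 64 rejected regions are spread across an index span of 128 regions,
    // the uncollected "density" is 1/2.  The loop below promotes the lowest-index rejected regions
    // into the mixed-evacuation set until the remaining regions occupy at least 15/16 of their
    // span, or until the 1/8 bound on additional regions is reached.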
    QuickSort::sort<RegionData>(candidates + _last_old_collection_candidate, cand_idx - _last_old_collection_candidate,
                                compare_by_index);

    const size_t first_unselected_old_region = candidates[_last_old_collection_candidate]._region->index();
    const size_t last_unselected_old_region = candidates[cand_idx - 1]._region->index();
    size_t span_of_uncollected_regions = 1 + last_unselected_old_region - first_unselected_old_region;

    // Add no more than 1/8 of the existing old-gen regions to the set of mixed-evacuation candidates.
    const int MAX_FRACTION_OF_HUMONGOUS_DEFRAG_REGIONS = 8;
    const size_t bound_on_additional_regions = cand_idx / MAX_FRACTION_OF_HUMONGOUS_DEFRAG_REGIONS;

    // The heuristic old_is_fragmented trigger may be seeking to achieve up to 7/8 density.  Allow ourselves to overshoot
    // that target (at 15/16) so we will not have to do another defragmenting old collection right away.
    while ((defrag_count < bound_on_additional_regions) &&
           (total_uncollected_old_regions < 15 * span_of_uncollected_regions / 16)) {
      ShenandoahHeapRegion* r = candidates[_last_old_collection_candidate]._region;
      assert(r->is_regular() || r->is_regular_pinned(), "Region " SIZE_FORMAT " has wrong state for collection: %s",
             r->index(), ShenandoahHeapRegion::region_state_to_string(r->state()));
      const size_t region_garbage = candidates[_last_old_collection_candidate]._region->garbage();
      const size_t region_free = r->free();
      candidates_garbage += region_garbage;
      unfragmented += region_free;
      defrag_count++;
      _last_old_collection_candidate++;

      // We now have one fewer uncollected region, and our uncollected span shrinks because we have removed its first region.
      total_uncollected_old_regions--;
      span_of_uncollected_regions = 1 + last_unselected_old_region - candidates[_last_old_collection_candidate]._region->index();
    }
  }

  // Note that we do not coalesce and fill occupied humongous regions
  // HR: humongous regions, RR: regular regions, CF: coalesce and fill regions
  const size_t collectable_garbage = immediate_garbage + candidates_garbage;
  const size_t old_candidates = _last_old_collection_candidate;
  const size_t mixed_evac_live = old_candidates * region_size_bytes - (candidates_garbage + unfragmented);
  set_unprocessed_old_collection_candidates_live_memory(mixed_evac_live);

  log_info(gc)("Old-Gen Collectable Garbage: " PROPERFMT " consolidated with free: " PROPERFMT ", over " SIZE_FORMAT " regions",
               PROPERFMTARGS(collectable_garbage), PROPERFMTARGS(unfragmented), old_candidates);
  log_info(gc)("Old-Gen Immediate Garbage: " PROPERFMT " over " SIZE_FORMAT " regions",
               PROPERFMTARGS(immediate_garbage), immediate_regions);
  log_info(gc)("Old regions selected for defragmentation: " SIZE_FORMAT, defrag_count);
  log_info(gc)("Old regions not selected: " SIZE_FORMAT, total_uncollected_old_regions);
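
  // Advance the old-generation state machine based on what remains to be done: mixed-evacuation
  // candidates move old-gen to EVACUATING; otherwise, remaining coalesce-and-fill work moves it
  // to FILLING; with neither, old-gen simply waits for the next bootstrap cycle.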
  if (unprocessed_old_collection_candidates() > 0) {
    _old_generation->transition_to(ShenandoahOldGeneration::EVACUATING);
  } else if (has_coalesce_and_fill_candidates()) {
    _old_generation->transition_to(ShenandoahOldGeneration::FILLING);
  } else {
    _old_generation->transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP);
  }
}

size_t ShenandoahOldHeuristics::unprocessed_old_collection_candidates_live_memory() const {
  return _live_bytes_in_unprocessed_candidates;
}

void ShenandoahOldHeuristics::set_unprocessed_old_collection_candidates_live_memory(size_t initial_live) {
  _live_bytes_in_unprocessed_candidates = initial_live;
}

void ShenandoahOldHeuristics::decrease_unprocessed_old_collection_candidates_live_memory(size_t evacuated_live) {
  assert(evacuated_live <= _live_bytes_in_unprocessed_candidates, "Cannot evacuate more than was present");
  _live_bytes_in_unprocessed_candidates -= evacuated_live;
}

// Used by unit test: test_shenandoahOldHeuristic.cpp
uint ShenandoahOldHeuristics::last_old_collection_candidate_index() const {
  return _last_old_collection_candidate;
}

uint ShenandoahOldHeuristics::unprocessed_old_collection_candidates() const {
  return _last_old_collection_candidate - _next_old_collection_candidate;
}

ShenandoahHeapRegion* ShenandoahOldHeuristics::next_old_collection_candidate() {
  while (_next_old_collection_candidate < _last_old_collection_candidate) {
    ShenandoahHeapRegion* next = _region_data[_next_old_collection_candidate]._region;
    if (!next->is_pinned()) {
      return next;
    } else {
      assert(next->is_pinned(), "sanity");
      if (_first_pinned_candidate == NOT_FOUND) {
        _first_pinned_candidate = _next_old_collection_candidate;
      }
    }

    _next_old_collection_candidate++;
  }
  return nullptr;
}

void ShenandoahOldHeuristics::consume_old_collection_candidate() {
  _next_old_collection_candidate++;
}

unsigned int ShenandoahOldHeuristics::get_coalesce_and_fill_candidates(ShenandoahHeapRegion** buffer) {
  uint end = _last_old_region;
  uint index = _next_old_collection_candidate;
  while (index < end) {
    *buffer++ = _region_data[index++]._region;
  }
  return (_last_old_region - _next_old_collection_candidate);
}

void ShenandoahOldHeuristics::abandon_collection_candidates() {
  _last_old_collection_candidate = 0;
  _next_old_collection_candidate = 0;
  _last_old_region = 0;
}

void ShenandoahOldHeuristics::record_cycle_end() {
  this->ShenandoahHeuristics::record_cycle_end();
  clear_triggers();
}

void ShenandoahOldHeuristics::clear_triggers() {
  // Clear any triggers that were set during mixed evacuations.  Conditions may be different now that this phase has finished.
  _cannot_expand_trigger = false;
  _fragmentation_trigger = false;
  _growth_trigger = false;
}

void ShenandoahOldHeuristics::trigger_collection_if_fragmented(size_t first_old_region, size_t last_old_region, size_t old_region_count, size_t num_regions) {
  if (ShenandoahGenerationalHumongousReserve > 0) {
    size_t old_region_span = (first_old_region <= last_old_region) ? (last_old_region + 1 - first_old_region) : 0;
    size_t allowed_old_gen_span = num_regions - (ShenandoahGenerationalHumongousReserve * num_regions) / 100;

    // Tolerate lower density if the total span is small.  Here's the implementation:
    //   if old_gen spans more than 100% of allowed_old_gen_span and density < 75%, trigger old-defrag
    //   else if old_gen spans more than 87.5% and density < 62.5%, trigger old-defrag
    //   else if old_gen spans more than 75% and density < 50%, trigger old-defrag
    //   else if old_gen spans more than 62.5% and density < 37.5%, trigger old-defrag
    //   else if old_gen spans more than 50% and density < 25%, trigger old-defrag
    //
    // A previous implementation was more aggressive in triggering, resulting in degraded throughput when
    // humongous allocation was not required.
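    //
    // Illustrative numbers: with 1024 heap regions and a humongous reserve of 25%,
    // allowed_old_gen_span is 768 regions.  The first iteration of the loop below then checks
    // span >= 768 with density < 6/8; the second checks span >= 672 (7/8 of 768) with
    // density < 5/8; and so on down to span >= 384 with density < 2/8.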

    size_t old_available = _old_gen->available();
    size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
    size_t old_unaffiliated_available = _old_gen->free_unaffiliated_regions() * region_size_bytes;
    assert(old_available >= old_unaffiliated_available, "sanity");
    size_t old_fragmented_available = old_available - old_unaffiliated_available;

    size_t old_bytes_consumed = old_region_count * region_size_bytes - old_fragmented_available;
    size_t old_bytes_spanned = old_region_span * region_size_bytes;
    double old_density = ((double) old_bytes_consumed) / old_bytes_spanned;

    uint eighths = 8;
    for (uint i = 0; i < 5; i++) {
      size_t span_threshold = eighths * allowed_old_gen_span / 8;
      double density_threshold = (eighths - 2) / 8.0;
      if ((old_region_span >= span_threshold) && (old_density < density_threshold)) {
        trigger_old_is_fragmented(old_density, first_old_region, last_old_region);
        return;
      }
      eighths--;
    }
  }
}

void ShenandoahOldHeuristics::trigger_collection_if_overgrown() {
  size_t old_used = _old_gen->used() + _old_gen->get_humongous_waste();
  size_t trigger_threshold = _old_gen->usage_trigger_threshold();
  // Detects unsigned arithmetic underflow
  assert(old_used <= _heap->capacity(),
         "Old used (" SIZE_FORMAT ", " SIZE_FORMAT ") must not be more than heap capacity (" SIZE_FORMAT ")",
         _old_gen->used(), _old_gen->get_humongous_waste(), _heap->capacity());
  if (old_used > trigger_threshold) {
    trigger_old_has_grown();
  }
}

void ShenandoahOldHeuristics::trigger_maybe(size_t first_old_region, size_t last_old_region,
                                            size_t old_region_count, size_t num_regions) {
  trigger_collection_if_fragmented(first_old_region, last_old_region, old_region_count, num_regions);
  trigger_collection_if_overgrown();
}
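
// Polled to decide whether to start an old-gen collection.  The explicit old-gen triggers
// (failure to expand, fragmentation, and growth) are consulted first; otherwise the decision
// defers to the base heuristic.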
bool ShenandoahOldHeuristics::should_start_gc() {
  // Cannot start a new old-gen GC until the previous one has finished.
  //
  // Future refinement: under certain circumstances, we might be more sophisticated about this choice.
  // For example, we could choose to abandon the previous old collection before it has completed evacuations.
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (!_old_generation->can_start_gc() || heap->collection_set()->has_old_regions()) {
    return false;
  }

  if (_cannot_expand_trigger) {
    const size_t old_gen_capacity = _old_generation->max_capacity();
    const size_t heap_capacity = heap->capacity();
    const double percent = percent_of(old_gen_capacity, heap_capacity);
    log_info(gc)("Trigger (OLD): Expansion failure, current size: " SIZE_FORMAT "%s which is %.1f%% of total heap size",
                 byte_size_in_proper_unit(old_gen_capacity), proper_unit_for_byte_size(old_gen_capacity), percent);
    return true;
  }

  if (_fragmentation_trigger) {
    const size_t used = _old_generation->used();
    const size_t used_regions_size = _old_generation->used_regions_size();

    // used_regions includes humongous regions
    const size_t used_regions = _old_generation->used_regions();
    assert(used_regions_size > used_regions, "Cannot have more used than used regions");

    size_t first_old_region, last_old_region;
    double density;
    get_fragmentation_trigger_reason_for_log_message(density, first_old_region, last_old_region);
    const size_t span_of_old_regions = (last_old_region >= first_old_region) ? last_old_region + 1 - first_old_region : 0;
    const size_t fragmented_free = used_regions_size - used;

    log_info(gc)("Trigger (OLD): Old has become fragmented: "
                 SIZE_FORMAT "%s available bytes spread between range spanned from "
                 SIZE_FORMAT " to " SIZE_FORMAT " (" SIZE_FORMAT "), density: %.1f%%",
                 byte_size_in_proper_unit(fragmented_free), proper_unit_for_byte_size(fragmented_free),
                 first_old_region, last_old_region, span_of_old_regions, density * 100);
    return true;
  }

  if (_growth_trigger) {
    // Growth may be falsely triggered during mixed evacuations, before the mixed-evacuation candidates have been
    // evacuated.  Before acting on a false trigger, we check to confirm the trigger condition is still satisfied.
    const size_t current_usage = _old_generation->used() + _old_generation->get_humongous_waste();
    const size_t trigger_threshold = _old_generation->usage_trigger_threshold();
    const size_t heap_size = heap->capacity();
    const size_t ignore_threshold = (ShenandoahIgnoreOldGrowthBelowPercentage * heap_size) / 100;
    size_t consecutive_young_cycles;
    if ((current_usage < ignore_threshold) &&
        ((consecutive_young_cycles = heap->shenandoah_policy()->consecutive_young_gc_count())
         < ShenandoahDoNotIgnoreGrowthAfterYoungCycles)) {
      log_debug(gc)("Ignoring Trigger (OLD): Old has overgrown: usage (" SIZE_FORMAT "%s) is below threshold ("
                    SIZE_FORMAT "%s) after " SIZE_FORMAT " consecutive completed young GCs",
                    byte_size_in_proper_unit(current_usage), proper_unit_for_byte_size(current_usage),
                    byte_size_in_proper_unit(ignore_threshold), proper_unit_for_byte_size(ignore_threshold),
                    consecutive_young_cycles);
      _growth_trigger = false;
    } else if (current_usage > trigger_threshold) {
      const size_t live_at_previous_old = _old_generation->get_live_bytes_after_last_mark();
      const double percent_growth = percent_of(current_usage - live_at_previous_old, live_at_previous_old);
      log_info(gc)("Trigger (OLD): Old has overgrown, live at end of previous OLD marking: "
                   SIZE_FORMAT "%s, current usage: " SIZE_FORMAT "%s, percent growth: %.1f%%",
                   byte_size_in_proper_unit(live_at_previous_old), proper_unit_for_byte_size(live_at_previous_old),
                   byte_size_in_proper_unit(current_usage), proper_unit_for_byte_size(current_usage), percent_growth);
      return true;
    } else {
      // Mixed evacuations have decreased current_usage such that the old-growth trigger is no longer relevant.
      _growth_trigger = false;
    }
  }

  // Otherwise, defer to the inherited heuristic for the GC trigger.
  return this->ShenandoahHeuristics::should_start_gc();
}

void ShenandoahOldHeuristics::record_success_concurrent() {
  // Forget any triggers that occurred while OLD GC was ongoing.  If we really need to start another, it will retrigger.
  clear_triggers();
  this->ShenandoahHeuristics::record_success_concurrent();
}

void ShenandoahOldHeuristics::record_success_degenerated() {
  // Forget any triggers that occurred while OLD GC was ongoing.  If we really need to start another, it will retrigger.
  clear_triggers();
  this->ShenandoahHeuristics::record_success_degenerated();
}

void ShenandoahOldHeuristics::record_success_full() {
  // Forget any triggers that occurred while OLD GC was ongoing.  If we really need to start another, it will retrigger.
  clear_triggers();
  this->ShenandoahHeuristics::record_success_full();
}

const char* ShenandoahOldHeuristics::name() {
  return "Old";
}

bool ShenandoahOldHeuristics::is_diagnostic() {
  return false;
}

bool ShenandoahOldHeuristics::is_experimental() {
  return true;
}

void ShenandoahOldHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
                                                                    ShenandoahHeuristics::RegionData* data,
                                                                    size_t data_size, size_t free) {
  ShouldNotReachHere();
}