/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "logging/log.hpp"
#include "runtime/os.hpp"   // for os::random() in all_candidates_are_pinned()
#include "utilities/quickSort.hpp"

uint ShenandoahOldHeuristics::NOT_FOUND = -1U;

// sort by increasing live (so least live comes first)
int ShenandoahOldHeuristics::compare_by_live(RegionData a, RegionData b) {
  if (a.get_livedata() < b.get_livedata()) {
    return -1;
  } else if (a.get_livedata() > b.get_livedata()) {
    return 1;
  } else {
    return 0;
  }
}

// sort by increasing index
int ShenandoahOldHeuristics::compare_by_index(RegionData a, RegionData b) {
  if (a.get_region()->index() < b.get_region()->index()) {
    return -1;
  } else if (a.get_region()->index() > b.get_region()->index()) {
    return 1;
  } else {
    // quicksort may compare to self during search for pivot
    return 0;
  }
}

ShenandoahOldHeuristics::ShenandoahOldHeuristics(ShenandoahOldGeneration* generation, ShenandoahGenerationalHeap* gen_heap) :
  ShenandoahHeuristics(generation),
  _heap(gen_heap),
  _old_gen(generation),
  _first_pinned_candidate(NOT_FOUND),
  _last_old_collection_candidate(0),
  _next_old_collection_candidate(0),
  _last_old_region(0),
  _live_bytes_in_unprocessed_candidates(0),
  _old_generation(generation),
  _cannot_expand_trigger(false),
  _fragmentation_trigger(false),
  _growth_trigger(false),
  _fragmentation_density(0.0),
  _fragmentation_first_old_region(0),
  _fragmentation_last_old_region(0)
{
}

bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* collection_set) {
  if (unprocessed_old_collection_candidates() == 0) {
    return false;
  }

  _first_pinned_candidate = NOT_FOUND;

  uint included_old_regions = 0;
  size_t evacuated_old_bytes = 0;
  size_t collected_old_bytes = 0;

  // If a region is put into the collection set, then this region's free (not yet used) bytes are no longer
  // "available" to hold the results of other evacuations. This may cause a decrease in the remaining amount
  // of memory that can still be evacuated. We address this by reducing the evacuation budget by the amount
  // of live memory in that region and by the amount of unallocated memory in that region if the evacuation
  // budget is constrained by availability of free memory.
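  //
  // Illustrative arithmetic (hypothetical numbers, not defaults): with an old evacuation reserve of 200 MB
  // and ShenandoahOldEvacWaste of 1.25, the budget below is 160 MB of live data. If unaffiliated free
  // regions supply 100 MB and old available() is 180 MB, then fragmented_available starts at 80 MB,
  // the 20 MB by which (100 + 80) exceeds the budget becomes excess_fragmented_available, and
  // fragmented_available is trimmed to 60 MB.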
  const size_t old_evacuation_reserve = _old_generation->get_evacuation_reserve();
  const size_t old_evacuation_budget = (size_t) ((double) old_evacuation_reserve / ShenandoahOldEvacWaste);
  size_t unfragmented_available = _old_generation->free_unaffiliated_regions() * ShenandoahHeapRegion::region_size_bytes();
  // Initialize both to zero so they are well defined even when the sums below match the budget exactly.
  size_t fragmented_available = 0;
  size_t excess_fragmented_available = 0;

  if (unfragmented_available > old_evacuation_budget) {
    unfragmented_available = old_evacuation_budget;
  } else {
    assert(_old_generation->available() >= old_evacuation_budget, "Cannot budget more than is available");
    fragmented_available = _old_generation->available() - unfragmented_available;
    assert(fragmented_available + unfragmented_available >= old_evacuation_budget, "Budgets do not add up");
    if (fragmented_available + unfragmented_available > old_evacuation_budget) {
      excess_fragmented_available = (fragmented_available + unfragmented_available) - old_evacuation_budget;
      fragmented_available -= excess_fragmented_available;
    }
  }

  size_t remaining_old_evacuation_budget = old_evacuation_budget;
  log_info(gc)("Choose old regions for mixed collection: old evacuation budget: " SIZE_FORMAT "%s, candidates: %u",
               byte_size_in_proper_unit(old_evacuation_budget), proper_unit_for_byte_size(old_evacuation_budget),
               unprocessed_old_collection_candidates());

  size_t lost_evacuation_capacity = 0;

  // The number of old-gen regions that were selected as candidates for collection at the end of the most recent old-gen
  // concurrent marking phase and have not yet been collected is represented by unprocessed_old_collection_candidates().
  // Candidate regions are ordered according to increasing amount of live data. If there is not sufficient room to
  // evacuate region N, then there is no need to even consider evacuating region N+1.
  while (unprocessed_old_collection_candidates() > 0) {
    // Old collection candidates are sorted in order of increasing live data (see compare_by_live()).
    ShenandoahHeapRegion* r = next_old_collection_candidate();
    if (r == nullptr) {
      break;
    }
    assert(r->is_regular(), "There should be no humongous regions in the set of mixed-evac candidates");

    // If region r is evacuated to fragmented memory (to free memory within a partially used region), then we need
    // to decrease the capacity of the fragmented memory by the scaled loss.
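    //
    // For example (illustrative numbers): if 700 KB of this region's free space cannot be covered by
    // excess_fragmented_available, then with ShenandoahOldEvacWaste == 1.4 the scaled loss below is
    // 700 KB / 1.4 == 500 KB, which is charged against fragmented_available when it fits there.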

    size_t live_data_for_evacuation = r->get_live_data_bytes();
    size_t lost_available = r->free();

    if ((lost_available > 0) && (excess_fragmented_available > 0)) {
      if (lost_available < excess_fragmented_available) {
        excess_fragmented_available -= lost_available;
        // Accumulate (do not subtract) the capacity lost to free space within cset regions; subtracting
        // here would underflow this unsigned counter, which starts at zero.
        lost_evacuation_capacity += lost_available;
        lost_available = 0;
      } else {
        lost_available -= excess_fragmented_available;
        lost_evacuation_capacity += excess_fragmented_available;
        excess_fragmented_available = 0;
      }
    }
    size_t scaled_loss = (size_t) ((double) lost_available / ShenandoahOldEvacWaste);
    if ((lost_available > 0) && (fragmented_available > 0)) {
      if (scaled_loss + live_data_for_evacuation < fragmented_available) {
        fragmented_available -= scaled_loss;
        scaled_loss = 0;
      } else {
        // We will have to allocate this region's evacuation memory from unfragmented memory, so don't bother
        // to decrement scaled_loss
      }
    }
    if (scaled_loss > 0) {
      // We were not able to account for the lost free memory within fragmented memory, so we need to take this
      // allocation out of unfragmented memory. Unfragmented memory does not need to account for loss of free.
      if (live_data_for_evacuation > unfragmented_available) {
        // There is not room to evacuate this region or any that come after it within the candidates array.
        break;
      } else {
        unfragmented_available -= live_data_for_evacuation;
      }
    } else {
      // Since scaled_loss == 0, we have accounted for the loss of free memory, so we can allocate from either
      // fragmented or unfragmented available memory. Use up the fragmented memory budget first.
      size_t evacuation_need = live_data_for_evacuation;

      if (evacuation_need > fragmented_available) {
        evacuation_need -= fragmented_available;
        fragmented_available = 0;
      } else {
        fragmented_available -= evacuation_need;
        evacuation_need = 0;
      }
      if (evacuation_need > unfragmented_available) {
        // There is not room to evacuate this region or any that come after it within the candidates array.
        break;
      } else {
        unfragmented_available -= evacuation_need;
        // No need to zero evacuation_need here; it is not read again (the assignment would be dead code).
      }
    }
    collection_set->add_region(r);
    included_old_regions++;
    evacuated_old_bytes += live_data_for_evacuation;
    collected_old_bytes += r->garbage();
    consume_old_collection_candidate();
  }

  if (_first_pinned_candidate != NOT_FOUND) {
    // Need to deal with pinned regions
    slide_pinned_regions_to_front();
  }
  decrease_unprocessed_old_collection_candidates_live_memory(evacuated_old_bytes);
  if (included_old_regions > 0) {
    log_info(gc)("Old-gen piggyback evac (" UINT32_FORMAT " regions, evacuating " SIZE_FORMAT "%s, reclaiming: " SIZE_FORMAT "%s)",
                 included_old_regions,
                 byte_size_in_proper_unit(evacuated_old_bytes), proper_unit_for_byte_size(evacuated_old_bytes),
                 byte_size_in_proper_unit(collected_old_bytes), proper_unit_for_byte_size(collected_old_bytes));
  }

  if (unprocessed_old_collection_candidates() == 0) {
    // We have added the last of our collection candidates to a mixed collection.
    // Any triggers that occurred during mixed evacuations may no longer be valid. They can retrigger if appropriate.
    clear_triggers();

    _old_generation->complete_mixed_evacuations();
  } else if (included_old_regions == 0) {
    // We have candidates, but none were included for evacuation. Are they all pinned,
    // or did we just not have enough room for any of them in this collection set?
    // We don't want a region with a stuck pin to prevent subsequent old collections, so
    // if they are all pinned we transition to a state that will allow us to make these uncollected
    // (pinned) regions parsable.
    if (all_candidates_are_pinned()) {
      log_info(gc)("All " UINT32_FORMAT " candidate regions are pinned", unprocessed_old_collection_candidates());
      _old_generation->abandon_mixed_evacuations();
    } else {
      log_info(gc)("No regions selected for mixed collection. "
                   "Old evacuation budget: " PROPERFMT ", Remaining evacuation budget: " PROPERFMT
                   ", Lost capacity: " PROPERFMT
                   ", Next candidate: " UINT32_FORMAT ", Last candidate: " UINT32_FORMAT,
                   PROPERFMTARGS(old_evacuation_reserve),
                   PROPERFMTARGS(remaining_old_evacuation_budget),
                   PROPERFMTARGS(lost_evacuation_capacity),
                   _next_old_collection_candidate, _last_old_collection_candidate);
    }
  }

  return (included_old_regions > 0);
}

bool ShenandoahOldHeuristics::all_candidates_are_pinned() {
#ifdef ASSERT
  if (uint(os::random()) % 100 < ShenandoahCoalesceChance) {
    return true;
  }
#endif

  for (uint i = _next_old_collection_candidate; i < _last_old_collection_candidate; ++i) {
    ShenandoahHeapRegion* region = _region_data[i].get_region();
    if (!region->is_pinned()) {
      return false;
    }
  }
  return true;
}

void ShenandoahOldHeuristics::slide_pinned_regions_to_front() {
  // Find the first unpinned region to the left of the next region that
  // will be added to the collection set. These regions will have been
  // added to the cset, so we can use them to hold pointers to regions
  // that were pinned when the cset was chosen.
  // [ r p r p p p r r ]
  //     ^           ^ ^
  //     |           | | pointer to next region to add to a mixed collection is here.
  //     |           | first r to the left should be in the collection set now.
  //     | first pinned region, we don't need to look past this
  uint write_index = NOT_FOUND;
  for (uint search = _next_old_collection_candidate - 1; search > _first_pinned_candidate; --search) {
    ShenandoahHeapRegion* region = _region_data[search].get_region();
    if (!region->is_pinned()) {
      write_index = search;
      assert(region->is_cset(), "Expected unpinned region to be added to the collection set.");
      break;
    }
  }

  // If we could not find an unpinned region, it means there are no slots available
  // to move up the pinned regions. In this case, we just reset our next index in the
  // hopes that some of these regions will become unpinned before the next mixed
  // collection. We may want to bail out here instead, as it should be quite
  // rare to have so many pinned regions and may indicate something is wrong.
  if (write_index == NOT_FOUND) {
    assert(_first_pinned_candidate != NOT_FOUND, "Should only be here if there are pinned regions.");
    _next_old_collection_candidate = _first_pinned_candidate;
    return;
  }

  // Find pinned regions to the left and move their pointer into a slot
  // that was pointing at a region that has been added to the cset (or was pointing
  // to a pinned region that we've already moved up). We are done when the leftmost
  // pinned region has been slid up.
  // [ r p r x p p p r ]
  //         ^       ^
  //         |       | next region for mixed collections
  //         | Write pointer is here. We know this region is already in the cset
  //         | so we can clobber it with the next pinned region we find.
  for (int32_t search = (int32_t)write_index - 1; search >= (int32_t)_first_pinned_candidate; --search) {
    RegionData& skipped = _region_data[search];
    if (skipped.get_region()->is_pinned()) {
      RegionData& available_slot = _region_data[write_index];
      available_slot.set_region_and_livedata(skipped.get_region(), skipped.get_livedata());
      --write_index;
    }
  }

  // Update to read from the leftmost pinned region. Plus one here because we decremented
  // the write index to hold the next found pinned region. We are just moving it back now
  // to point to the first pinned region.
  _next_old_collection_candidate = write_index + 1;
}
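
// A worked trace of slide_pinned_regions_to_front(), derived from the diagrams above (illustrative,
// 0-based slots): for candidates [ r p r p p p r r ] with _first_pinned_candidate == 1 and all eight
// slots already consumed or skipped, the first scan finds write_index == 7. The second scan then copies
// the pinned entries at slots 5, 4, 3 and 1 into slots 7, 6, 5 and 4 respectively, preserving their
// relative order, and leaves _next_old_collection_candidate == 4 so the four pinned regions are
// reconsidered by future mixed collections.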

void ShenandoahOldHeuristics::prepare_for_old_collections() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  const size_t num_regions = heap->num_regions();
  size_t cand_idx = 0;
  size_t immediate_garbage = 0;
  size_t immediate_regions = 0;
  size_t live_data = 0;

  RegionData* candidates = _region_data;
  for (size_t i = 0; i < num_regions; i++) {
    ShenandoahHeapRegion* region = heap->get_region(i);
    if (!region->is_old()) {
      continue;
    }

    size_t garbage = region->garbage();
    size_t live_bytes = region->get_live_data_bytes();
    live_data += live_bytes;

    if (region->is_regular() || region->is_regular_pinned()) {
      // Only place regular or pinned regions with live data into the candidate set.
      // Pinned regions cannot be evacuated, but we are not actually choosing candidates
      // for the collection set here. That happens later during the next young GC cycle,
      // by which time, the pinned region may no longer be pinned.
      if (!region->has_live()) {
        assert(!region->is_pinned(), "Pinned region should have live (pinned) objects.");
        region->make_trash_immediate();
        immediate_regions++;
        immediate_garbage += garbage;
      } else {
        region->begin_preemptible_coalesce_and_fill();
        candidates[cand_idx].set_region_and_livedata(region, live_bytes);
        cand_idx++;
      }
    } else if (region->is_humongous_start()) {
      // This will handle humongous start regions whether they are also pinned, or not.
      // If they are pinned, we expect them to hold live data, so they will not be
      // turned into immediate garbage.
      if (!region->has_live()) {
        assert(!region->is_pinned(), "Pinned region should have live (pinned) objects.");
        // The humongous object is dead, we can just return this region and the continuations
        // immediately to the freeset - no evacuations are necessary here. The continuations
        // will be made into trash by this method, so they'll be skipped by the 'is_regular'
        // check above, but we still need to count the start region.
        immediate_regions++;
        immediate_garbage += garbage;
        size_t region_count = heap->trash_humongous_region_at(region);
        log_debug(gc)("Trashed " SIZE_FORMAT " regions for humongous object.", region_count);
      }
    } else if (region->is_trash()) {
      // Count humongous objects made into trash here.
      immediate_regions++;
      immediate_garbage += garbage;
    }
  }

  _old_generation->set_live_bytes_after_last_mark(live_data);

  // Unlike young, we are more interested in efficiently packing OLD-gen than in reclaiming garbage first. We sort by live-data.
  // Some regular regions may have been promoted in place with no garbage but also with very little live data. When we "compact"
  // old-gen, we want to pack these underutilized regions together so we can have more unaffiliated (unfragmented) free regions
  // in old-gen.

  QuickSort::sort<RegionData>(candidates, cand_idx, compare_by_live);

  const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();

  // The convention is to collect regions that have more than this amount of garbage.
  const size_t garbage_threshold = region_size_bytes * ShenandoahOldGarbageThreshold / 100;

  // Enlightened interpretation: collect regions that have less than this amount of live.
  const size_t live_threshold = region_size_bytes - garbage_threshold;
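
  // Worked example for the thresholds above (hypothetical values, not defaults): with 4 MB regions and
  // ShenandoahOldGarbageThreshold == 25, garbage_threshold is 1 MB and live_threshold is 3 MB, so a region
  // remains a mixed-evacuation candidate only if it holds no more than 3 MB of live data.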

  _last_old_region = (uint)cand_idx;
  _last_old_collection_candidate = (uint)cand_idx;
  _next_old_collection_candidate = 0;

  size_t unfragmented = 0;
  size_t candidates_garbage = 0;

  for (size_t i = 0; i < cand_idx; i++) {
    size_t live = candidates[i].get_livedata();
    if (live > live_threshold) {
      // Candidates are sorted in increasing order of live data, so no regions after this will be below the threshold.
      _last_old_collection_candidate = (uint)i;
      break;
    }
    ShenandoahHeapRegion* r = candidates[i].get_region();
    size_t region_garbage = r->garbage();
    size_t region_free = r->free();
    candidates_garbage += region_garbage;
    unfragmented += region_free;
  }

  // defrag_count represents regions that are placed into the old collection set in order to defragment the memory
  // that we try to "reserve" for humongous allocations.
  size_t defrag_count = 0;
  size_t total_uncollected_old_regions = _last_old_region - _last_old_collection_candidate;

  if (cand_idx > _last_old_collection_candidate) {
    // Above, we have added into the set of mixed-evacuation candidates all old-gen regions for which the live memory
    // that they contain is below a particular old-garbage threshold. Regions that were not selected for the collection
    // set hold enough live memory that it is not considered efficient (by "garbage-first standards") to compact these
    // at the current time.
    //
    // However, if any of these regions that were rejected from the collection set reside within areas of memory that
    // might interfere with future humongous allocation requests, we will prioritize them for evacuation at this time.
    // Humongous allocations target the bottom of the heap. We want old-gen regions to congregate at the top of the
    // heap.
    //
    // Sort the regions that were initially rejected from the collection set in order of index. This allows us to
    // focus our attention on the regions that have low index value (i.e. the old-gen regions at the bottom of the heap).
    QuickSort::sort<RegionData>(candidates + _last_old_collection_candidate, cand_idx - _last_old_collection_candidate,
                                compare_by_index);

    const size_t first_unselected_old_region = candidates[_last_old_collection_candidate].get_region()->index();
    const size_t last_unselected_old_region = candidates[cand_idx - 1].get_region()->index();
    size_t span_of_uncollected_regions = 1 + last_unselected_old_region - first_unselected_old_region;

    // Add no more than 1/8 of the existing old-gen regions to the set of mixed evacuation candidates.
    const int MAX_FRACTION_OF_HUMONGOUS_DEFRAG_REGIONS = 8;
    const size_t bound_on_additional_regions = cand_idx / MAX_FRACTION_OF_HUMONGOUS_DEFRAG_REGIONS;

    // The heuristic old_is_fragmented trigger may be seeking to achieve up to 75% density. Allow ourselves to overshoot
    // that target (at 7/8) so we will not have to do another defragmenting old collection right away.
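    //
    // Illustrative numbers (not defaults): with cand_idx == 800, at most 100 defragmentation regions are added.
    // If 300 uncollected regions span 400 region slots, then 300 < (7 * 400) / 8 == 350, so the loop below keeps
    // promoting the lowest-indexed rejected regions to candidates (shrinking the span) until the remaining
    // uncollected regions fill at least 7/8 of their span or the 1/8 bound is reached.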
    while ((defrag_count < bound_on_additional_regions) &&
           (total_uncollected_old_regions < 7 * span_of_uncollected_regions / 8)) {
      ShenandoahHeapRegion* r = candidates[_last_old_collection_candidate].get_region();
      assert(r->is_regular() || r->is_regular_pinned(), "Region " SIZE_FORMAT " has wrong state for collection: %s",
             r->index(), ShenandoahHeapRegion::region_state_to_string(r->state()));
      const size_t region_garbage = r->garbage();
      const size_t region_free = r->free();
      candidates_garbage += region_garbage;
      unfragmented += region_free;
      defrag_count++;
      _last_old_collection_candidate++;

      // We now have one fewer uncollected regions, and our uncollected span shrinks because we have removed its first region.
      total_uncollected_old_regions--;
      span_of_uncollected_regions =
        1 + last_unselected_old_region - candidates[_last_old_collection_candidate].get_region()->index();
    }
  }

  // Note that we do not coalesce and fill occupied humongous regions
  // HR: humongous regions, RR: regular regions, CF: coalesce and fill regions
  const size_t collectable_garbage = immediate_garbage + candidates_garbage;
  const size_t old_candidates = _last_old_collection_candidate;
  const size_t mixed_evac_live = old_candidates * region_size_bytes - (candidates_garbage + unfragmented);
  set_unprocessed_old_collection_candidates_live_memory(mixed_evac_live);

  log_info(gc)("Old-Gen Collectable Garbage: " PROPERFMT " consolidated with free: " PROPERFMT ", over " SIZE_FORMAT " regions",
               PROPERFMTARGS(collectable_garbage), PROPERFMTARGS(unfragmented), old_candidates);
  log_info(gc)("Old-Gen Immediate Garbage: " PROPERFMT " over " SIZE_FORMAT " regions",
               PROPERFMTARGS(immediate_garbage), immediate_regions);
  log_info(gc)("Old regions selected for defragmentation: " SIZE_FORMAT, defrag_count);
  log_info(gc)("Old regions not selected: " SIZE_FORMAT, total_uncollected_old_regions);

  if (unprocessed_old_collection_candidates() > 0) {
    _old_generation->transition_to(ShenandoahOldGeneration::EVACUATING);
  } else if (has_coalesce_and_fill_candidates()) {
    _old_generation->transition_to(ShenandoahOldGeneration::FILLING);
  } else {
    _old_generation->transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP);
  }
}

size_t ShenandoahOldHeuristics::unprocessed_old_collection_candidates_live_memory() const {
  return _live_bytes_in_unprocessed_candidates;
}

void ShenandoahOldHeuristics::set_unprocessed_old_collection_candidates_live_memory(size_t initial_live) {
  _live_bytes_in_unprocessed_candidates = initial_live;
}

void ShenandoahOldHeuristics::decrease_unprocessed_old_collection_candidates_live_memory(size_t evacuated_live) {
  assert(evacuated_live <= _live_bytes_in_unprocessed_candidates, "Cannot evacuate more than was present");
  _live_bytes_in_unprocessed_candidates -= evacuated_live;
}

// Used by unit test: test_shenandoahOldHeuristic.cpp
uint ShenandoahOldHeuristics::last_old_collection_candidate_index() const {
  return _last_old_collection_candidate;
}

uint ShenandoahOldHeuristics::unprocessed_old_collection_candidates() const {
  return _last_old_collection_candidate - _next_old_collection_candidate;
}

ShenandoahHeapRegion* ShenandoahOldHeuristics::next_old_collection_candidate() {
  while (_next_old_collection_candidate < _last_old_collection_candidate) {
    ShenandoahHeapRegion* next = _region_data[_next_old_collection_candidate].get_region();
    if (!next->is_pinned()) {
      return next;
    } else {
      assert(next->is_pinned(), "sanity");
      if (_first_pinned_candidate == NOT_FOUND) {
        _first_pinned_candidate = _next_old_collection_candidate;
      }
    }

    _next_old_collection_candidate++;
  }
  return nullptr;
}

void ShenandoahOldHeuristics::consume_old_collection_candidate() {
  _next_old_collection_candidate++;
}

unsigned int ShenandoahOldHeuristics::get_coalesce_and_fill_candidates(ShenandoahHeapRegion** buffer) {
  uint end = _last_old_region;
  uint index = _next_old_collection_candidate;
  while (index < end) {
    *buffer++ = _region_data[index++].get_region();
  }
  return (_last_old_region - _next_old_collection_candidate);
}

void ShenandoahOldHeuristics::abandon_collection_candidates() {
  _last_old_collection_candidate = 0;
  _next_old_collection_candidate = 0;
  _last_old_region = 0;
}

void ShenandoahOldHeuristics::record_cycle_end() {
  this->ShenandoahHeuristics::record_cycle_end();
  clear_triggers();
}

void ShenandoahOldHeuristics::clear_triggers() {
  // Clear any triggers that were set during mixed evacuations. Conditions may be different now that this phase has finished.
  _cannot_expand_trigger = false;
  _fragmentation_trigger = false;
  _growth_trigger = false;
}

// This triggers old-gen collection if the number of regions "dedicated" to old generation is much larger than
// is required to represent the memory currently used within the old generation. This trigger looks specifically
// at density of the old-gen spanned region. A different mechanism triggers old-gen GC if the total number of
// old-gen regions (regardless of how close the regions are to one another) grows beyond an anticipated growth target.
void ShenandoahOldHeuristics::set_trigger_if_old_is_fragmented(size_t first_old_region, size_t last_old_region,
                                                               size_t old_region_count, size_t num_regions) {
  if (ShenandoahGenerationalHumongousReserve > 0) {
    // Our intent is to pack old-gen memory into the highest-numbered regions of the heap. Count all memory
    // above first_old_region as the "span" of old generation.
    size_t old_region_span = (first_old_region <= last_old_region)? (num_regions - first_old_region): 0;
    // Given that memory at the bottom of the heap is reserved to represent humongous objects, the number of
    // regions that old_gen is "allowed" to consume is less than the total heap size. The restriction on allowed
    // span is not strictly enforced. This is a heuristic designed to reduce the likelihood that a humongous
    // allocation request will require a STW full GC.
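    //
    // Putting the check below into one formula: the fragmentation trigger fires when
    // old_density < 0.75 * old_span_percent^2 (for old_span_percent above 50%). As a spot check against the
    // table further down: at old_span_percent == 1.10, the threshold is 0.75 * 1.21 == 0.9075, i.e. 90.8%.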
    size_t allowed_old_gen_span = num_regions - (ShenandoahGenerationalHumongousReserve * num_regions) / 100;

    size_t old_available = _old_gen->available() / HeapWordSize;
    size_t region_size_words = ShenandoahHeapRegion::region_size_words();
    size_t old_unaffiliated_available = _old_gen->free_unaffiliated_regions() * region_size_words;
    assert(old_available >= old_unaffiliated_available, "sanity");
    size_t old_fragmented_available = old_available - old_unaffiliated_available;

    size_t old_words_consumed = old_region_count * region_size_words - old_fragmented_available;
    size_t old_words_spanned = old_region_span * region_size_words;
    double old_density = ((double) old_words_consumed) / old_words_spanned;

    double old_span_percent = ((double) old_region_span) / allowed_old_gen_span;
    if (old_span_percent > 0.50) {
      // Squaring old_span_percent in the denominator below allows more aggressive triggering when we are
      // above desired maximum span and less aggressive triggering when we are far below the desired maximum span.
      double old_span_percent_squared = old_span_percent * old_span_percent;
      if (old_density / old_span_percent_squared < 0.75) {
        // We trigger old defragmentation, for example, if:
        //   old_span_percent is 110% and old_density is below 90.8%, or
        //   old_span_percent is 100% and old_density is below 75.0%, or
        //   old_span_percent is  90% and old_density is below 60.8%, or
        //   old_span_percent is  80% and old_density is below 48.0%, or
        //   old_span_percent is  70% and old_density is below 36.8%, or
        //   old_span_percent is  60% and old_density is below 27.0%, or
        //   old_span_percent is  50% and old_density is below 18.8%.

        // Set the fragmentation trigger and related attributes
        _fragmentation_trigger = true;
        _fragmentation_density = old_density;
        _fragmentation_first_old_region = first_old_region;
        _fragmentation_last_old_region = last_old_region;
      }
    }
  }
}

void ShenandoahOldHeuristics::set_trigger_if_old_is_overgrown() {
  size_t old_used = _old_gen->used() + _old_gen->get_humongous_waste();
  size_t trigger_threshold = _old_gen->usage_trigger_threshold();
  // Detects unsigned arithmetic underflow
  assert(old_used <= _heap->capacity(),
         "Old used (" SIZE_FORMAT ", " SIZE_FORMAT ") must not be more than heap capacity (" SIZE_FORMAT ")",
         _old_gen->used(), _old_gen->get_humongous_waste(), _heap->capacity());
  if (old_used > trigger_threshold) {
    _growth_trigger = true;
  }
}

void ShenandoahOldHeuristics::evaluate_triggers(size_t first_old_region, size_t last_old_region,
                                                size_t old_region_count, size_t num_regions) {
  set_trigger_if_old_is_fragmented(first_old_region, last_old_region, old_region_count, num_regions);
  set_trigger_if_old_is_overgrown();
}

bool ShenandoahOldHeuristics::should_start_gc() {
  // Cannot start a new old-gen GC until previous one has finished.
  //
  // Future refinement: under certain circumstances, we might be more sophisticated about this choice.
  // For example, we could choose to abandon the previous old collection before it has completed evacuations.
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (!_old_generation->can_start_gc() || heap->collection_set()->has_old_regions()) {
    return false;
  }

  if (_cannot_expand_trigger) {
    const size_t old_gen_capacity = _old_generation->max_capacity();
    const size_t heap_capacity = heap->capacity();
    const double percent = percent_of(old_gen_capacity, heap_capacity);
    log_info(gc)("Trigger (OLD): Expansion failure, current size: " SIZE_FORMAT "%s which is %.1f%% of total heap size",
                 byte_size_in_proper_unit(old_gen_capacity), proper_unit_for_byte_size(old_gen_capacity), percent);
    return true;
  }

  if (_fragmentation_trigger) {
    const size_t used = _old_generation->used();
    // used_regions_size() includes the full footprint of humongous regions, so the fragmented_free
    // computed below accounts for them. Compare like units (bytes to bytes) in the sanity check.
    const size_t used_regions_size = _old_generation->used_regions_size();
    assert(used_regions_size >= used, "Cannot have more used than used regions");

    size_t first_old_region, last_old_region;
    double density;
    get_fragmentation_trigger_reason_for_log_message(density, first_old_region, last_old_region);
    const size_t span_of_old_regions = (last_old_region >= first_old_region)? last_old_region + 1 - first_old_region: 0;
    const size_t fragmented_free = used_regions_size - used;

    log_info(gc)("Trigger (OLD): Old has become fragmented: "
                 SIZE_FORMAT "%s available bytes spread between range spanned from "
                 SIZE_FORMAT " to " SIZE_FORMAT " (" SIZE_FORMAT "), density: %.1f%%",
                 byte_size_in_proper_unit(fragmented_free), proper_unit_for_byte_size(fragmented_free),
                 first_old_region, last_old_region, span_of_old_regions, density * 100);
    return true;
  }

  if (_growth_trigger) {
    // Growth may be falsely triggered during mixed evacuations, before the mixed-evacuation candidates have been
    // evacuated. Before acting on a false trigger, we check to confirm the trigger condition is still satisfied.
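    //
    // Illustrative numbers (not defaults): with a 10 GB heap and ShenandoahIgnoreOldGrowthBelowPercentage == 5,
    // old usage below the 512 MB ignore_threshold cancels the trigger, unless
    // ShenandoahDoNotIgnoreGrowthAfterYoungCycles or more consecutive young GCs have already completed.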
    const size_t current_usage = _old_generation->used() + _old_generation->get_humongous_waste();
    const size_t trigger_threshold = _old_generation->usage_trigger_threshold();
    const size_t heap_size = heap->capacity();
    const size_t ignore_threshold = (ShenandoahIgnoreOldGrowthBelowPercentage * heap_size) / 100;
    size_t consecutive_young_cycles;
    if ((current_usage < ignore_threshold) &&
        ((consecutive_young_cycles = heap->shenandoah_policy()->consecutive_young_gc_count())
         < ShenandoahDoNotIgnoreGrowthAfterYoungCycles)) {
      log_debug(gc)("Ignoring Trigger (OLD): Old has overgrown: usage (" SIZE_FORMAT "%s) is below threshold ("
                    SIZE_FORMAT "%s) after " SIZE_FORMAT " consecutive completed young GCs",
                    byte_size_in_proper_unit(current_usage), proper_unit_for_byte_size(current_usage),
                    byte_size_in_proper_unit(ignore_threshold), proper_unit_for_byte_size(ignore_threshold),
                    consecutive_young_cycles);
      _growth_trigger = false;
    } else if (current_usage > trigger_threshold) {
      const size_t live_at_previous_old = _old_generation->get_live_bytes_after_last_mark();
      const double percent_growth = percent_of(current_usage - live_at_previous_old, live_at_previous_old);
      log_info(gc)("Trigger (OLD): Old has overgrown, live at end of previous OLD marking: "
                   SIZE_FORMAT "%s, current usage: " SIZE_FORMAT "%s, percent growth: %.1f%%",
                   byte_size_in_proper_unit(live_at_previous_old), proper_unit_for_byte_size(live_at_previous_old),
                   byte_size_in_proper_unit(current_usage), proper_unit_for_byte_size(current_usage), percent_growth);
      return true;
    } else {
      // Mixed evacuations have decreased current_usage such that the old-growth trigger is no longer relevant.
      _growth_trigger = false;
    }
  }

  // Otherwise, defer to inherited heuristic for gc trigger.
  return this->ShenandoahHeuristics::should_start_gc();
}

void ShenandoahOldHeuristics::record_success_concurrent() {
  // Forget any triggers that occurred while OLD GC was ongoing. If we really need to start another, it will retrigger.
  clear_triggers();
  this->ShenandoahHeuristics::record_success_concurrent();
}

void ShenandoahOldHeuristics::record_success_degenerated() {
  // Forget any triggers that occurred while OLD GC was ongoing. If we really need to start another, it will retrigger.
  clear_triggers();
  this->ShenandoahHeuristics::record_success_degenerated();
}

void ShenandoahOldHeuristics::record_success_full() {
  // Forget any triggers that occurred while OLD GC was ongoing. If we really need to start another, it will retrigger.
  clear_triggers();
  this->ShenandoahHeuristics::record_success_full();
}

const char* ShenandoahOldHeuristics::name() {
  return "Old";
}

bool ShenandoahOldHeuristics::is_diagnostic() {
  return false;
}

bool ShenandoahOldHeuristics::is_experimental() {
  return true;
}

void ShenandoahOldHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
                                                                    ShenandoahHeuristics::RegionData* data,
                                                                    size_t data_size, size_t free) {
  // Old-gen regions are only added to the collection set via prime_collection_set(), never through
  // this generic region-data path.
  ShouldNotReachHere();
}