/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "logging/log.hpp"
#include "utilities/quickSort.hpp"

uint ShenandoahOldHeuristics::NOT_FOUND = -1U;

// sort by increasing live (so least live comes first)
int ShenandoahOldHeuristics::compare_by_live(RegionData a, RegionData b) {
  if (a.get_livedata() < b.get_livedata()) {
    return -1;
  } else if (a.get_livedata() > b.get_livedata()) {
    return 1;
  } else {
    return 0;
  }
}

// sort by increasing index
int ShenandoahOldHeuristics::compare_by_index(RegionData a, RegionData b) {
  if (a.get_region()->index() < b.get_region()->index()) {
    return -1;
  } else if (a.get_region()->index() > b.get_region()->index()) {
    return 1;
  } else {
    // quicksort may compare to self during search for pivot
    return 0;
  }
}

ShenandoahOldHeuristics::ShenandoahOldHeuristics(ShenandoahOldGeneration* generation, ShenandoahGenerationalHeap* gen_heap) :
  ShenandoahHeuristics(generation),
  _heap(gen_heap),
  _old_gen(generation),
  _first_pinned_candidate(NOT_FOUND),
  _last_old_collection_candidate(0),
  _next_old_collection_candidate(0),
  _last_old_region(0),
  _live_bytes_in_unprocessed_candidates(0),
  _old_generation(generation),
  _cannot_expand_trigger(false),
  _fragmentation_trigger(false),
  _growth_trigger(false),
  _fragmentation_density(0.0),
  _fragmentation_first_old_region(0),
  _fragmentation_last_old_region(0)
{
}

bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* collection_set) {
  if (unprocessed_old_collection_candidates() == 0) {
    return false;
  }

  if (_old_generation->is_preparing_for_mark()) {
    // We have unprocessed old collection candidates, but the heuristic has given up on evacuating them.
    // This is most likely because they were _all_ pinned at the time of the last mixed evacuation (and
    // this in turn is most likely because there are just one or two candidate regions remaining).
    log_debug(gc)("Remaining " UINT32_FORMAT " old regions are being coalesced and filled", unprocessed_old_collection_candidates());
    return false;
  }

  _first_pinned_candidate = NOT_FOUND;

  uint included_old_regions = 0;
  size_t evacuated_old_bytes = 0;
  size_t collected_old_bytes = 0;

  // If a region is put into the collection set, then this region's free (not yet used) bytes are no longer
  // "available" to hold the results of other evacuations. This may cause a decrease in the remaining amount
  // of memory that can still be evacuated. We address this by reducing the evacuation budget by the amount
  // of live memory in that region and by the amount of unallocated memory in that region if the evacuation
  // budget is constrained by availability of free memory.
  const size_t old_evacuation_reserve = _old_generation->get_evacuation_reserve();
  const size_t old_evacuation_budget = (size_t) ((double) old_evacuation_reserve / ShenandoahOldEvacWaste);
  size_t unfragmented_available = _old_generation->free_unaffiliated_regions() * ShenandoahHeapRegion::region_size_bytes();
  size_t fragmented_available;
  size_t excess_fragmented_available;

  if (unfragmented_available > old_evacuation_budget) {
    unfragmented_available = old_evacuation_budget;
    fragmented_available = 0;
    excess_fragmented_available = 0;
  } else {
    assert(_old_generation->available() >= old_evacuation_budget, "Cannot budget more than is available");
    fragmented_available = _old_generation->available() - unfragmented_available;
    assert(fragmented_available + unfragmented_available >= old_evacuation_budget, "Budgets do not add up");
    if (fragmented_available + unfragmented_available > old_evacuation_budget) {
      excess_fragmented_available = (fragmented_available + unfragmented_available) - old_evacuation_budget;
      fragmented_available -= excess_fragmented_available;
    }
  }

  size_t remaining_old_evacuation_budget = old_evacuation_budget;
  log_info(gc)("Choose old regions for mixed collection: old evacuation budget: " SIZE_FORMAT "%s, candidates: %u",
               byte_size_in_proper_unit(old_evacuation_budget), proper_unit_for_byte_size(old_evacuation_budget),
               unprocessed_old_collection_candidates());

  size_t lost_evacuation_capacity = 0;

  // The number of old-gen regions that were selected as candidates for collection at the end of the most recent old-gen
  // concurrent marking phase and have not yet been collected is represented by unprocessed_old_collection_candidates().
  // Candidate regions are ordered according to increasing amount of live data. If there is not sufficient room to
  // evacuate region N, then there is no need to even consider evacuating region N+1.
  while (unprocessed_old_collection_candidates() > 0) {
    // Old collection candidates are sorted in order of decreasing garbage contained therein.
    ShenandoahHeapRegion* r = next_old_collection_candidate();
    if (r == nullptr) {
      break;
    }
    assert(r->is_regular(), "There should be no humongous regions in the set of mixed-evac candidates");

    // If region r is evacuated to fragmented memory (to free memory within a partially used region), then we need
    // to decrease the capacity of the fragmented memory by the scaled loss.
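    //
    // Illustrative arithmetic (hypothetical numbers, not taken from any particular heap): if this region has
    // 700 KB of free memory (lost_available) and ShenandoahOldEvacWaste is 1.4, the loss charged against the
    // fragmented budget is roughly 700 KB / 1.4 = 500 KB, because the budget itself was derived by dividing the
    // evacuation reserve by ShenandoahOldEvacWaste, so each lost byte of reserve shrinks the budget by only
    // 1/ShenandoahOldEvacWaste of a byte.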

    size_t live_data_for_evacuation = r->get_live_data_bytes();
    size_t lost_available = r->free();

    if ((lost_available > 0) && (excess_fragmented_available > 0)) {
      if (lost_available < excess_fragmented_available) {
        excess_fragmented_available -= lost_available;
        lost_evacuation_capacity -= lost_available;
        lost_available = 0;
      } else {
        lost_available -= excess_fragmented_available;
        lost_evacuation_capacity -= excess_fragmented_available;
        excess_fragmented_available = 0;
      }
    }
    size_t scaled_loss = (size_t) ((double) lost_available / ShenandoahOldEvacWaste);
    if ((lost_available > 0) && (fragmented_available > 0)) {
      if (scaled_loss + live_data_for_evacuation < fragmented_available) {
        fragmented_available -= scaled_loss;
        scaled_loss = 0;
      } else {
        // We will have to allocate this region's evacuation memory from unfragmented memory, so don't bother
        // to decrement scaled_loss
      }
    }
    if (scaled_loss > 0) {
      // We were not able to account for the lost free memory within fragmented memory, so we need to take this
      // allocation out of unfragmented memory. Unfragmented memory does not need to account for loss of free.
      if (live_data_for_evacuation > unfragmented_available) {
        // There is not room to evacuate this region or any that come after it within the candidates array.
        break;
      } else {
        unfragmented_available -= live_data_for_evacuation;
      }
    } else {
      // Since scaled_loss == 0, we have accounted for the loss of free memory, so we can allocate from either
      // fragmented or unfragmented available memory. Use up the fragmented memory budget first.
      size_t evacuation_need = live_data_for_evacuation;

      if (evacuation_need > fragmented_available) {
        evacuation_need -= fragmented_available;
        fragmented_available = 0;
      } else {
        fragmented_available -= evacuation_need;
        evacuation_need = 0;
      }
      if (evacuation_need > unfragmented_available) {
        // There is not room to evacuate this region or any that come after it within the candidates array.
        break;
      } else {
        unfragmented_available -= evacuation_need;
        // dead code: evacuation_need == 0;
      }
    }
    collection_set->add_region(r);
    included_old_regions++;
    evacuated_old_bytes += live_data_for_evacuation;
    collected_old_bytes += r->garbage();
    consume_old_collection_candidate();
  }

  if (_first_pinned_candidate != NOT_FOUND) {
    // Need to deal with pinned regions
    slide_pinned_regions_to_front();
  }
  decrease_unprocessed_old_collection_candidates_live_memory(evacuated_old_bytes);
  if (included_old_regions > 0) {
    log_info(gc)("Old-gen piggyback evac (" UINT32_FORMAT " regions, evacuating " SIZE_FORMAT "%s, reclaiming: " SIZE_FORMAT "%s)",
                 included_old_regions,
                 byte_size_in_proper_unit(evacuated_old_bytes), proper_unit_for_byte_size(evacuated_old_bytes),
                 byte_size_in_proper_unit(collected_old_bytes), proper_unit_for_byte_size(collected_old_bytes));
  }

  if (unprocessed_old_collection_candidates() == 0) {
    // We have added the last of our collection candidates to a mixed collection.
    // Any triggers that occurred during mixed evacuations may no longer be valid. They can retrigger if appropriate.
    clear_triggers();

    _old_generation->complete_mixed_evacuations();
  } else if (included_old_regions == 0) {
    // We have candidates, but none were included for evacuation - are they all pinned?
    // or did we just not have enough room for any of them in this collection set?
    // We don't want a region with a stuck pin to prevent subsequent old collections, so
    // if they are all pinned we transition to a state that will allow us to make these uncollected
    // (pinned) regions parsable.
    if (all_candidates_are_pinned()) {
      log_info(gc)("All candidate regions " UINT32_FORMAT " are pinned", unprocessed_old_collection_candidates());
      _old_generation->abandon_mixed_evacuations();
    } else {
      log_info(gc)("No regions selected for mixed collection. "
                   "Old evacuation budget: " PROPERFMT ", Remaining evacuation budget: " PROPERFMT
                   ", Lost capacity: " PROPERFMT
                   ", Next candidate: " UINT32_FORMAT ", Last candidate: " UINT32_FORMAT,
                   PROPERFMTARGS(old_evacuation_reserve),
                   PROPERFMTARGS(remaining_old_evacuation_budget),
                   PROPERFMTARGS(lost_evacuation_capacity),
                   _next_old_collection_candidate, _last_old_collection_candidate);
    }
  }

  return (included_old_regions > 0);
}

bool ShenandoahOldHeuristics::all_candidates_are_pinned() {
#ifdef ASSERT
  if (uint(os::random()) % 100 < ShenandoahCoalesceChance) {
    return true;
  }
#endif

  for (uint i = _next_old_collection_candidate; i < _last_old_collection_candidate; ++i) {
    ShenandoahHeapRegion* region = _region_data[i].get_region();
    if (!region->is_pinned()) {
      return false;
    }
  }
  return true;
}

void ShenandoahOldHeuristics::slide_pinned_regions_to_front() {
  // Find the first unpinned region to the left of the next region that
  // will be added to the collection set. These regions will have been
  // added to the cset, so we can use them to hold pointers to regions
  // that were pinned when the cset was chosen.
  // [ r p r p p p r r ]
  //     ^         ^ ^
  //     |         | | pointer to next region to add to a mixed collection is here.
  //     |         | first r to the left should be in the collection set now.
  //     | first pinned region, we don't need to look past this
  uint write_index = NOT_FOUND;
  for (uint search = _next_old_collection_candidate - 1; search > _first_pinned_candidate; --search) {
    ShenandoahHeapRegion* region = _region_data[search].get_region();
    if (!region->is_pinned()) {
      write_index = search;
      assert(region->is_cset(), "Expected unpinned region to be added to the collection set.");
      break;
    }
  }

  // If we could not find an unpinned region, it means there are no slots available
  // to move up the pinned regions. In this case, we just reset our next index in the
  // hopes that some of these regions will become unpinned before the next mixed
  // collection. We may want to bail out here instead, as it should be quite
  // rare to have so many pinned regions and may indicate something is wrong.
  if (write_index == NOT_FOUND) {
    assert(_first_pinned_candidate != NOT_FOUND, "Should only be here if there are pinned regions.");
    _next_old_collection_candidate = _first_pinned_candidate;
    return;
  }

  // Find pinned regions to the left and move their pointer into a slot
  // that was pointing at a region that has been added to the cset (or was pointing
  // to a pinned region that we've already moved up). We are done when the leftmost
  // pinned region has been slid up.
  // [ r p r x p p p r ]
  //         ^       ^
  //         |       | next region for mixed collections
  //         | Write pointer is here. We know this region is already in the cset
  //         | so we can clobber it with the next pinned region we find.
  for (int32_t search = (int32_t)write_index - 1; search >= (int32_t)_first_pinned_candidate; --search) {
    RegionData& skipped = _region_data[search];
    if (skipped.get_region()->is_pinned()) {
      RegionData& available_slot = _region_data[write_index];
      available_slot.set_region_and_livedata(skipped.get_region(), skipped.get_livedata());
      --write_index;
    }
  }

  // Update to read from the leftmost pinned region. Plus one here because we decremented
  // the write index to hold the next found pinned region. We are just moving it back now
  // to point to the first pinned region.
  _next_old_collection_candidate = write_index + 1;
}

void ShenandoahOldHeuristics::prepare_for_old_collections() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  const size_t num_regions = heap->num_regions();
  size_t cand_idx = 0;
  size_t immediate_garbage = 0;
  size_t immediate_regions = 0;
  size_t live_data = 0;

  RegionData* candidates = _region_data;
  for (size_t i = 0; i < num_regions; i++) {
    ShenandoahHeapRegion* region = heap->get_region(i);
    if (!region->is_old()) {
      continue;
    }

    size_t garbage = region->garbage();
    size_t live_bytes = region->get_live_data_bytes();
    live_data += live_bytes;

    if (region->is_regular() || region->is_regular_pinned()) {
      // Only place regular or pinned regions with live data into the candidate set.
      // Pinned regions cannot be evacuated, but we are not actually choosing candidates
      // for the collection set here. That happens later during the next young GC cycle,
      // by which time, the pinned region may no longer be pinned.
      if (!region->has_live()) {
        assert(!region->is_pinned(), "Pinned region should have live (pinned) objects.");
        region->make_trash_immediate();
        immediate_regions++;
        immediate_garbage += garbage;
      } else {
        region->begin_preemptible_coalesce_and_fill();
        candidates[cand_idx].set_region_and_livedata(region, live_bytes);
        cand_idx++;
      }
    } else if (region->is_humongous_start()) {
      // This will handle humongous start regions whether they are also pinned, or not.
      // If they are pinned, we expect them to hold live data, so they will not be
      // turned into immediate garbage.
      if (!region->has_live()) {
        assert(!region->is_pinned(), "Pinned region should have live (pinned) objects.");
        // The humongous object is dead, we can just return this region and the continuations
        // immediately to the freeset - no evacuations are necessary here. The continuations
        // will be made into trash by this method, so they'll be skipped by the 'is_regular'
        // check above, but we still need to count the start region.
        immediate_regions++;
        immediate_garbage += garbage;
        size_t region_count = heap->trash_humongous_region_at(region);
        log_debug(gc)("Trashed " SIZE_FORMAT " regions for humongous object.", region_count);
      }
    } else if (region->is_trash()) {
      // Count humongous objects made into trash here.
      immediate_regions++;
      immediate_garbage += garbage;
    }
  }

  _old_generation->set_live_bytes_after_last_mark(live_data);

  // Unlike young, we are more interested in efficiently packing OLD-gen than in reclaiming garbage first. We sort by live-data.
  // Some regular regions may have been promoted in place with no garbage but also with very little live data. When we "compact"
  // old-gen, we want to pack these underutilized regions together so we can have more unaffiliated (unfragmented) free regions
  // in old-gen.

  QuickSort::sort<RegionData>(candidates, cand_idx, compare_by_live, false);

  const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();

  // The convention is to collect regions that have more than this amount of garbage.
  const size_t garbage_threshold = region_size_bytes * ShenandoahOldGarbageThreshold / 100;

  // Enlightened interpretation: collect regions that have less than this amount of live.
  const size_t live_threshold = region_size_bytes - garbage_threshold;

  _last_old_region = (uint)cand_idx;
  _last_old_collection_candidate = (uint)cand_idx;
  _next_old_collection_candidate = 0;

  size_t unfragmented = 0;
  size_t candidates_garbage = 0;

  for (size_t i = 0; i < cand_idx; i++) {
    size_t live = candidates[i].get_livedata();
    if (live > live_threshold) {
      // Candidates are sorted in increasing order of live data, so no regions after this will be below the threshold.
      _last_old_collection_candidate = (uint)i;
      break;
    }
    ShenandoahHeapRegion* r = candidates[i].get_region();
    size_t region_garbage = r->garbage();
    size_t region_free = r->free();
    candidates_garbage += region_garbage;
    unfragmented += region_free;
  }

  // defrag_count represents regions that are placed into the old collection set in order to defragment the memory
  // that we try to "reserve" for humongous allocations.
  size_t defrag_count = 0;
  size_t total_uncollected_old_regions = _last_old_region - _last_old_collection_candidate;

  if (cand_idx > _last_old_collection_candidate) {
    // Above, we have added into the set of mixed-evacuation candidates all old-gen regions for which the live memory
    // that they contain is below a particular old-garbage threshold. Regions that were not selected for the collection
    // set hold enough live memory that it is not considered efficient (by "garbage-first standards") to compact these
    // at the current time.
    //
    // However, if any of these regions that were rejected from the collection set reside within areas of memory that
    // might interfere with future humongous allocation requests, we will prioritize them for evacuation at this time.
    // Humongous allocations target the bottom of the heap. We want old-gen regions to congregate at the top of the
    // heap.
    //
    // Sort the regions that were initially rejected from the collection set in order of index. This allows us to
    // focus our attention on the regions that have low index value (i.e. the old-gen regions at the bottom of the heap).
    QuickSort::sort<RegionData>(candidates + _last_old_collection_candidate, cand_idx - _last_old_collection_candidate,
                                compare_by_index, false);

    const size_t first_unselected_old_region = candidates[_last_old_collection_candidate].get_region()->index();
    const size_t last_unselected_old_region = candidates[cand_idx - 1].get_region()->index();
    size_t span_of_uncollected_regions = 1 + last_unselected_old_region - first_unselected_old_region;

    // Add no more than 1/8 of the existing old-gen regions to the set of mixed evacuation candidates.
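    // As an illustration (hypothetical numbers): with 400 candidate regions in total, at most 400 / 8 = 50
    // additional regions can be appended here purely for defragmentation, no matter how sparse the uncollected
    // span is.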
    const int MAX_FRACTION_OF_HUMONGOUS_DEFRAG_REGIONS = 8;
    const size_t bound_on_additional_regions = cand_idx / MAX_FRACTION_OF_HUMONGOUS_DEFRAG_REGIONS;

    // The heuristic old_is_fragmented trigger may be seeking to achieve up to 75% density. Allow ourselves to overshoot
    // that target (at 7/8) so we will not have to do another defragmenting old collection right away.
    while ((defrag_count < bound_on_additional_regions) &&
           (total_uncollected_old_regions < 7 * span_of_uncollected_regions / 8)) {
      ShenandoahHeapRegion* r = candidates[_last_old_collection_candidate].get_region();
      assert(r->is_regular() || r->is_regular_pinned(), "Region " SIZE_FORMAT " has wrong state for collection: %s",
             r->index(), ShenandoahHeapRegion::region_state_to_string(r->state()));
      const size_t region_garbage = r->garbage();
      const size_t region_free = r->free();
      candidates_garbage += region_garbage;
      unfragmented += region_free;
      defrag_count++;
      _last_old_collection_candidate++;

      // We now have one fewer uncollected regions, and our uncollected span shrinks because we have removed its first region.
      total_uncollected_old_regions--;
      span_of_uncollected_regions =
        1 + last_unselected_old_region - candidates[_last_old_collection_candidate].get_region()->index();
    }
  }

  // Note that we do not coalesce and fill occupied humongous regions
  // HR: humongous regions, RR: regular regions, CF: coalesce and fill regions
  const size_t collectable_garbage = immediate_garbage + candidates_garbage;
  const size_t old_candidates = _last_old_collection_candidate;
  const size_t mixed_evac_live = old_candidates * region_size_bytes - (candidates_garbage + unfragmented);
  set_unprocessed_old_collection_candidates_live_memory(mixed_evac_live);

  log_info(gc)("Old-Gen Collectable Garbage: " PROPERFMT " consolidated with free: " PROPERFMT ", over " SIZE_FORMAT " regions",
               PROPERFMTARGS(collectable_garbage), PROPERFMTARGS(unfragmented), old_candidates);
  log_info(gc)("Old-Gen Immediate Garbage: " PROPERFMT " over " SIZE_FORMAT " regions",
               PROPERFMTARGS(immediate_garbage), immediate_regions);
  log_info(gc)("Old regions selected for defragmentation: " SIZE_FORMAT, defrag_count);
  log_info(gc)("Old regions not selected: " SIZE_FORMAT, total_uncollected_old_regions);

  if (unprocessed_old_collection_candidates() > 0) {
    _old_generation->transition_to(ShenandoahOldGeneration::EVACUATING);
  } else if (has_coalesce_and_fill_candidates()) {
    _old_generation->transition_to(ShenandoahOldGeneration::FILLING);
  } else {
    _old_generation->transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP);
  }
}

size_t ShenandoahOldHeuristics::unprocessed_old_collection_candidates_live_memory() const {
  return _live_bytes_in_unprocessed_candidates;
}

void ShenandoahOldHeuristics::set_unprocessed_old_collection_candidates_live_memory(size_t initial_live) {
  _live_bytes_in_unprocessed_candidates = initial_live;
}

void ShenandoahOldHeuristics::decrease_unprocessed_old_collection_candidates_live_memory(size_t evacuated_live) {
  assert(evacuated_live <= _live_bytes_in_unprocessed_candidates, "Cannot evacuate more than was present");
  _live_bytes_in_unprocessed_candidates -= evacuated_live;
}

// Used by unit test: test_shenandoahOldHeuristic.cpp
uint ShenandoahOldHeuristics::last_old_collection_candidate_index() const {
  return _last_old_collection_candidate;
}

uint ShenandoahOldHeuristics::unprocessed_old_collection_candidates() const {
  return _last_old_collection_candidate - _next_old_collection_candidate;
}

ShenandoahHeapRegion* ShenandoahOldHeuristics::next_old_collection_candidate() {
  while (_next_old_collection_candidate < _last_old_collection_candidate) {
    ShenandoahHeapRegion* next = _region_data[_next_old_collection_candidate].get_region();
    if (!next->is_pinned()) {
      return next;
    } else {
      assert(next->is_pinned(), "sanity");
      if (_first_pinned_candidate == NOT_FOUND) {
        _first_pinned_candidate = _next_old_collection_candidate;
      }
    }

    _next_old_collection_candidate++;
  }
  return nullptr;
}

void ShenandoahOldHeuristics::consume_old_collection_candidate() {
  _next_old_collection_candidate++;
}

unsigned int ShenandoahOldHeuristics::get_coalesce_and_fill_candidates(ShenandoahHeapRegion** buffer) {
  uint end = _last_old_region;
  uint index = _next_old_collection_candidate;
  while (index < end) {
    *buffer++ = _region_data[index++].get_region();
  }
  return (_last_old_region - _next_old_collection_candidate);
}

void ShenandoahOldHeuristics::abandon_collection_candidates() {
  _last_old_collection_candidate = 0;
  _next_old_collection_candidate = 0;
  _last_old_region = 0;
}

void ShenandoahOldHeuristics::record_cycle_end() {
  this->ShenandoahHeuristics::record_cycle_end();
  clear_triggers();
}

void ShenandoahOldHeuristics::clear_triggers() {
  // Clear any triggers that were set during mixed evacuations. Conditions may be different now that this phase has finished.
  _cannot_expand_trigger = false;
  _fragmentation_trigger = false;
  _growth_trigger = false;
}

// This triggers old-gen collection if the number of regions "dedicated" to old generation is much larger than
// is required to represent the memory currently used within the old generation. This trigger looks specifically
// at density of the old-gen spanned region. A different mechanism triggers old-gen GC if the total number of
// old-gen regions (regardless of how close the regions are to one another) grows beyond an anticipated growth target.
void ShenandoahOldHeuristics::set_trigger_if_old_is_fragmented(size_t first_old_region, size_t last_old_region,
                                                               size_t old_region_count, size_t num_regions) {
  if (ShenandoahGenerationalHumongousReserve > 0) {
    // Our intent is to pack old-gen memory into the highest-numbered regions of the heap. Count all memory
    // above first_old_region as the "span" of old generation.
    size_t old_region_span = (first_old_region <= last_old_region)? (num_regions - first_old_region): 0;
    // Given that memory at the bottom of the heap is reserved to represent humongous objects, the number of
    // regions that old_gen is "allowed" to consume is less than the total heap size. The restriction on allowed
    // span is not strictly enforced. This is a heuristic designed to reduce the likelihood that a humongous
    // allocation request will require a STW full GC.
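    //
    // Illustrative arithmetic (hypothetical values): with num_regions = 1000 and
    // ShenandoahGenerationalHumongousReserve = 10 (a percentage), allowed_old_gen_span below
    // works out to 1000 - (10 * 1000) / 100 = 900 regions.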
    size_t allowed_old_gen_span = num_regions - (ShenandoahGenerationalHumongousReserve * num_regions) / 100;

    size_t old_available = _old_gen->available() / HeapWordSize;
    size_t region_size_words = ShenandoahHeapRegion::region_size_words();
    size_t old_unaffiliated_available = _old_gen->free_unaffiliated_regions() * region_size_words;
    assert(old_available >= old_unaffiliated_available, "sanity");
    size_t old_fragmented_available = old_available - old_unaffiliated_available;

    size_t old_words_consumed = old_region_count * region_size_words - old_fragmented_available;
    size_t old_words_spanned = old_region_span * region_size_words;
    double old_density = ((double) old_words_consumed) / old_words_spanned;

    double old_span_percent = ((double) old_region_span) / allowed_old_gen_span;
    if (old_span_percent > 0.50) {
      // Squaring old_span_percent in the denominator below allows more aggressive triggering when we are
      // above desired maximum span and less aggressive triggering when we are far below the desired maximum span.
      double old_span_percent_squared = old_span_percent * old_span_percent;
      if (old_density / old_span_percent_squared < 0.75) {
        // We trigger old defragmentation, for example, if:
        //   old_span_percent is 110% and old_density is below 90.8%, or
        //   old_span_percent is 100% and old_density is below 75.0%, or
        //   old_span_percent is  90% and old_density is below 60.8%, or
        //   old_span_percent is  80% and old_density is below 48.0%, or
        //   old_span_percent is  70% and old_density is below 36.8%, or
        //   old_span_percent is  60% and old_density is below 27.0%, or
        //   old_span_percent is  50% and old_density is below 18.8%.

        // Set the fragmentation trigger and related attributes
        _fragmentation_trigger = true;
        _fragmentation_density = old_density;
        _fragmentation_first_old_region = first_old_region;
        _fragmentation_last_old_region = last_old_region;
      }
    }
  }
}

void ShenandoahOldHeuristics::set_trigger_if_old_is_overgrown() {
  size_t old_used = _old_gen->used() + _old_gen->get_humongous_waste();
  size_t trigger_threshold = _old_gen->usage_trigger_threshold();
  // Detects unsigned arithmetic underflow
  assert(old_used <= _heap->capacity(),
         "Old used (" SIZE_FORMAT ", " SIZE_FORMAT ") must not be more than heap capacity (" SIZE_FORMAT ")",
         _old_gen->used(), _old_gen->get_humongous_waste(), _heap->capacity());
  if (old_used > trigger_threshold) {
    _growth_trigger = true;
  }
}

void ShenandoahOldHeuristics::evaluate_triggers(size_t first_old_region, size_t last_old_region,
                                                size_t old_region_count, size_t num_regions) {
  set_trigger_if_old_is_fragmented(first_old_region, last_old_region, old_region_count, num_regions);
  set_trigger_if_old_is_overgrown();
}

bool ShenandoahOldHeuristics::should_start_gc() {
  // Cannot start a new old-gen GC until previous one has finished.
  //
  // Future refinement: under certain circumstances, we might be more sophisticated about this choice.
  // For example, we could choose to abandon the previous old collection before it has completed evacuations.
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (!_old_generation->can_start_gc() || heap->collection_set()->has_old_regions()) {
    return false;
  }

  if (_cannot_expand_trigger) {
    const size_t old_gen_capacity = _old_generation->max_capacity();
    const size_t heap_capacity = heap->capacity();
    const double percent = percent_of(old_gen_capacity, heap_capacity);
    log_info(gc)("Trigger (OLD): Expansion failure, current size: " SIZE_FORMAT "%s which is %.1f%% of total heap size",
                 byte_size_in_proper_unit(old_gen_capacity), proper_unit_for_byte_size(old_gen_capacity), percent);
    return true;
  }

  if (_fragmentation_trigger) {
    const size_t used = _old_generation->used();
    const size_t used_regions_size = _old_generation->used_regions_size();

    // used_regions includes humongous regions
    const size_t used_regions = _old_generation->used_regions();
    assert(used_regions_size > used_regions, "Cannot have more used than used regions");

    size_t first_old_region, last_old_region;
    double density;
    get_fragmentation_trigger_reason_for_log_message(density, first_old_region, last_old_region);
    const size_t span_of_old_regions = (last_old_region >= first_old_region)? last_old_region + 1 - first_old_region: 0;
    const size_t fragmented_free = used_regions_size - used;

    log_info(gc)("Trigger (OLD): Old has become fragmented: "
                 SIZE_FORMAT "%s available bytes spread between range spanned from "
                 SIZE_FORMAT " to " SIZE_FORMAT " (" SIZE_FORMAT "), density: %.1f%%",
                 byte_size_in_proper_unit(fragmented_free), proper_unit_for_byte_size(fragmented_free),
                 first_old_region, last_old_region, span_of_old_regions, density * 100);
    return true;
  }

  if (_growth_trigger) {
    // Growth may be falsely triggered during mixed evacuations, before the mixed-evacuation candidates have been
    // evacuated. Before acting on a false trigger, we check to confirm the trigger condition is still satisfied.
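    //
    // Illustrative arithmetic (hypothetical values): with a 10 GB heap and ShenandoahIgnoreOldGrowthBelowPercentage = 10,
    // ignore_threshold below works out to 1 GB, so an overgrowth trigger is ignored while old usage stays under 1 GB,
    // provided fewer than ShenandoahDoNotIgnoreGrowthAfterYoungCycles consecutive young GCs have completed.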
    const size_t current_usage = _old_generation->used() + _old_generation->get_humongous_waste();
    const size_t trigger_threshold = _old_generation->usage_trigger_threshold();
    const size_t heap_size = heap->capacity();
    const size_t ignore_threshold = (ShenandoahIgnoreOldGrowthBelowPercentage * heap_size) / 100;
    size_t consecutive_young_cycles;
    if ((current_usage < ignore_threshold) &&
        ((consecutive_young_cycles = heap->shenandoah_policy()->consecutive_young_gc_count())
         < ShenandoahDoNotIgnoreGrowthAfterYoungCycles)) {
      log_debug(gc)("Ignoring Trigger (OLD): Old has overgrown: usage (" SIZE_FORMAT "%s) is below threshold ("
                    SIZE_FORMAT "%s) after " SIZE_FORMAT " consecutive completed young GCs",
                    byte_size_in_proper_unit(current_usage), proper_unit_for_byte_size(current_usage),
                    byte_size_in_proper_unit(ignore_threshold), proper_unit_for_byte_size(ignore_threshold),
                    consecutive_young_cycles);
      _growth_trigger = false;
    } else if (current_usage > trigger_threshold) {
      const size_t live_at_previous_old = _old_generation->get_live_bytes_after_last_mark();
      const double percent_growth = percent_of(current_usage - live_at_previous_old, live_at_previous_old);
      log_info(gc)("Trigger (OLD): Old has overgrown, live at end of previous OLD marking: "
                   SIZE_FORMAT "%s, current usage: " SIZE_FORMAT "%s, percent growth: %.1f%%",
                   byte_size_in_proper_unit(live_at_previous_old), proper_unit_for_byte_size(live_at_previous_old),
                   byte_size_in_proper_unit(current_usage), proper_unit_for_byte_size(current_usage), percent_growth);
      return true;
    } else {
      // Mixed evacuations have decreased current_usage such that old-growth trigger is no longer relevant.
      _growth_trigger = false;
    }
  }

  // Otherwise, defer to inherited heuristic for gc trigger.
  return this->ShenandoahHeuristics::should_start_gc();
}

void ShenandoahOldHeuristics::record_success_concurrent() {
  // Forget any triggers that occurred while OLD GC was ongoing. If we really need to start another, it will retrigger.
  clear_triggers();
  this->ShenandoahHeuristics::record_success_concurrent();
}

void ShenandoahOldHeuristics::record_success_degenerated() {
  // Forget any triggers that occurred while OLD GC was ongoing. If we really need to start another, it will retrigger.
  clear_triggers();
  this->ShenandoahHeuristics::record_success_degenerated();
}

void ShenandoahOldHeuristics::record_success_full() {
  // Forget any triggers that occurred while OLD GC was ongoing. If we really need to start another, it will retrigger.
  clear_triggers();
  this->ShenandoahHeuristics::record_success_full();
}

const char* ShenandoahOldHeuristics::name() {
  return "Old";
}

bool ShenandoahOldHeuristics::is_diagnostic() {
  return false;
}

bool ShenandoahOldHeuristics::is_experimental() {
  return true;
}

void ShenandoahOldHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
                                                                    ShenandoahHeuristics::RegionData* data,
                                                                    size_t data_size, size_t free) {
  ShouldNotReachHere();
}