1 /* 2 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp" 27 #include "gc/shenandoah/shenandoahCollectionSetPreselector.hpp" 28 #include "gc/shenandoah/shenandoahFreeSet.hpp" 29 #include "gc/shenandoah/shenandoahGeneration.hpp" 30 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp" 31 #include "gc/shenandoah/shenandoahHeapRegionClosures.hpp" 32 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp" 33 #include "gc/shenandoah/shenandoahOldGeneration.hpp" 34 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp" 35 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp" 36 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp" 37 #include "gc/shenandoah/shenandoahUtils.hpp" 38 #include "gc/shenandoah/shenandoahVerifier.hpp" 39 #include "gc/shenandoah/shenandoahYoungGeneration.hpp" 40 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp" 41 42 #include "utilities/quickSort.hpp" 43 44 template <bool PREPARE_FOR_CURRENT_CYCLE, bool FULL_GC = false> 45 class ShenandoahResetBitmapClosure final : public ShenandoahHeapRegionClosure { 46 private: 47 ShenandoahHeap* _heap; 48 ShenandoahMarkingContext* _ctx; 49 50 public: 51 explicit ShenandoahResetBitmapClosure() : 52 ShenandoahHeapRegionClosure(), _heap(ShenandoahHeap::heap()), _ctx(_heap->marking_context()) {} 53 54 void heap_region_do(ShenandoahHeapRegion* region) override { 55 assert(!_heap->is_uncommit_in_progress(), "Cannot uncommit bitmaps while resetting them."); 56 if (PREPARE_FOR_CURRENT_CYCLE) { 57 if (region->need_bitmap_reset() && _heap->is_bitmap_slice_committed(region)) { 58 _ctx->clear_bitmap(region); 59 } else { 60 region->set_needs_bitmap_reset(); 61 } 62 // Capture Top At Mark Start for this generation. 63 if (FULL_GC || region->is_active()) { 64 // Reset live data and set TAMS optimistically. We would recheck these under the pause 65 // anyway to capture any updates that happened since now. 66 _ctx->capture_top_at_mark_start(region); 67 region->clear_live_data(); 68 } 69 } else { 70 if (_heap->is_bitmap_slice_committed(region)) { 71 _ctx->clear_bitmap(region); 72 region->unset_needs_bitmap_reset(); 73 } else { 74 region->set_needs_bitmap_reset(); 75 } 76 } 77 } 78 79 bool is_thread_safe() override { return true; } 80 }; 81 82 // Copy the write-version of the card-table into the read-version, clearing the 83 // write-copy. 84 class ShenandoahMergeWriteTable: public ShenandoahHeapRegionClosure { 85 private: 86 ShenandoahScanRemembered* _scanner; 87 public: 88 ShenandoahMergeWriteTable(ShenandoahScanRemembered* scanner) : _scanner(scanner) {} 89 90 void heap_region_do(ShenandoahHeapRegion* r) override { 91 assert(r->is_old(), "Don't waste time doing this for non-old regions"); 92 _scanner->merge_write_table(r->bottom(), ShenandoahHeapRegion::region_size_words()); 93 } 94 95 bool is_thread_safe() override { 96 return true; 97 } 98 }; 99 100 // Add [TAMS, top) volume over young regions. Used to correct age 0 cohort census 101 // for adaptive tenuring when census is taken during marking. 102 // In non-product builds, for the purposes of verification, we also collect the total 103 // live objects in young regions as well. 104 class ShenandoahUpdateCensusZeroCohortClosure : public ShenandoahHeapRegionClosure { 105 private: 106 ShenandoahMarkingContext* const _ctx; 107 // Population size units are words (not bytes) 108 size_t _age0_pop; // running tally of age0 population size 109 size_t _total_pop; // total live population size 110 public: 111 explicit ShenandoahUpdateCensusZeroCohortClosure(ShenandoahMarkingContext* ctx) 112 : _ctx(ctx), _age0_pop(0), _total_pop(0) {} 113 114 void heap_region_do(ShenandoahHeapRegion* r) override { 115 if (_ctx != nullptr && r->is_active()) { 116 assert(r->is_young(), "Young regions only"); 117 HeapWord* tams = _ctx->top_at_mark_start(r); 118 HeapWord* top = r->top(); 119 if (top > tams) { 120 _age0_pop += pointer_delta(top, tams); 121 } 122 // TODO: check significance of _ctx != nullptr above, can that 123 // spoof _total_pop in some corner cases? 124 NOT_PRODUCT(_total_pop += r->get_live_data_words();) 125 } 126 } 127 128 size_t get_age0_population() const { return _age0_pop; } 129 size_t get_total_population() const { return _total_pop; } 130 }; 131 132 void ShenandoahGeneration::confirm_heuristics_mode() { 133 if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) { 134 vm_exit_during_initialization( 135 err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.", 136 _heuristics->name())); 137 } 138 if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) { 139 vm_exit_during_initialization( 140 err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.", 141 _heuristics->name())); 142 } 143 } 144 145 ShenandoahHeuristics* ShenandoahGeneration::initialize_heuristics(ShenandoahMode* gc_mode) { 146 _heuristics = gc_mode->initialize_heuristics(this); 147 _heuristics->set_guaranteed_gc_interval(ShenandoahGuaranteedGCInterval); 148 confirm_heuristics_mode(); 149 return _heuristics; 150 } 151 152 size_t ShenandoahGeneration::bytes_allocated_since_gc_start() const { 153 return Atomic::load(&_bytes_allocated_since_gc_start); 154 } 155 156 void ShenandoahGeneration::reset_bytes_allocated_since_gc_start() { 157 Atomic::store(&_bytes_allocated_since_gc_start, (size_t)0); 158 } 159 160 void ShenandoahGeneration::increase_allocated(size_t bytes) { 161 Atomic::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed); 162 } 163 164 void ShenandoahGeneration::set_evacuation_reserve(size_t new_val) { 165 _evacuation_reserve = new_val; 166 } 167 168 size_t ShenandoahGeneration::get_evacuation_reserve() const { 169 return _evacuation_reserve; 170 } 171 172 void ShenandoahGeneration::augment_evacuation_reserve(size_t increment) { 173 _evacuation_reserve += increment; 174 } 175 176 void ShenandoahGeneration::log_status(const char *msg) const { 177 typedef LogTarget(Info, gc, ergo) LogGcInfo; 178 179 if (!LogGcInfo::is_enabled()) { 180 return; 181 } 182 183 // Not under a lock here, so read each of these once to make sure 184 // byte size in proper unit and proper unit for byte size are consistent. 185 const size_t v_used = used(); 186 const size_t v_used_regions = used_regions_size(); 187 const size_t v_soft_max_capacity = soft_max_capacity(); 188 const size_t v_max_capacity = max_capacity(); 189 const size_t v_available = available(); 190 const size_t v_humongous_waste = get_humongous_waste(); 191 192 const LogGcInfo target; 193 LogStream ls(target); 194 ls.print("%s: ", msg); 195 if (_type != NON_GEN) { 196 ls.print("%s generation ", name()); 197 } 198 199 ls.print_cr("used: " PROPERFMT ", used regions: " PROPERFMT ", humongous waste: " PROPERFMT 200 ", soft capacity: " PROPERFMT ", max capacity: " PROPERFMT ", available: " PROPERFMT, 201 PROPERFMTARGS(v_used), PROPERFMTARGS(v_used_regions), PROPERFMTARGS(v_humongous_waste), 202 PROPERFMTARGS(v_soft_max_capacity), PROPERFMTARGS(v_max_capacity), PROPERFMTARGS(v_available)); 203 } 204 205 template <bool PREPARE_FOR_CURRENT_CYCLE, bool FULL_GC> 206 void ShenandoahGeneration::reset_mark_bitmap() { 207 ShenandoahHeap* heap = ShenandoahHeap::heap(); 208 heap->assert_gc_workers(heap->workers()->active_workers()); 209 210 set_mark_incomplete(); 211 212 ShenandoahResetBitmapClosure<PREPARE_FOR_CURRENT_CYCLE, FULL_GC> closure; 213 parallel_heap_region_iterate_free(&closure); 214 } 215 // Explicit specializations 216 template void ShenandoahGeneration::reset_mark_bitmap<true, false>(); 217 template void ShenandoahGeneration::reset_mark_bitmap<true, true>(); 218 template void ShenandoahGeneration::reset_mark_bitmap<false, false>(); 219 220 // Swap the read and write card table pointers prior to the next remset scan. 221 // This avoids the need to synchronize reads of the table by the GC workers 222 // doing remset scanning, on the one hand, with the dirtying of the table by 223 // mutators on the other. 224 void ShenandoahGeneration::swap_card_tables() { 225 // Must be sure that marking is complete before we swap remembered set. 226 ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap(); 227 heap->assert_gc_workers(heap->workers()->active_workers()); 228 shenandoah_assert_safepoint(); 229 230 ShenandoahOldGeneration* old_generation = heap->old_generation(); 231 old_generation->card_scan()->swap_card_tables(); 232 } 233 234 // Copy the write-version of the card-table into the read-version, clearing the 235 // write-version. The work is done at a safepoint and in parallel by the GC 236 // worker threads. 237 void ShenandoahGeneration::merge_write_table() { 238 // This should only happen for degenerated cycles 239 ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap(); 240 heap->assert_gc_workers(heap->workers()->active_workers()); 241 shenandoah_assert_safepoint(); 242 243 ShenandoahOldGeneration* old_generation = heap->old_generation(); 244 ShenandoahMergeWriteTable task(old_generation->card_scan()); 245 old_generation->parallel_heap_region_iterate(&task); 246 } 247 248 void ShenandoahGeneration::prepare_gc() { 249 reset_mark_bitmap<true>(); 250 } 251 252 void ShenandoahGeneration::parallel_heap_region_iterate_free(ShenandoahHeapRegionClosure* cl) { 253 ShenandoahHeap::heap()->parallel_heap_region_iterate(cl); 254 } 255 256 void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap) { 257 shenandoah_assert_generational(); 258 259 ShenandoahOldGeneration* const old_generation = heap->old_generation(); 260 ShenandoahYoungGeneration* const young_generation = heap->young_generation(); 261 262 // During initialization and phase changes, it is more likely that fewer objects die young and old-gen 263 // memory is not yet full (or is in the process of being replaced). During these times especially, it 264 // is beneficial to loan memory from old-gen to young-gen during the evacuation and update-refs phases 265 // of execution. 266 267 // Calculate EvacuationReserve before PromotionReserve. Evacuation is more critical than promotion. 268 // If we cannot evacuate old-gen, we will not be able to reclaim old-gen memory. Promotions are less 269 // critical. If we cannot promote, there may be degradation of young-gen memory because old objects 270 // accumulate there until they can be promoted. This increases the young-gen marking and evacuation work. 271 272 // First priority is to reclaim the easy garbage out of young-gen. 273 274 // maximum_young_evacuation_reserve is upper bound on memory to be evacuated out of young 275 const size_t maximum_young_evacuation_reserve = (young_generation->max_capacity() * ShenandoahEvacReserve) / 100; 276 const size_t young_evacuation_reserve = MIN2(maximum_young_evacuation_reserve, young_generation->available_with_reserve()); 277 278 // maximum_old_evacuation_reserve is an upper bound on memory evacuated from old and evacuated to old (promoted), 279 // clamped by the old generation space available. 280 // 281 // Here's the algebra. 282 // Let SOEP = ShenandoahOldEvacRatioPercent, 283 // OE = old evac, 284 // YE = young evac, and 285 // TE = total evac = OE + YE 286 // By definition: 287 // SOEP/100 = OE/TE 288 // = OE/(OE+YE) 289 // => SOEP/(100-SOEP) = OE/((OE+YE)-OE) // componendo-dividendo: If a/b = c/d, then a/(b-a) = c/(d-c) 290 // = OE/YE 291 // => OE = YE*SOEP/(100-SOEP) 292 293 // We have to be careful in the event that SOEP is set to 100 by the user. 294 assert(ShenandoahOldEvacRatioPercent <= 100, "Error"); 295 const size_t old_available = old_generation->available(); 296 const size_t maximum_old_evacuation_reserve = (ShenandoahOldEvacRatioPercent == 100) ? 297 old_available : MIN2((maximum_young_evacuation_reserve * ShenandoahOldEvacRatioPercent) / (100 - ShenandoahOldEvacRatioPercent), 298 old_available); 299 300 301 // Second priority is to reclaim garbage out of old-gen if there are old-gen collection candidates. Third priority 302 // is to promote as much as we have room to promote. However, if old-gen memory is in short supply, this means young 303 // GC is operating under "duress" and was unable to transfer the memory that we would normally expect. In this case, 304 // old-gen will refrain from compacting itself in order to allow a quicker young-gen cycle (by avoiding the update-refs 305 // through ALL of old-gen). If there is some memory available in old-gen, we will use this for promotions as promotions 306 // do not add to the update-refs burden of GC. 307 308 size_t old_evacuation_reserve, old_promo_reserve; 309 if (is_global()) { 310 // Global GC is typically triggered by user invocation of System.gc(), and typically indicates that there is lots 311 // of garbage to be reclaimed because we are starting a new phase of execution. Marking for global GC may take 312 // significantly longer than typical young marking because we must mark through all old objects. To expedite 313 // evacuation and update-refs, we give emphasis to reclaiming garbage first, wherever that garbage is found. 314 // Global GC will adjust generation sizes to accommodate the collection set it chooses. 315 316 // Set old_promo_reserve to enforce that no regions are preselected for promotion. Such regions typically 317 // have relatively high memory utilization. We still call select_aged_regions() because this will prepare for 318 // promotions in place, if relevant. 319 old_promo_reserve = 0; 320 321 // Dedicate all available old memory to old_evacuation reserve. This may be small, because old-gen is only 322 // expanded based on an existing mixed evacuation workload at the end of the previous GC cycle. We'll expand 323 // the budget for evacuation of old during GLOBAL cset selection. 324 old_evacuation_reserve = maximum_old_evacuation_reserve; 325 } else if (old_generation->has_unprocessed_collection_candidates()) { 326 // We reserved all old-gen memory at end of previous GC to hold anticipated evacuations to old-gen. If this is 327 // mixed evacuation, reserve all of this memory for compaction of old-gen and do not promote. Prioritize compaction 328 // over promotion in order to defragment OLD so that it will be better prepared to efficiently receive promoted memory. 329 old_evacuation_reserve = maximum_old_evacuation_reserve; 330 old_promo_reserve = 0; 331 } else { 332 // Make all old-evacuation memory for promotion, but if we can't use it all for promotion, we'll allow some evacuation. 333 old_evacuation_reserve = 0; 334 old_promo_reserve = maximum_old_evacuation_reserve; 335 } 336 assert(old_evacuation_reserve <= old_available, "Error"); 337 338 // We see too many old-evacuation failures if we force ourselves to evacuate into regions that are not initially empty. 339 // So we limit the old-evacuation reserve to unfragmented memory. Even so, old-evacuation is free to fill in nooks and 340 // crannies within existing partially used regions and it generally tries to do so. 341 const size_t old_free_unfragmented = old_generation->free_unaffiliated_regions() * ShenandoahHeapRegion::region_size_bytes(); 342 if (old_evacuation_reserve > old_free_unfragmented) { 343 const size_t delta = old_evacuation_reserve - old_free_unfragmented; 344 old_evacuation_reserve -= delta; 345 // Let promo consume fragments of old-gen memory if not global 346 if (!is_global()) { 347 old_promo_reserve += delta; 348 } 349 } 350 351 // Preselect regions for promotion by evacuation (obtaining the live data to seed promoted_reserve), 352 // and identify regions that will promote in place. These use the tenuring threshold. 353 const size_t consumed_by_advance_promotion = select_aged_regions(old_promo_reserve); 354 assert(consumed_by_advance_promotion <= maximum_old_evacuation_reserve, "Cannot promote more than available old-gen memory"); 355 356 // Note that unused old_promo_reserve might not be entirely consumed_by_advance_promotion. Do not transfer this 357 // to old_evacuation_reserve because this memory is likely very fragmented, and we do not want to increase the likelihood 358 // of old evacuation failure. 359 young_generation->set_evacuation_reserve(young_evacuation_reserve); 360 old_generation->set_evacuation_reserve(old_evacuation_reserve); 361 old_generation->set_promoted_reserve(consumed_by_advance_promotion); 362 363 // There is no need to expand OLD because all memory used here was set aside at end of previous GC, except in the 364 // case of a GLOBAL gc. During choose_collection_set() of GLOBAL, old will be expanded on demand. 365 } 366 367 // Having chosen the collection set, adjust the budgets for generational mode based on its composition. Note 368 // that young_generation->available() now knows about recently discovered immediate garbage. 369 // 370 void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap, ShenandoahCollectionSet* const collection_set) { 371 shenandoah_assert_generational(); 372 // We may find that old_evacuation_reserve and/or loaned_for_young_evacuation are not fully consumed, in which case we may 373 // be able to increase regions_available_to_loan 374 375 // The role of adjust_evacuation_budgets() is to compute the correct value of regions_available_to_loan and to make 376 // effective use of this memory, including the remnant memory within these regions that may result from rounding loan to 377 // integral number of regions. Excess memory that is available to be loaned is applied to an allocation supplement, 378 // which allows mutators to allocate memory beyond the current capacity of young-gen on the promise that the loan 379 // will be repaid as soon as we finish updating references for the recently evacuated collection set. 380 381 // We cannot recalculate regions_available_to_loan by simply dividing old_generation->available() by region_size_bytes 382 // because the available memory may be distributed between many partially occupied regions that are already holding old-gen 383 // objects. Memory in partially occupied regions is not "available" to be loaned. Note that an increase in old-gen 384 // available that results from a decrease in memory consumed by old evacuation is not necessarily available to be loaned 385 // to young-gen. 386 387 size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); 388 ShenandoahOldGeneration* const old_generation = heap->old_generation(); 389 ShenandoahYoungGeneration* const young_generation = heap->young_generation(); 390 391 size_t old_evacuated = collection_set->get_old_bytes_reserved_for_evacuation(); 392 size_t old_evacuated_committed = (size_t) (ShenandoahOldEvacWaste * double(old_evacuated)); 393 size_t old_evacuation_reserve = old_generation->get_evacuation_reserve(); 394 395 if (old_evacuated_committed > old_evacuation_reserve) { 396 // This should only happen due to round-off errors when enforcing ShenandoahOldEvacWaste 397 assert(old_evacuated_committed <= (33 * old_evacuation_reserve) / 32, 398 "Round-off errors should be less than 3.125%%, committed: " SIZE_FORMAT ", reserved: " SIZE_FORMAT, 399 old_evacuated_committed, old_evacuation_reserve); 400 old_evacuated_committed = old_evacuation_reserve; 401 // Leave old_evac_reserve as previously configured 402 } else if (old_evacuated_committed < old_evacuation_reserve) { 403 // This happens if the old-gen collection consumes less than full budget. 404 old_evacuation_reserve = old_evacuated_committed; 405 old_generation->set_evacuation_reserve(old_evacuation_reserve); 406 } 407 408 size_t young_advance_promoted = collection_set->get_young_bytes_to_be_promoted(); 409 size_t young_advance_promoted_reserve_used = (size_t) (ShenandoahPromoEvacWaste * double(young_advance_promoted)); 410 411 size_t young_evacuated = collection_set->get_young_bytes_reserved_for_evacuation(); 412 size_t young_evacuated_reserve_used = (size_t) (ShenandoahEvacWaste * double(young_evacuated)); 413 414 size_t total_young_available = young_generation->available_with_reserve(); 415 assert(young_evacuated_reserve_used <= total_young_available, "Cannot evacuate more than is available in young"); 416 young_generation->set_evacuation_reserve(young_evacuated_reserve_used); 417 418 size_t old_available = old_generation->available(); 419 // Now that we've established the collection set, we know how much memory is really required by old-gen for evacuation 420 // and promotion reserves. Try shrinking OLD now in case that gives us a bit more runway for mutator allocations during 421 // evac and update phases. 422 size_t old_consumed = old_evacuated_committed + young_advance_promoted_reserve_used; 423 424 if (old_available < old_consumed) { 425 // This can happen due to round-off errors when adding the results of truncated integer arithmetic. 426 // We've already truncated old_evacuated_committed. Truncate young_advance_promoted_reserve_used here. 427 assert(young_advance_promoted_reserve_used <= (33 * (old_available - old_evacuated_committed)) / 32, 428 "Round-off errors should be less than 3.125%%, committed: " SIZE_FORMAT ", reserved: " SIZE_FORMAT, 429 young_advance_promoted_reserve_used, old_available - old_evacuated_committed); 430 young_advance_promoted_reserve_used = old_available - old_evacuated_committed; 431 old_consumed = old_evacuated_committed + young_advance_promoted_reserve_used; 432 } 433 434 assert(old_available >= old_consumed, "Cannot consume (" SIZE_FORMAT ") more than is available (" SIZE_FORMAT ")", 435 old_consumed, old_available); 436 size_t excess_old = old_available - old_consumed; 437 size_t unaffiliated_old_regions = old_generation->free_unaffiliated_regions(); 438 size_t unaffiliated_old = unaffiliated_old_regions * region_size_bytes; 439 assert(old_available >= unaffiliated_old, "Unaffiliated old is a subset of old available"); 440 441 // Make sure old_evac_committed is unaffiliated 442 if (old_evacuated_committed > 0) { 443 if (unaffiliated_old > old_evacuated_committed) { 444 size_t giveaway = unaffiliated_old - old_evacuated_committed; 445 size_t giveaway_regions = giveaway / region_size_bytes; // round down 446 if (giveaway_regions > 0) { 447 excess_old = MIN2(excess_old, giveaway_regions * region_size_bytes); 448 } else { 449 excess_old = 0; 450 } 451 } else { 452 excess_old = 0; 453 } 454 } 455 456 // If we find that OLD has excess regions, give them back to YOUNG now to reduce likelihood we run out of allocation 457 // runway during evacuation and update-refs. 458 size_t regions_to_xfer = 0; 459 if (excess_old > unaffiliated_old) { 460 // we can give back unaffiliated_old (all of unaffiliated is excess) 461 if (unaffiliated_old_regions > 0) { 462 regions_to_xfer = unaffiliated_old_regions; 463 } 464 } else if (unaffiliated_old_regions > 0) { 465 // excess_old < unaffiliated old: we can give back MIN(excess_old/region_size_bytes, unaffiliated_old_regions) 466 size_t excess_regions = excess_old / region_size_bytes; 467 regions_to_xfer = MIN2(excess_regions, unaffiliated_old_regions); 468 } 469 470 if (regions_to_xfer > 0) { 471 bool result = ShenandoahGenerationalHeap::cast(heap)->generation_sizer()->transfer_to_young(regions_to_xfer); 472 assert(excess_old >= regions_to_xfer * region_size_bytes, 473 "Cannot transfer (" SIZE_FORMAT ", " SIZE_FORMAT ") more than excess old (" SIZE_FORMAT ")", 474 regions_to_xfer, region_size_bytes, excess_old); 475 excess_old -= regions_to_xfer * region_size_bytes; 476 log_debug(gc, ergo)("%s transferred " SIZE_FORMAT " excess regions to young before start of evacuation", 477 result? "Successfully": "Unsuccessfully", regions_to_xfer); 478 } 479 480 // Add in the excess_old memory to hold unanticipated promotions, if any. If there are more unanticipated 481 // promotions than fit in reserved memory, they will be deferred until a future GC pass. 482 size_t total_promotion_reserve = young_advance_promoted_reserve_used + excess_old; 483 old_generation->set_promoted_reserve(total_promotion_reserve); 484 old_generation->reset_promoted_expended(); 485 } 486 487 typedef struct { 488 ShenandoahHeapRegion* _region; 489 size_t _live_data; 490 } AgedRegionData; 491 492 static int compare_by_aged_live(AgedRegionData a, AgedRegionData b) { 493 if (a._live_data < b._live_data) 494 return -1; 495 else if (a._live_data > b._live_data) 496 return 1; 497 else return 0; 498 } 499 500 inline void assert_no_in_place_promotions() { 501 #ifdef ASSERT 502 class ShenandoahNoInPlacePromotions : public ShenandoahHeapRegionClosure { 503 public: 504 void heap_region_do(ShenandoahHeapRegion *r) override { 505 assert(r->get_top_before_promote() == nullptr, 506 "Region " SIZE_FORMAT " should not be ready for in-place promotion", r->index()); 507 } 508 } cl; 509 ShenandoahHeap::heap()->heap_region_iterate(&cl); 510 #endif 511 } 512 513 // Preselect for inclusion into the collection set regions whose age is at or above tenure age which contain more than 514 // ShenandoahOldGarbageThreshold amounts of garbage. We identify these regions by setting the appropriate entry of 515 // the collection set's preselected regions array to true. All entries are initialized to false before calling this 516 // function. 517 // 518 // During the subsequent selection of the collection set, we give priority to these promotion set candidates. 519 // Without this prioritization, we found that the aged regions tend to be ignored because they typically have 520 // much less garbage and much more live data than the recently allocated "eden" regions. When aged regions are 521 // repeatedly excluded from the collection set, the amount of live memory within the young generation tends to 522 // accumulate and this has the undesirable side effect of causing young-generation collections to require much more 523 // CPU and wall-clock time. 524 // 525 // A second benefit of treating aged regions differently than other regions during collection set selection is 526 // that this allows us to more accurately budget memory to hold the results of evacuation. Memory for evacuation 527 // of aged regions must be reserved in the old generation. Memory for evacuation of all other regions must be 528 // reserved in the young generation. 529 size_t ShenandoahGeneration::select_aged_regions(size_t old_available) { 530 531 // There should be no regions configured for subsequent in-place-promotions carried over from the previous cycle. 532 assert_no_in_place_promotions(); 533 534 auto const heap = ShenandoahGenerationalHeap::heap(); 535 bool* const candidate_regions_for_promotion_by_copy = heap->collection_set()->preselected_regions(); 536 ShenandoahMarkingContext* const ctx = heap->marking_context(); 537 538 const uint tenuring_threshold = heap->age_census()->tenuring_threshold(); 539 const size_t old_garbage_threshold = (ShenandoahHeapRegion::region_size_bytes() * ShenandoahOldGarbageThreshold) / 100; 540 541 size_t old_consumed = 0; 542 size_t promo_potential = 0; 543 size_t candidates = 0; 544 545 // Tracks the padding of space above top in regions eligible for promotion in place 546 size_t promote_in_place_pad = 0; 547 548 // Sort the promotion-eligible regions in order of increasing live-data-bytes so that we can first reclaim regions that require 549 // less evacuation effort. This prioritizes garbage first, expanding the allocation pool early before we reclaim regions that 550 // have more live data. 551 const size_t num_regions = heap->num_regions(); 552 553 ResourceMark rm; 554 AgedRegionData* sorted_regions = NEW_RESOURCE_ARRAY(AgedRegionData, num_regions); 555 556 for (size_t i = 0; i < num_regions; i++) { 557 ShenandoahHeapRegion* const r = heap->get_region(i); 558 if (r->is_empty() || !r->has_live() || !r->is_young() || !r->is_regular()) { 559 // skip over regions that aren't regular young with some live data 560 continue; 561 } 562 if (r->age() >= tenuring_threshold) { 563 if ((r->garbage() < old_garbage_threshold)) { 564 // This tenure-worthy region has too little garbage, so we do not want to expend the copying effort to 565 // reclaim the garbage; instead this region may be eligible for promotion-in-place to the 566 // old generation. 567 HeapWord* tams = ctx->top_at_mark_start(r); 568 HeapWord* original_top = r->top(); 569 if (!heap->is_concurrent_old_mark_in_progress() && tams == original_top) { 570 // No allocations from this region have been made during concurrent mark. It meets all the criteria 571 // for in-place-promotion. Though we only need the value of top when we fill the end of the region, 572 // we use this field to indicate that this region should be promoted in place during the evacuation 573 // phase. 574 r->save_top_before_promote(); 575 576 size_t remnant_size = r->free() / HeapWordSize; 577 if (remnant_size > ShenandoahHeap::min_fill_size()) { 578 ShenandoahHeap::fill_with_object(original_top, remnant_size); 579 // Fill the remnant memory within this region to assure no allocations prior to promote in place. Otherwise, 580 // newly allocated objects will not be parsable when promote in place tries to register them. Furthermore, any 581 // new allocations would not necessarily be eligible for promotion. This addresses both issues. 582 r->set_top(r->end()); 583 promote_in_place_pad += remnant_size * HeapWordSize; 584 } else { 585 // Since the remnant is so small that it cannot be filled, we don't have to worry about any accidental 586 // allocations occurring within this region before the region is promoted in place. 587 } 588 } 589 // Else, we do not promote this region (either in place or by copy) because it has received new allocations. 590 591 // During evacuation, we exclude from promotion regions for which age > tenure threshold, garbage < garbage-threshold, 592 // and get_top_before_promote() != tams 593 } else { 594 // Record this promotion-eligible candidate region. After sorting and selecting the best candidates below, 595 // we may still decide to exclude this promotion-eligible region from the current collection set. If this 596 // happens, we will consider this region as part of the anticipated promotion potential for the next GC 597 // pass; see further below. 598 sorted_regions[candidates]._region = r; 599 sorted_regions[candidates++]._live_data = r->get_live_data_bytes(); 600 } 601 } else { 602 // We only evacuate & promote objects from regular regions whose garbage() is above old-garbage-threshold. 603 // Objects in tenure-worthy regions with less garbage are promoted in place. These take a different path to 604 // old-gen. Regions excluded from promotion because their garbage content is too low (causing us to anticipate that 605 // the region would be promoted in place) may be eligible for evacuation promotion by the time promotion takes 606 // place during a subsequent GC pass because more garbage is found within the region between now and then. This 607 // should not happen if we are properly adapting the tenure age. The theory behind adaptive tenuring threshold 608 // is to choose the youngest age that demonstrates no "significant" further loss of population since the previous 609 // age. If not this, we expect the tenure age to demonstrate linear population decay for at least two population 610 // samples, whereas we expect to observe exponential population decay for ages younger than the tenure age. 611 // 612 // In the case that certain regions which were anticipated to be promoted in place need to be promoted by 613 // evacuation, it may be the case that there is not sufficient reserve within old-gen to hold evacuation of 614 // these regions. The likely outcome is that these regions will not be selected for evacuation or promotion 615 // in the current cycle and we will anticipate that they will be promoted in the next cycle. This will cause 616 // us to reserve more old-gen memory so that these objects can be promoted in the subsequent cycle. 617 if (heap->is_aging_cycle() && (r->age() + 1 == tenuring_threshold)) { 618 if (r->garbage() >= old_garbage_threshold) { 619 promo_potential += r->get_live_data_bytes(); 620 } 621 } 622 } 623 // Note that we keep going even if one region is excluded from selection. 624 // Subsequent regions may be selected if they have smaller live data. 625 } 626 // Sort in increasing order according to live data bytes. Note that candidates represents the number of regions 627 // that qualify to be promoted by evacuation. 628 if (candidates > 0) { 629 size_t selected_regions = 0; 630 size_t selected_live = 0; 631 QuickSort::sort<AgedRegionData>(sorted_regions, candidates, compare_by_aged_live, false); 632 for (size_t i = 0; i < candidates; i++) { 633 ShenandoahHeapRegion* const region = sorted_regions[i]._region; 634 size_t region_live_data = sorted_regions[i]._live_data; 635 size_t promotion_need = (size_t) (region_live_data * ShenandoahPromoEvacWaste); 636 if (old_consumed + promotion_need <= old_available) { 637 old_consumed += promotion_need; 638 candidate_regions_for_promotion_by_copy[region->index()] = true; 639 selected_regions++; 640 selected_live += region_live_data; 641 } else { 642 // We rejected this promotable region from the collection set because we had no room to hold its copy. 643 // Add this region to promo potential for next GC. 644 promo_potential += region_live_data; 645 assert(!candidate_regions_for_promotion_by_copy[region->index()], "Shouldn't be selected"); 646 } 647 // We keep going even if one region is excluded from selection because we need to accumulate all eligible 648 // regions that are not preselected into promo_potential 649 } 650 log_debug(gc)("Preselected " SIZE_FORMAT " regions containing " SIZE_FORMAT " live bytes," 651 " consuming: " SIZE_FORMAT " of budgeted: " SIZE_FORMAT, 652 selected_regions, selected_live, old_consumed, old_available); 653 } 654 655 heap->old_generation()->set_pad_for_promote_in_place(promote_in_place_pad); 656 heap->old_generation()->set_promotion_potential(promo_potential); 657 return old_consumed; 658 } 659 660 void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) { 661 ShenandoahHeap* heap = ShenandoahHeap::heap(); 662 ShenandoahCollectionSet* collection_set = heap->collection_set(); 663 bool is_generational = heap->mode()->is_generational(); 664 665 assert(!heap->is_full_gc_in_progress(), "Only for concurrent and degenerated GC"); 666 assert(!is_old(), "Only YOUNG and GLOBAL GC perform evacuations"); 667 { 668 ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_update_region_states : 669 ShenandoahPhaseTimings::degen_gc_final_update_region_states); 670 ShenandoahFinalMarkUpdateRegionStateClosure cl(complete_marking_context()); 671 parallel_heap_region_iterate(&cl); 672 673 if (is_young()) { 674 // We always need to update the watermark for old regions. If there 675 // are mixed collections pending, we also need to synchronize the 676 // pinned status for old regions. Since we are already visiting every 677 // old region here, go ahead and sync the pin status too. 678 ShenandoahFinalMarkUpdateRegionStateClosure old_cl(nullptr); 679 heap->old_generation()->parallel_heap_region_iterate(&old_cl); 680 } 681 } 682 683 // Tally the census counts and compute the adaptive tenuring threshold 684 if (is_generational && ShenandoahGenerationalAdaptiveTenuring && !ShenandoahGenerationalCensusAtEvac) { 685 // Objects above TAMS weren't included in the age census. Since they were all 686 // allocated in this cycle they belong in the age 0 cohort. We walk over all 687 // young regions and sum the volume of objects between TAMS and top. 688 ShenandoahUpdateCensusZeroCohortClosure age0_cl(complete_marking_context()); 689 heap->young_generation()->heap_region_iterate(&age0_cl); 690 size_t age0_pop = age0_cl.get_age0_population(); 691 692 // Update the global census, including the missed age 0 cohort above, 693 // along with the census done during marking, and compute the tenuring threshold. 694 ShenandoahAgeCensus* census = ShenandoahGenerationalHeap::heap()->age_census(); 695 census->update_census(age0_pop); 696 #ifndef PRODUCT 697 size_t total_pop = age0_cl.get_total_population(); 698 size_t total_census = census->get_total(); 699 // Usually total_pop > total_census, but not by too much. 700 // We use integer division so anything up to just less than 2 is considered 701 // reasonable, and the "+1" is to avoid divide-by-zero. 702 assert((total_pop+1)/(total_census+1) == 1, "Extreme divergence: " 703 SIZE_FORMAT "/" SIZE_FORMAT, total_pop, total_census); 704 #endif 705 } 706 707 { 708 ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::choose_cset : 709 ShenandoahPhaseTimings::degen_gc_choose_cset); 710 711 collection_set->clear(); 712 ShenandoahHeapLocker locker(heap->lock()); 713 if (is_generational) { 714 // Seed the collection set with resource area-allocated 715 // preselected regions, which are removed when we exit this scope. 716 ShenandoahCollectionSetPreselector preselector(collection_set, heap->num_regions()); 717 718 // Find the amount that will be promoted, regions that will be promoted in 719 // place, and preselect older regions that will be promoted by evacuation. 720 compute_evacuation_budgets(heap); 721 722 // Choose the collection set, including the regions preselected above for 723 // promotion into the old generation. 724 _heuristics->choose_collection_set(collection_set); 725 if (!collection_set->is_empty()) { 726 // only make use of evacuation budgets when we are evacuating 727 adjust_evacuation_budgets(heap, collection_set); 728 } 729 730 if (is_global()) { 731 // We have just chosen a collection set for a global cycle. The mark bitmap covering old regions is complete, so 732 // the remembered set scan can use that to avoid walking into garbage. When the next old mark begins, we will 733 // use the mark bitmap to make the old regions parsable by coalescing and filling any unmarked objects. Thus, 734 // we prepare for old collections by remembering which regions are old at this time. Note that any objects 735 // promoted into old regions will be above TAMS, and so will be considered marked. However, free regions that 736 // become old after this point will not be covered correctly by the mark bitmap, so we must be careful not to 737 // coalesce those regions. Only the old regions which are not part of the collection set at this point are 738 // eligible for coalescing. As implemented now, this has the side effect of possibly initiating mixed-evacuations 739 // after a global cycle for old regions that were not included in this collection set. 740 heap->old_generation()->prepare_for_mixed_collections_after_global_gc(); 741 } 742 } else { 743 _heuristics->choose_collection_set(collection_set); 744 } 745 } 746 747 748 { 749 ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset : 750 ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset); 751 ShenandoahHeapLocker locker(heap->lock()); 752 size_t young_cset_regions, old_cset_regions; 753 754 // We are preparing for evacuation. At this time, we ignore cset region tallies. 755 size_t first_old, last_old, num_old; 756 heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old); 757 // Free set construction uses reserve quantities, because they are known to be valid here 758 heap->free_set()->finish_rebuild(young_cset_regions, old_cset_regions, num_old, true); 759 } 760 } 761 762 bool ShenandoahGeneration::is_bitmap_clear() { 763 ShenandoahHeap* heap = ShenandoahHeap::heap(); 764 ShenandoahMarkingContext* context = heap->marking_context(); 765 const size_t num_regions = heap->num_regions(); 766 for (size_t idx = 0; idx < num_regions; idx++) { 767 ShenandoahHeapRegion* r = heap->get_region(idx); 768 if (contains(r) && r->is_affiliated()) { 769 if (heap->is_bitmap_slice_committed(r) && (context->top_at_mark_start(r) > r->bottom()) && 770 !context->is_bitmap_range_within_region_clear(r->bottom(), r->end())) { 771 return false; 772 } 773 } 774 } 775 return true; 776 } 777 778 void ShenandoahGeneration::set_mark_complete() { 779 _is_marking_complete.set(); 780 } 781 782 void ShenandoahGeneration::set_mark_incomplete() { 783 _is_marking_complete.unset(); 784 } 785 786 ShenandoahMarkingContext* ShenandoahGeneration::complete_marking_context() { 787 assert(is_mark_complete(), "Marking must be completed."); 788 return ShenandoahHeap::heap()->marking_context(); 789 } 790 791 void ShenandoahGeneration::cancel_marking() { 792 log_info(gc)("Cancel marking: %s", name()); 793 if (is_concurrent_mark_in_progress()) { 794 set_mark_incomplete(); 795 } 796 _task_queues->clear(); 797 ref_processor()->abandon_partial_discovery(); 798 set_concurrent_mark_in_progress(false); 799 } 800 801 ShenandoahGeneration::ShenandoahGeneration(ShenandoahGenerationType type, 802 uint max_workers, 803 size_t max_capacity, 804 size_t soft_max_capacity) : 805 _type(type), 806 _task_queues(new ShenandoahObjToScanQueueSet(max_workers)), 807 _ref_processor(new ShenandoahReferenceProcessor(MAX2(max_workers, 1U))), 808 _affiliated_region_count(0), _humongous_waste(0), _evacuation_reserve(0), 809 _used(0), _bytes_allocated_since_gc_start(0), 810 _max_capacity(max_capacity), _soft_max_capacity(soft_max_capacity), 811 _heuristics(nullptr) 812 { 813 _is_marking_complete.set(); 814 assert(max_workers > 0, "At least one queue"); 815 for (uint i = 0; i < max_workers; ++i) { 816 ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue(); 817 _task_queues->register_queue(i, task_queue); 818 } 819 } 820 821 ShenandoahGeneration::~ShenandoahGeneration() { 822 for (uint i = 0; i < _task_queues->size(); ++i) { 823 ShenandoahObjToScanQueue* q = _task_queues->queue(i); 824 delete q; 825 } 826 delete _task_queues; 827 } 828 829 void ShenandoahGeneration::reserve_task_queues(uint workers) { 830 _task_queues->reserve(workers); 831 } 832 833 ShenandoahObjToScanQueueSet* ShenandoahGeneration::old_gen_task_queues() const { 834 return nullptr; 835 } 836 837 void ShenandoahGeneration::scan_remembered_set(bool is_concurrent) { 838 assert(is_young(), "Should only scan remembered set for young generation."); 839 840 ShenandoahGenerationalHeap* const heap = ShenandoahGenerationalHeap::heap(); 841 uint nworkers = heap->workers()->active_workers(); 842 reserve_task_queues(nworkers); 843 844 ShenandoahReferenceProcessor* rp = ref_processor(); 845 ShenandoahRegionChunkIterator work_list(nworkers); 846 ShenandoahScanRememberedTask task(task_queues(), old_gen_task_queues(), rp, &work_list, is_concurrent); 847 heap->assert_gc_workers(nworkers); 848 heap->workers()->run_task(&task); 849 if (ShenandoahEnableCardStats) { 850 ShenandoahScanRemembered* scanner = heap->old_generation()->card_scan(); 851 assert(scanner != nullptr, "Not generational"); 852 scanner->log_card_stats(nworkers, CARD_STAT_SCAN_RS); 853 } 854 } 855 856 size_t ShenandoahGeneration::increment_affiliated_region_count() { 857 shenandoah_assert_heaplocked_or_safepoint(); 858 // During full gc, multiple GC worker threads may change region affiliations without a lock. No lock is enforced 859 // on read and write of _affiliated_region_count. At the end of full gc, a single thread overwrites the count with 860 // a coherent value. 861 return Atomic::add(&_affiliated_region_count, (size_t) 1); 862 } 863 864 size_t ShenandoahGeneration::decrement_affiliated_region_count() { 865 shenandoah_assert_heaplocked_or_safepoint(); 866 // During full gc, multiple GC worker threads may change region affiliations without a lock. No lock is enforced 867 // on read and write of _affiliated_region_count. At the end of full gc, a single thread overwrites the count with 868 // a coherent value. 869 auto affiliated_region_count = Atomic::sub(&_affiliated_region_count, (size_t) 1); 870 assert(ShenandoahHeap::heap()->is_full_gc_in_progress() || 871 (used() + _humongous_waste <= affiliated_region_count * ShenandoahHeapRegion::region_size_bytes()), 872 "used + humongous cannot exceed regions"); 873 return affiliated_region_count; 874 } 875 876 size_t ShenandoahGeneration::decrement_affiliated_region_count_without_lock() { 877 return Atomic::sub(&_affiliated_region_count, (size_t) 1); 878 } 879 880 size_t ShenandoahGeneration::increase_affiliated_region_count(size_t delta) { 881 shenandoah_assert_heaplocked_or_safepoint(); 882 return Atomic::add(&_affiliated_region_count, delta); 883 } 884 885 size_t ShenandoahGeneration::decrease_affiliated_region_count(size_t delta) { 886 shenandoah_assert_heaplocked_or_safepoint(); 887 assert(Atomic::load(&_affiliated_region_count) >= delta, "Affiliated region count cannot be negative"); 888 889 auto const affiliated_region_count = Atomic::sub(&_affiliated_region_count, delta); 890 assert(ShenandoahHeap::heap()->is_full_gc_in_progress() || 891 (_used + _humongous_waste <= affiliated_region_count * ShenandoahHeapRegion::region_size_bytes()), 892 "used + humongous cannot exceed regions"); 893 return affiliated_region_count; 894 } 895 896 void ShenandoahGeneration::establish_usage(size_t num_regions, size_t num_bytes, size_t humongous_waste) { 897 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint"); 898 Atomic::store(&_affiliated_region_count, num_regions); 899 Atomic::store(&_used, num_bytes); 900 _humongous_waste = humongous_waste; 901 } 902 903 void ShenandoahGeneration::increase_used(size_t bytes) { 904 Atomic::add(&_used, bytes); 905 } 906 907 void ShenandoahGeneration::increase_humongous_waste(size_t bytes) { 908 if (bytes > 0) { 909 Atomic::add(&_humongous_waste, bytes); 910 } 911 } 912 913 void ShenandoahGeneration::decrease_humongous_waste(size_t bytes) { 914 if (bytes > 0) { 915 assert(ShenandoahHeap::heap()->is_full_gc_in_progress() || (_humongous_waste >= bytes), 916 "Waste (" SIZE_FORMAT ") cannot be negative (after subtracting " SIZE_FORMAT ")", _humongous_waste, bytes); 917 Atomic::sub(&_humongous_waste, bytes); 918 } 919 } 920 921 void ShenandoahGeneration::decrease_used(size_t bytes) { 922 assert(ShenandoahHeap::heap()->is_full_gc_in_progress() || 923 (_used >= bytes), "cannot reduce bytes used by generation below zero"); 924 Atomic::sub(&_used, bytes); 925 } 926 927 size_t ShenandoahGeneration::used_regions() const { 928 return Atomic::load(&_affiliated_region_count); 929 } 930 931 size_t ShenandoahGeneration::free_unaffiliated_regions() const { 932 size_t result = max_capacity() / ShenandoahHeapRegion::region_size_bytes(); 933 auto const used_regions = this->used_regions(); 934 if (used_regions > result) { 935 result = 0; 936 } else { 937 result -= used_regions; 938 } 939 return result; 940 } 941 942 size_t ShenandoahGeneration::used_regions_size() const { 943 return used_regions() * ShenandoahHeapRegion::region_size_bytes(); 944 } 945 946 size_t ShenandoahGeneration::available() const { 947 return available(max_capacity()); 948 } 949 950 // For ShenandoahYoungGeneration, Include the young available that may have been reserved for the Collector. 951 size_t ShenandoahGeneration::available_with_reserve() const { 952 return available(max_capacity()); 953 } 954 955 size_t ShenandoahGeneration::soft_available() const { 956 return available(soft_max_capacity()); 957 } 958 959 size_t ShenandoahGeneration::available(size_t capacity) const { 960 size_t in_use = used() + get_humongous_waste(); 961 return in_use > capacity ? 0 : capacity - in_use; 962 } 963 964 size_t ShenandoahGeneration::increase_capacity(size_t increment) { 965 shenandoah_assert_heaplocked_or_safepoint(); 966 967 // We do not enforce that new capacity >= heap->max_size_for(this). The maximum generation size is treated as a rule of thumb 968 // which may be violated during certain transitions, such as when we are forcing transfers for the purpose of promoting regions 969 // in place. 970 assert(ShenandoahHeap::heap()->is_full_gc_in_progress() || 971 (_max_capacity + increment <= ShenandoahHeap::heap()->max_capacity()), "Generation cannot be larger than heap size"); 972 assert(increment % ShenandoahHeapRegion::region_size_bytes() == 0, "Generation capacity must be multiple of region size"); 973 _max_capacity += increment; 974 975 // This detects arithmetic wraparound on _used 976 assert(ShenandoahHeap::heap()->is_full_gc_in_progress() || 977 (used_regions_size() >= used()), 978 "Affiliated regions must hold more than what is currently used"); 979 return _max_capacity; 980 } 981 982 size_t ShenandoahGeneration::set_capacity(size_t byte_size) { 983 shenandoah_assert_heaplocked_or_safepoint(); 984 _max_capacity = byte_size; 985 return _max_capacity; 986 } 987 988 size_t ShenandoahGeneration::decrease_capacity(size_t decrement) { 989 shenandoah_assert_heaplocked_or_safepoint(); 990 991 // We do not enforce that new capacity >= heap->min_size_for(this). The minimum generation size is treated as a rule of thumb 992 // which may be violated during certain transitions, such as when we are forcing transfers for the purpose of promoting regions 993 // in place. 994 assert(decrement % ShenandoahHeapRegion::region_size_bytes() == 0, "Generation capacity must be multiple of region size"); 995 assert(_max_capacity >= decrement, "Generation capacity cannot be negative"); 996 997 _max_capacity -= decrement; 998 999 // This detects arithmetic wraparound on _used 1000 assert(ShenandoahHeap::heap()->is_full_gc_in_progress() || 1001 (used_regions_size() >= used()), 1002 "Affiliated regions must hold more than what is currently used"); 1003 assert(ShenandoahHeap::heap()->is_full_gc_in_progress() || 1004 (_used <= _max_capacity), "Cannot use more than capacity"); 1005 assert(ShenandoahHeap::heap()->is_full_gc_in_progress() || 1006 (used_regions_size() <= _max_capacity), 1007 "Cannot use more than capacity"); 1008 return _max_capacity; 1009 } 1010 1011 void ShenandoahGeneration::record_success_concurrent(bool abbreviated) { 1012 heuristics()->record_success_concurrent(); 1013 ShenandoahHeap::heap()->shenandoah_policy()->record_success_concurrent(is_young(), abbreviated); 1014 }