/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahCollectionSetPreselector.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegionClosures.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"

#include "utilities/quickSort.hpp"

class ShenandoahResetUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _ctx;
public:
  ShenandoahResetUpdateRegionStateClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(_heap->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) override {
    if (r->is_active()) {
      // Reset live data and set TAMS optimistically. We would recheck these under the pause
      // anyway to capture any updates that happened since now.
      _ctx->capture_top_at_mark_start(r);
      r->clear_live_data();
    }
  }

  bool is_thread_safe() override { return true; }
};

class ShenandoahResetBitmapTask : public WorkerTask {
private:
  ShenandoahRegionIterator _regions;
  ShenandoahGeneration* _generation;

public:
  ShenandoahResetBitmapTask(ShenandoahGeneration* generation) :
    WorkerTask("Shenandoah Reset Bitmap"), _generation(generation) {}

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahMarkingContext* const ctx = heap->marking_context();
    while (region != nullptr) {
      auto const affiliation = region->affiliation();
      bool needs_reset = affiliation == FREE || _generation->contains(affiliation);
      if (needs_reset && heap->is_bitmap_slice_committed(region)) {
        ctx->clear_bitmap(region);
      }
      region = _regions.next();
    }
  }
};

// Copy the write-version of the card-table into the read-version, clearing the
// write-copy.
class ShenandoahMergeWriteTable : public ShenandoahHeapRegionClosure {
private:
  ShenandoahScanRemembered* _scanner;
public:
  ShenandoahMergeWriteTable(ShenandoahScanRemembered* scanner) : _scanner(scanner) {}

  void heap_region_do(ShenandoahHeapRegion* r) override {
    assert(r->is_old(), "Don't waste time doing this for non-old regions");
    _scanner->merge_write_table(r->bottom(), ShenandoahHeapRegion::region_size_words());
  }

  bool is_thread_safe() override {
    return true;
  }
};

class ShenandoahCopyWriteCardTableToRead : public ShenandoahHeapRegionClosure {
private:
  ShenandoahScanRemembered* _scanner;
public:
  ShenandoahCopyWriteCardTableToRead(ShenandoahScanRemembered* scanner) : _scanner(scanner) {}

  void heap_region_do(ShenandoahHeapRegion* region) override {
    assert(region->is_old(), "Don't waste time doing this for non-old regions");
    _scanner->reset_remset(region->bottom(), ShenandoahHeapRegion::region_size_words());
  }

  bool is_thread_safe() override { return true; }
};

// Add [TAMS, top) volume over young regions. Used to correct age 0 cohort census
// for adaptive tenuring when census is taken during marking.
// In non-product builds, for the purposes of verification, we also collect the total
// live objects in young regions as well.
class ShenandoahUpdateCensusZeroCohortClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
  // Population size units are words (not bytes)
  size_t _age0_pop;   // running tally of age0 population size
  size_t _total_pop;  // total live population size
public:
  explicit ShenandoahUpdateCensusZeroCohortClosure(ShenandoahMarkingContext* ctx)
    : _ctx(ctx), _age0_pop(0), _total_pop(0) {}

  void heap_region_do(ShenandoahHeapRegion* r) override {
    if (_ctx != nullptr && r->is_active()) {
      assert(r->is_young(), "Young regions only");
      HeapWord* tams = _ctx->top_at_mark_start(r);
      HeapWord* top = r->top();
      if (top > tams) {
        _age0_pop += pointer_delta(top, tams);
      }
      // TODO: check the significance of the _ctx != nullptr guard above; can it
      // spoof _total_pop in some corner cases?
      NOT_PRODUCT(_total_pop += r->get_live_data_words();)
    }
  }

  size_t get_age0_population() const { return _age0_pop; }
  size_t get_total_population() const { return _total_pop; }
};

void ShenandoahGeneration::confirm_heuristics_mode() {
  if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
    vm_exit_during_initialization(
      err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
              _heuristics->name()));
  }
  if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
    vm_exit_during_initialization(
      err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
              _heuristics->name()));
  }
}

ShenandoahHeuristics* ShenandoahGeneration::initialize_heuristics(ShenandoahMode* gc_mode) {
  _heuristics = gc_mode->initialize_heuristics(this);
  _heuristics->set_guaranteed_gc_interval(ShenandoahGuaranteedGCInterval);
  confirm_heuristics_mode();
  return _heuristics;
}

size_t ShenandoahGeneration::bytes_allocated_since_gc_start() const {
  return Atomic::load(&_bytes_allocated_since_gc_start);
}

void ShenandoahGeneration::reset_bytes_allocated_since_gc_start() {
  Atomic::store(&_bytes_allocated_since_gc_start, (size_t)0);
}

void ShenandoahGeneration::increase_allocated(size_t bytes) {
  Atomic::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed);
}

void ShenandoahGeneration::set_evacuation_reserve(size_t new_val) {
  _evacuation_reserve = new_val;
}

size_t ShenandoahGeneration::get_evacuation_reserve() const {
  return _evacuation_reserve;
}

void ShenandoahGeneration::augment_evacuation_reserve(size_t increment) {
  _evacuation_reserve += increment;
}

void ShenandoahGeneration::log_status(const char* msg) const {
  typedef LogTarget(Info, gc, ergo) LogGcInfo;

  if (!LogGcInfo::is_enabled()) {
    return;
  }

  // Not under a lock here, so read each of these once to make sure
  // byte size in proper unit and proper unit for byte size are consistent.
  const size_t v_used = used();
  const size_t v_used_regions = used_regions_size();
  const size_t v_soft_max_capacity = soft_max_capacity();
  const size_t v_max_capacity = max_capacity();
  const size_t v_available = available();
  const size_t v_humongous_waste = get_humongous_waste();

  const LogGcInfo target;
  LogStream ls(target);
  ls.print("%s: ", msg);
  if (_type != NON_GEN) {
    ls.print("%s generation ", name());
  }

  ls.print_cr("used: " PROPERFMT ", used regions: " PROPERFMT ", humongous waste: " PROPERFMT
              ", soft capacity: " PROPERFMT ", max capacity: " PROPERFMT ", available: " PROPERFMT,
              PROPERFMTARGS(v_used), PROPERFMTARGS(v_used_regions), PROPERFMTARGS(v_humongous_waste),
              PROPERFMTARGS(v_soft_max_capacity), PROPERFMTARGS(v_max_capacity), PROPERFMTARGS(v_available));
}

void ShenandoahGeneration::reset_mark_bitmap() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  heap->assert_gc_workers(heap->workers()->active_workers());

  set_mark_incomplete();

  ShenandoahResetBitmapTask task(this);
  heap->workers()->run_task(&task);
}

// The ideal is to swap the remembered set so the safepoint effort is no more than a few pointer manipulations.
// However, limitations in the implementation of the mutator write-barrier make it difficult to simply change the
// location of the card table. So the interim implementation of swap_remembered_set will copy the write-table
// onto the read-table and will then clear the write-table.
void ShenandoahGeneration::swap_remembered_set() {
  // Must be sure that marking is complete before we swap remembered set.
  ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
  heap->assert_gc_workers(heap->workers()->active_workers());
  shenandoah_assert_safepoint();

  ShenandoahOldGeneration* old_generation = heap->old_generation();
  ShenandoahCopyWriteCardTableToRead task(old_generation->card_scan());
  old_generation->parallel_heap_region_iterate(&task);
}

// Copy the write-version of the card-table into the read-version, clearing the
// write-version. The work is done at a safepoint and in parallel by the GC
// worker threads.
void ShenandoahGeneration::merge_write_table() {
  // This should only happen for degenerated cycles
  ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
  heap->assert_gc_workers(heap->workers()->active_workers());
  shenandoah_assert_safepoint();

  ShenandoahOldGeneration* old_generation = heap->old_generation();
  ShenandoahMergeWriteTable task(old_generation->card_scan());
  old_generation->parallel_heap_region_iterate(&task);
}

void ShenandoahGeneration::prepare_gc() {
  reset_mark_bitmap();

  // Capture Top At Mark Start for this generation (typically young) and reset each region's live data.
  ShenandoahResetUpdateRegionStateClosure cl;
  parallel_heap_region_iterate_free(&cl);
}

void ShenandoahGeneration::parallel_heap_region_iterate_free(ShenandoahHeapRegionClosure* cl) {
  ShenandoahHeap::heap()->parallel_heap_region_iterate(cl);
}

void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap) {
  shenandoah_assert_generational();

  ShenandoahOldGeneration* const old_generation = heap->old_generation();
  ShenandoahYoungGeneration* const young_generation = heap->young_generation();

  // During initialization and phase changes, it is more likely that fewer objects die young and old-gen
  // memory is not yet full (or is in the process of being replaced). During these times especially, it
  // is beneficial to loan memory from old-gen to young-gen during the evacuation and update-refs phases
  // of execution.

  // Calculate EvacuationReserve before PromotionReserve. Evacuation is more critical than promotion.
  // If we cannot evacuate old-gen, we will not be able to reclaim old-gen memory. Promotions are less
  // critical. If we cannot promote, there may be degradation of young-gen memory because old objects
  // accumulate there until they can be promoted. This increases the young-gen marking and evacuation work.

  // First priority is to reclaim the easy garbage out of young-gen.

  // maximum_young_evacuation_reserve is an upper bound on memory to be evacuated out of young
  const size_t maximum_young_evacuation_reserve = (young_generation->max_capacity() * ShenandoahEvacReserve) / 100;
  const size_t young_evacuation_reserve = MIN2(maximum_young_evacuation_reserve, young_generation->available_with_reserve());

  // maximum_old_evacuation_reserve is an upper bound on memory evacuated from old and evacuated to old (promoted),
  // clamped by the old generation space available.
  //
  // Here's the algebra.
  // Let SOEP = ShenandoahOldEvacRatioPercent,
  //       OE = old evac,
  //       YE = young evac, and
  //       TE = total evac = OE + YE
  // By definition:
  //            SOEP/100 = OE/TE
  //                     = OE/(OE+YE)
  //  => SOEP/(100-SOEP) = OE/((OE+YE)-OE)    // componendo-dividendo: If a/b = c/d, then a/(b-a) = c/(d-c)
  //                     = OE/YE
  //  =>              OE = YE*SOEP/(100-SOEP)

  // We have to be careful in the event that SOEP is set to 100 by the user.
  assert(ShenandoahOldEvacRatioPercent <= 100, "Error");
  const size_t old_available = old_generation->available();
  const size_t maximum_old_evacuation_reserve = (ShenandoahOldEvacRatioPercent == 100) ?
    old_available : MIN2((maximum_young_evacuation_reserve * ShenandoahOldEvacRatioPercent) / (100 - ShenandoahOldEvacRatioPercent),
                         old_available);
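
  // Worked example of the formula above (illustrative numbers only, not defaults): with
  // ShenandoahOldEvacRatioPercent = 25 and maximum_young_evacuation_reserve = 120 MB, the unclamped bound is
  // 120 MB * 25 / (100 - 25) = 40 MB, i.e. old evacuation is at most one third of young evacuation, which keeps
  // old at 25% of total evacuation (40 MB of 160 MB). The result is then clamped to old_available.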

  // Second priority is to reclaim garbage out of old-gen if there are old-gen collection candidates. Third priority
  // is to promote as much as we have room to promote. However, if old-gen memory is in short supply, this means young
  // GC is operating under "duress" and was unable to transfer the memory that we would normally expect. In this case,
  // old-gen will refrain from compacting itself in order to allow a quicker young-gen cycle (by avoiding the update-refs
  // through ALL of old-gen). If there is some memory available in old-gen, we will use this for promotions as promotions
  // do not add to the update-refs burden of GC.

  size_t old_evacuation_reserve, old_promo_reserve;
  if (is_global()) {
    // Global GC is typically triggered by user invocation of System.gc(), and typically indicates that there is lots
    // of garbage to be reclaimed because we are starting a new phase of execution. Marking for global GC may take
    // significantly longer than typical young marking because we must mark through all old objects. To expedite
    // evacuation and update-refs, we give emphasis to reclaiming garbage first, wherever that garbage is found.
    // Global GC will adjust generation sizes to accommodate the collection set it chooses.

    // Set old_promo_reserve to enforce that no regions are preselected for promotion. Such regions typically
    // have relatively high memory utilization. We still call select_aged_regions() because this will prepare for
    // promotions in place, if relevant.
    old_promo_reserve = 0;

    // Dedicate all available old memory to the old evacuation reserve. This may be small, because old-gen is only
    // expanded based on an existing mixed evacuation workload at the end of the previous GC cycle. We'll expand
    // the budget for evacuation of old during GLOBAL cset selection.
    old_evacuation_reserve = maximum_old_evacuation_reserve;
  } else if (old_generation->has_unprocessed_collection_candidates()) {
    // We reserved all old-gen memory at the end of the previous GC to hold anticipated evacuations to old-gen. If this
    // is a mixed evacuation, reserve all of this memory for compaction of old-gen and do not promote. Prioritize
    // compaction over promotion in order to defragment OLD so that it will be better prepared to efficiently receive
    // promoted memory.
    old_evacuation_reserve = maximum_old_evacuation_reserve;
    old_promo_reserve = 0;
  } else {
    // No old-gen collection candidates: make all old-evacuation memory available for promotion, but if we can't
    // use it all for promotion, we'll allow some evacuation.
    old_evacuation_reserve = 0;
    old_promo_reserve = maximum_old_evacuation_reserve;
  }
  assert(old_evacuation_reserve <= old_available, "Error");

  // We see too many old-evacuation failures if we force ourselves to evacuate into regions that are not initially empty.
  // So we limit the old-evacuation reserve to unfragmented memory. Even so, old-evacuation is free to fill in nooks and
  // crannies within existing partially used regions and it generally tries to do so.
  const size_t old_free_unfragmented = old_generation->free_unaffiliated_regions() * ShenandoahHeapRegion::region_size_bytes();
  if (old_evacuation_reserve > old_free_unfragmented) {
    const size_t delta = old_evacuation_reserve - old_free_unfragmented;
    old_evacuation_reserve -= delta;
    // Let promo consume fragments of old-gen memory if not global
    if (!is_global()) {
      old_promo_reserve += delta;
    }
  }

  // Preselect regions for promotion by evacuation (obtaining the live data to seed promoted_reserve),
  // and identify regions that will promote in place. These use the tenuring threshold.
  const size_t consumed_by_advance_promotion = select_aged_regions(old_promo_reserve);
  assert(consumed_by_advance_promotion <= maximum_old_evacuation_reserve, "Cannot promote more than available old-gen memory");

  // Note that old_promo_reserve may not be entirely consumed by advance promotion (consumed_by_advance_promotion may
  // be smaller). Do not transfer the unused remainder to old_evacuation_reserve, because this memory is likely very
  // fragmented and we do not want to increase the likelihood of old-evacuation failure.
  young_generation->set_evacuation_reserve(young_evacuation_reserve);
  old_generation->set_evacuation_reserve(old_evacuation_reserve);
  old_generation->set_promoted_reserve(consumed_by_advance_promotion);

  // There is no need to expand OLD because all memory used here was set aside at the end of the previous GC, except
  // in the case of a GLOBAL gc. During choose_collection_set() of GLOBAL, old will be expanded on demand.
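
  // Recap of the budgets established above (descriptive summary): young_evacuation_reserve bounds copying of young
  // cset regions whose objects remain in young, old_evacuation_reserve bounds mixed evacuation of old cset regions
  // within old, and consumed_by_advance_promotion bounds copying of the preselected aged young regions into old.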
}

// Having chosen the collection set, adjust the budgets for generational mode based on its composition. Note
// that young_generation->available() now knows about recently discovered immediate garbage.
//
void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap, ShenandoahCollectionSet* const collection_set) {
  shenandoah_assert_generational();
  // We may find that old_evacuation_reserve and/or loaned_for_young_evacuation are not fully consumed, in which case we
  // may be able to increase regions_available_to_loan.

  // The role of adjust_evacuation_budgets() is to compute the correct value of regions_available_to_loan and to make
  // effective use of this memory, including the remnant memory within these regions that may result from rounding the
  // loan to an integral number of regions. Excess memory that is available to be loaned is applied to an allocation
  // supplement, which allows mutators to allocate memory beyond the current capacity of young-gen on the promise that
  // the loan will be repaid as soon as we finish updating references for the recently evacuated collection set.

  // We cannot recalculate regions_available_to_loan by simply dividing old_generation->available() by region_size_bytes
  // because the available memory may be distributed between many partially occupied regions that are already holding old-gen
  // objects. Memory in partially occupied regions is not "available" to be loaned. Note that an increase in old-gen
  // available that results from a decrease in memory consumed by old evacuation is not necessarily available to be loaned
  // to young-gen.

  size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
  ShenandoahOldGeneration* const old_generation = heap->old_generation();
  ShenandoahYoungGeneration* const young_generation = heap->young_generation();

  size_t old_evacuated = collection_set->get_old_bytes_reserved_for_evacuation();
  size_t old_evacuated_committed = (size_t) (ShenandoahOldEvacWaste * double(old_evacuated));
  size_t old_evacuation_reserve = old_generation->get_evacuation_reserve();

  if (old_evacuated_committed > old_evacuation_reserve) {
    // This should only happen due to round-off errors when enforcing ShenandoahOldEvacWaste
    assert(old_evacuated_committed <= (33 * old_evacuation_reserve) / 32,
           "Round-off errors should be less than 3.125%%, committed: " SIZE_FORMAT ", reserved: " SIZE_FORMAT,
           old_evacuated_committed, old_evacuation_reserve);
    old_evacuated_committed = old_evacuation_reserve;
    // Leave old_evac_reserve as previously configured
  } else if (old_evacuated_committed < old_evacuation_reserve) {
    // This happens if the old-gen collection consumes less than the full budget.
    old_evacuation_reserve = old_evacuated_committed;
    old_generation->set_evacuation_reserve(old_evacuation_reserve);
  }

  size_t young_advance_promoted = collection_set->get_young_bytes_to_be_promoted();
  size_t young_advance_promoted_reserve_used = (size_t) (ShenandoahPromoEvacWaste * double(young_advance_promoted));

  size_t young_evacuated = collection_set->get_young_bytes_reserved_for_evacuation();
  size_t young_evacuated_reserve_used = (size_t) (ShenandoahEvacWaste * double(young_evacuated));
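
  // The waste factors above account for region-granular packing losses: evacuation cannot expect to fill every
  // target region completely. For example (illustrative numbers, not defaults): with ShenandoahEvacWaste = 1.2,
  // evacuating 100 MB of live young data consumes 120 MB of the young reserve.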
  size_t total_young_available = young_generation->available_with_reserve();
  assert(young_evacuated_reserve_used <= total_young_available, "Cannot evacuate more than is available in young");
  young_generation->set_evacuation_reserve(young_evacuated_reserve_used);

  size_t old_available = old_generation->available();
  // Now that we've established the collection set, we know how much memory is really required by old-gen for evacuation
  // and promotion reserves. Try shrinking OLD now in case that gives us a bit more runway for mutator allocations during
  // evac and update phases.
  size_t old_consumed = old_evacuated_committed + young_advance_promoted_reserve_used;

  if (old_available < old_consumed) {
    // This can happen due to round-off errors when adding the results of truncated integer arithmetic.
    // We've already truncated old_evacuated_committed. Truncate young_advance_promoted_reserve_used here.
    assert(young_advance_promoted_reserve_used <= (33 * (old_available - old_evacuated_committed)) / 32,
           "Round-off errors should be less than 3.125%%, committed: " SIZE_FORMAT ", reserved: " SIZE_FORMAT,
           young_advance_promoted_reserve_used, old_available - old_evacuated_committed);
    young_advance_promoted_reserve_used = old_available - old_evacuated_committed;
    old_consumed = old_evacuated_committed + young_advance_promoted_reserve_used;
  }

  assert(old_available >= old_consumed, "Cannot consume (" SIZE_FORMAT ") more than is available (" SIZE_FORMAT ")",
         old_consumed, old_available);
  size_t excess_old = old_available - old_consumed;
  size_t unaffiliated_old_regions = old_generation->free_unaffiliated_regions();
  size_t unaffiliated_old = unaffiliated_old_regions * region_size_bytes;
  assert(old_available >= unaffiliated_old, "Unaffiliated old is a subset of old available");

  // Make sure old_evac_committed is unaffiliated
  if (old_evacuated_committed > 0) {
    if (unaffiliated_old > old_evacuated_committed) {
      size_t giveaway = unaffiliated_old - old_evacuated_committed;
      size_t giveaway_regions = giveaway / region_size_bytes;  // round down
      if (giveaway_regions > 0) {
        excess_old = MIN2(excess_old, giveaway_regions * region_size_bytes);
      } else {
        excess_old = 0;
      }
    } else {
      excess_old = 0;
    }
  }

  // If we find that OLD has excess regions, give them back to YOUNG now to reduce the likelihood that we run out of
  // allocation runway during evacuation and update-refs.
  size_t regions_to_xfer = 0;
  if (excess_old > unaffiliated_old) {
    // We can give back all of unaffiliated_old (all of unaffiliated is excess)
    if (unaffiliated_old_regions > 0) {
      regions_to_xfer = unaffiliated_old_regions;
    }
  } else if (unaffiliated_old_regions > 0) {
    // excess_old < unaffiliated_old: we can give back MIN(excess_old/region_size_bytes, unaffiliated_old_regions)
    size_t excess_regions = excess_old / region_size_bytes;
    regions_to_xfer = MIN2(excess_regions, unaffiliated_old_regions);
  }

  if (regions_to_xfer > 0) {
    bool result = ShenandoahGenerationalHeap::cast(heap)->generation_sizer()->transfer_to_young(regions_to_xfer);
    assert(excess_old >= regions_to_xfer * region_size_bytes,
           "Cannot transfer (" SIZE_FORMAT ", " SIZE_FORMAT ") more than excess old (" SIZE_FORMAT ")",
           regions_to_xfer, region_size_bytes, excess_old);
    excess_old -= regions_to_xfer * region_size_bytes;
    log_debug(gc, ergo)("%s transferred " SIZE_FORMAT " excess regions to young before start of evacuation",
                        result ? "Successfully" : "Unsuccessfully", regions_to_xfer);
  }
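
  // Example of the transfer arithmetic (illustrative numbers): with 4 MB regions, excess_old = 6 MB and
  // unaffiliated_old_regions = 2, we transfer MIN2(6 MB / 4 MB, 2) = 1 region to young; the 2 MB of excess_old
  // remaining after the transfer stays in old and is folded into the promotion reserve below.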

  // Add in the excess_old memory to hold unanticipated promotions, if any. If there are more unanticipated
  // promotions than fit in reserved memory, they will be deferred until a future GC pass.
  size_t total_promotion_reserve = young_advance_promoted_reserve_used + excess_old;
  old_generation->set_promoted_reserve(total_promotion_reserve);
  old_generation->reset_promoted_expended();
}

typedef struct {
  ShenandoahHeapRegion* _region;
  size_t _live_data;
} AgedRegionData;

static int compare_by_aged_live(AgedRegionData a, AgedRegionData b) {
  if (a._live_data < b._live_data) {
    return -1;
  } else if (a._live_data > b._live_data) {
    return 1;
  } else {
    return 0;
  }
}

inline void assert_no_in_place_promotions() {
#ifdef ASSERT
  class ShenandoahNoInPlacePromotions : public ShenandoahHeapRegionClosure {
  public:
    void heap_region_do(ShenandoahHeapRegion* r) override {
      assert(r->get_top_before_promote() == nullptr,
             "Region " SIZE_FORMAT " should not be ready for in-place promotion", r->index());
    }
  } cl;
  ShenandoahHeap::heap()->heap_region_iterate(&cl);
#endif
}

// Preselect for inclusion into the collection set those regions whose age is at or above the tenure age and which
// contain more than ShenandoahOldGarbageThreshold percent garbage. We identify these regions by setting the
// appropriate entry of the collection set's preselected regions array to true. All entries are initialized to false
// before calling this function.
//
// During the subsequent selection of the collection set, we give priority to these promotion set candidates.
// Without this prioritization, we found that the aged regions tend to be ignored because they typically have
// much less garbage and much more live data than the recently allocated "eden" regions. When aged regions are
// repeatedly excluded from the collection set, the amount of live memory within the young generation tends to
// accumulate and this has the undesirable side effect of causing young-generation collections to require much more
// CPU and wall-clock time.
//
// A second benefit of treating aged regions differently than other regions during collection set selection is
// that this allows us to more accurately budget memory to hold the results of evacuation. Memory for evacuation
// of aged regions must be reserved in the old generation. Memory for evacuation of all other regions must be
// reserved in the young generation.
size_t ShenandoahGeneration::select_aged_regions(size_t old_available) {
  // There should be no regions configured for subsequent in-place-promotions carried over from the previous cycle.
  assert_no_in_place_promotions();

  auto const heap = ShenandoahGenerationalHeap::heap();
  bool* const candidate_regions_for_promotion_by_copy = heap->collection_set()->preselected_regions();
  ShenandoahMarkingContext* const ctx = heap->marking_context();

  const uint tenuring_threshold = heap->age_census()->tenuring_threshold();
  const size_t old_garbage_threshold = (ShenandoahHeapRegion::region_size_bytes() * ShenandoahOldGarbageThreshold) / 100;
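
  // For example (illustrative values, not defaults): with 4 MB regions and ShenandoahOldGarbageThreshold = 25,
  // old_garbage_threshold is 1 MB. Tenured regions with less garbage than this are considered for promotion in
  // place below; regions with more garbage become candidates for promotion by evacuation.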

  size_t old_consumed = 0;
  size_t promo_potential = 0;
  size_t candidates = 0;

  // Tracks the padding of space above top in regions eligible for promotion in place
  size_t promote_in_place_pad = 0;

  // Sort the promotion-eligible regions in order of increasing live-data-bytes so that we can first reclaim regions that
  // require less evacuation effort. This prioritizes garbage first, expanding the allocation pool early before we reclaim
  // regions that have more live data.
  const size_t num_regions = heap->num_regions();

  ResourceMark rm;
  AgedRegionData* sorted_regions = NEW_RESOURCE_ARRAY(AgedRegionData, num_regions);

  for (size_t i = 0; i < num_regions; i++) {
    ShenandoahHeapRegion* const r = heap->get_region(i);
    if (r->is_empty() || !r->has_live() || !r->is_young() || !r->is_regular()) {
      // skip over regions that aren't regular young with some live data
      continue;
    }
    if (r->age() >= tenuring_threshold) {
      if (r->garbage() < old_garbage_threshold) {
        // This tenure-worthy region has too little garbage, so we do not want to expend the copying effort to
        // reclaim the garbage; instead this region may be eligible for promotion-in-place to the
        // old generation.
        HeapWord* tams = ctx->top_at_mark_start(r);
        HeapWord* original_top = r->top();
        if (!heap->is_concurrent_old_mark_in_progress() && tams == original_top) {
          // No allocations from this region have been made during concurrent mark. It meets all the criteria
          // for in-place-promotion. Though we only need the value of top when we fill the end of the region,
          // we use this field to indicate that this region should be promoted in place during the evacuation
          // phase.
          r->save_top_before_promote();

          size_t remnant_size = r->free() / HeapWordSize;
          if (remnant_size > ShenandoahHeap::min_fill_size()) {
            ShenandoahHeap::fill_with_object(original_top, remnant_size);
            // Fill the remnant memory within this region to assure no allocations prior to promote in place. Otherwise,
            // newly allocated objects will not be parsable when promote in place tries to register them. Furthermore, any
            // new allocations would not necessarily be eligible for promotion. This addresses both issues.
            r->set_top(r->end());
            promote_in_place_pad += remnant_size * HeapWordSize;
          } else {
            // Since the remnant is so small that it cannot be filled, we don't have to worry about any accidental
            // allocations occurring within this region before the region is promoted in place.
          }
        }
        // Else, we do not promote this region (either in place or by copy) because it has received new allocations.

        // During evacuation, we exclude from promotion regions for which age > tenure threshold, garbage < garbage-threshold,
        // and get_top_before_promote() != tams
      } else {
        // Record this promotion-eligible candidate region. After sorting and selecting the best candidates below,
        // we may still decide to exclude this promotion-eligible region from the current collection set. If this
        // happens, we will consider this region as part of the anticipated promotion potential for the next GC
        // pass; see further below.
        sorted_regions[candidates]._region = r;
        sorted_regions[candidates++]._live_data = r->get_live_data_bytes();
      }
    } else {
      // We only evacuate & promote objects from regular regions whose garbage() is above old-garbage-threshold.
      // Objects in tenure-worthy regions with less garbage are promoted in place. These take a different path to
      // old-gen. Regions excluded from promotion because their garbage content is too low (causing us to anticipate that
      // the region would be promoted in place) may be eligible for evacuation promotion by the time promotion takes
      // place during a subsequent GC pass because more garbage is found within the region between now and then. This
      // should not happen if we are properly adapting the tenure age.
      // The theory behind the adaptive tenuring threshold is to choose the youngest age that demonstrates no
      // "significant" further loss of population since the previous age. Failing that, we expect the tenure age
      // to demonstrate linear population decay for at least two population samples, whereas we expect to observe
      // exponential population decay for ages younger than the tenure age.
      //
      // In the case that certain regions which were anticipated to be promoted in place need to be promoted by
      // evacuation, it may be the case that there is not sufficient reserve within old-gen to hold evacuation of
      // these regions. The likely outcome is that these regions will not be selected for evacuation or promotion
      // in the current cycle and we will anticipate that they will be promoted in the next cycle. This will cause
      // us to reserve more old-gen memory so that these objects can be promoted in the subsequent cycle.
      if (heap->is_aging_cycle() && (r->age() + 1 == tenuring_threshold)) {
        if (r->garbage() >= old_garbage_threshold) {
          promo_potential += r->get_live_data_bytes();
        }
      }
    }
    // Note that we keep going even if one region is excluded from selection.
    // Subsequent regions may be selected if they have smaller live data.
  }
  // Sort in increasing order according to live data bytes. Note that candidates represents the number of regions
  // that qualify to be promoted by evacuation.
  if (candidates > 0) {
    size_t selected_regions = 0;
    size_t selected_live = 0;
    QuickSort::sort<AgedRegionData>(sorted_regions, candidates, compare_by_aged_live, false);
    for (size_t i = 0; i < candidates; i++) {
      ShenandoahHeapRegion* const region = sorted_regions[i]._region;
      size_t region_live_data = sorted_regions[i]._live_data;
      size_t promotion_need = (size_t) (region_live_data * ShenandoahPromoEvacWaste);
      if (old_consumed + promotion_need <= old_available) {
        old_consumed += promotion_need;
        candidate_regions_for_promotion_by_copy[region->index()] = true;
        selected_regions++;
        selected_live += region_live_data;
      } else {
        // We rejected this promotable region from the collection set because we had no room to hold its copy.
        // Add this region to promo potential for next GC.
        promo_potential += region_live_data;
        assert(!candidate_regions_for_promotion_by_copy[region->index()], "Shouldn't be selected");
      }
      // We keep going even if one region is excluded from selection because we need to accumulate all eligible
      // regions that are not preselected into promo_potential
    }
    log_debug(gc)("Preselected " SIZE_FORMAT " regions containing " SIZE_FORMAT " live bytes,"
                  " consuming: " SIZE_FORMAT " of budgeted: " SIZE_FORMAT,
                  selected_regions, selected_live, old_consumed, old_available);
  }

  heap->old_generation()->set_pad_for_promote_in_place(promote_in_place_pad);
  heap->old_generation()->set_promotion_potential(promo_potential);
  return old_consumed;
}

void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahCollectionSet* collection_set = heap->collection_set();
  bool is_generational = heap->mode()->is_generational();

  assert(!heap->is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
  assert(!is_old(), "Only YOUNG and GLOBAL GC perform evacuations");
  {
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_update_region_states :
                                         ShenandoahPhaseTimings::degen_gc_final_update_region_states);
    ShenandoahFinalMarkUpdateRegionStateClosure cl(complete_marking_context());
    parallel_heap_region_iterate(&cl);

    if (is_young()) {
      // We always need to update the watermark for old regions. If there
      // are mixed collections pending, we also need to synchronize the
      // pinned status for old regions. Since we are already visiting every
      // old region here, go ahead and sync the pin status too.
      ShenandoahFinalMarkUpdateRegionStateClosure old_cl(nullptr);
      heap->old_generation()->parallel_heap_region_iterate(&old_cl);
    }
  }

  // Tally the census counts and compute the adaptive tenuring threshold
  if (is_generational && ShenandoahGenerationalAdaptiveTenuring && !ShenandoahGenerationalCensusAtEvac) {
    // Objects above TAMS weren't included in the age census. Since they were all
    // allocated in this cycle they belong in the age 0 cohort. We walk over all
    // young regions and sum the volume of objects between TAMS and top.
    ShenandoahUpdateCensusZeroCohortClosure age0_cl(complete_marking_context());
    heap->young_generation()->heap_region_iterate(&age0_cl);
    size_t age0_pop = age0_cl.get_age0_population();

    // Update the global census, including the missed age 0 cohort above,
    // along with the census done during marking, and compute the tenuring threshold.
    ShenandoahAgeCensus* census = ShenandoahGenerationalHeap::heap()->age_census();
    census->update_census(age0_pop);
#ifndef PRODUCT
    size_t total_pop = age0_cl.get_total_population();
    size_t total_census = census->get_total();
    // Usually total_pop > total_census, but not by too much.
    // We use integer division so anything up to just less than 2 is considered
    // reasonable, and the "+1" is to avoid divide-by-zero.
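    // E.g. (illustrative): total_pop = 1000 with total_census = 700 gives 1001/701 == 1 and is accepted,
    // while total_pop = 1500 with total_census = 700 gives 1501/701 == 2 and trips the assert.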
    assert((total_pop + 1) / (total_census + 1) == 1, "Extreme divergence: "
           SIZE_FORMAT "/" SIZE_FORMAT, total_pop, total_census);
#endif
  }

  {
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::choose_cset :
                                         ShenandoahPhaseTimings::degen_gc_choose_cset);

    collection_set->clear();
    ShenandoahHeapLocker locker(heap->lock());
    if (is_generational) {
      // Seed the collection set with resource area-allocated
      // preselected regions, which are removed when we exit this scope.
      ShenandoahCollectionSetPreselector preselector(collection_set, heap->num_regions());

      // Find the amount that will be promoted, regions that will be promoted in
      // place, and preselect older regions that will be promoted by evacuation.
      compute_evacuation_budgets(heap);

      // Choose the collection set, including the regions preselected above for
      // promotion into the old generation.
      _heuristics->choose_collection_set(collection_set);
      if (!collection_set->is_empty()) {
        // only make use of evacuation budgets when we are evacuating
        adjust_evacuation_budgets(heap, collection_set);
      }

      if (is_global()) {
        // We have just chosen a collection set for a global cycle. The mark bitmap covering old regions is complete, so
        // the remembered set scan can use that to avoid walking into garbage. When the next old mark begins, we will
        // use the mark bitmap to make the old regions parsable by coalescing and filling any unmarked objects. Thus,
        // we prepare for old collections by remembering which regions are old at this time. Note that any objects
        // promoted into old regions will be above TAMS, and so will be considered marked. However, free regions that
        // become old after this point will not be covered correctly by the mark bitmap, so we must be careful not to
        // coalesce those regions. Only the old regions which are not part of the collection set at this point are
        // eligible for coalescing. As implemented now, this has the side effect of possibly initiating mixed-evacuations
        // after a global cycle for old regions that were not included in this collection set.
        heap->old_generation()->prepare_for_mixed_collections_after_global_gc();
      }
    } else {
      _heuristics->choose_collection_set(collection_set);
    }
  }

  {
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset :
                                         ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
    ShenandoahHeapLocker locker(heap->lock());
    size_t young_cset_regions, old_cset_regions;

    // We are preparing for evacuation. At this time, we ignore cset region tallies.
    size_t first_old, last_old, num_old;
    heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
    // Free set construction uses reserve quantities, because they are known to be valid here
    heap->free_set()->finish_rebuild(young_cset_regions, old_cset_regions, num_old, true);
  }
}

bool ShenandoahGeneration::is_bitmap_clear() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahMarkingContext* context = heap->marking_context();
  const size_t num_regions = heap->num_regions();
  for (size_t idx = 0; idx < num_regions; idx++) {
    ShenandoahHeapRegion* r = heap->get_region(idx);
    if (contains(r) && r->is_affiliated()) {
      if (heap->is_bitmap_slice_committed(r) && (context->top_at_mark_start(r) > r->bottom()) &&
          !context->is_bitmap_range_within_region_clear(r->bottom(), r->end())) {
        return false;
      }
    }
  }
  return true;
}

bool ShenandoahGeneration::is_mark_complete() {
  return _is_marking_complete.is_set();
}

void ShenandoahGeneration::set_mark_complete() {
  _is_marking_complete.set();
}

void ShenandoahGeneration::set_mark_incomplete() {
  _is_marking_complete.unset();
}

ShenandoahMarkingContext* ShenandoahGeneration::complete_marking_context() {
  assert(is_mark_complete(), "Marking must be completed.");
  return ShenandoahHeap::heap()->marking_context();
}

void ShenandoahGeneration::cancel_marking() {
  log_info(gc)("Cancel marking: %s", name());
  if (is_concurrent_mark_in_progress()) {
    set_mark_incomplete();
  }
  _task_queues->clear();
  ref_processor()->abandon_partial_discovery();
  set_concurrent_mark_in_progress(false);
}

ShenandoahGeneration::ShenandoahGeneration(ShenandoahGenerationType type,
                                           uint max_workers,
                                           size_t max_capacity,
                                           size_t soft_max_capacity) :
  _type(type),
  _task_queues(new ShenandoahObjToScanQueueSet(max_workers)),
  _ref_processor(new ShenandoahReferenceProcessor(MAX2(max_workers, 1U))),
  _affiliated_region_count(0), _humongous_waste(0), _evacuation_reserve(0),
  _used(0), _bytes_allocated_since_gc_start(0),
  _max_capacity(max_capacity), _soft_max_capacity(soft_max_capacity),
  _heuristics(nullptr)
{
  _is_marking_complete.set();
  assert(max_workers > 0, "At least one queue");
  for (uint i = 0; i < max_workers; ++i) {
    ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
    _task_queues->register_queue(i, task_queue);
  }
}

ShenandoahGeneration::~ShenandoahGeneration() {
  for (uint i = 0; i < _task_queues->size(); ++i) {
    ShenandoahObjToScanQueue* q = _task_queues->queue(i);
    delete q;
  }
  delete _task_queues;
}

void ShenandoahGeneration::reserve_task_queues(uint workers) {
  _task_queues->reserve(workers);
}

ShenandoahObjToScanQueueSet* ShenandoahGeneration::old_gen_task_queues() const {
  return nullptr;
}

void ShenandoahGeneration::scan_remembered_set(bool is_concurrent) {
  assert(is_young(), "Should only scan remembered set for young generation.");

  ShenandoahGenerationalHeap* const heap = ShenandoahGenerationalHeap::heap();
  uint nworkers = heap->workers()->active_workers();
  reserve_task_queues(nworkers);

  ShenandoahReferenceProcessor* rp = ref_processor();
  ShenandoahRegionChunkIterator work_list(nworkers);
  ShenandoahScanRememberedTask task(task_queues(), old_gen_task_queues(), rp, &work_list, is_concurrent);
  heap->assert_gc_workers(nworkers);
  heap->workers()->run_task(&task);
  if (ShenandoahEnableCardStats) {
    ShenandoahScanRemembered* scanner = heap->old_generation()->card_scan();
    assert(scanner != nullptr, "Not generational");
    scanner->log_card_stats(nworkers, CARD_STAT_SCAN_RS);
  }
}

size_t ShenandoahGeneration::increment_affiliated_region_count() {
  shenandoah_assert_heaplocked_or_safepoint();
  // During full gc, multiple GC worker threads may change region affiliations without a lock. No lock is enforced
  // on read and write of _affiliated_region_count. At the end of full gc, a single thread overwrites the count with
  // a coherent value.
  _affiliated_region_count++;
  return _affiliated_region_count;
}

size_t ShenandoahGeneration::decrement_affiliated_region_count() {
  shenandoah_assert_heaplocked_or_safepoint();
  // During full gc, multiple GC worker threads may change region affiliations without a lock. No lock is enforced
  // on read and write of _affiliated_region_count. At the end of full gc, a single thread overwrites the count with
  // a coherent value.
  _affiliated_region_count--;
  assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
         (_used + _humongous_waste <= _affiliated_region_count * ShenandoahHeapRegion::region_size_bytes()),
         "used + humongous cannot exceed regions");
  return _affiliated_region_count;
}

size_t ShenandoahGeneration::increase_affiliated_region_count(size_t delta) {
  shenandoah_assert_heaplocked_or_safepoint();
  _affiliated_region_count += delta;
  return _affiliated_region_count;
}

size_t ShenandoahGeneration::decrease_affiliated_region_count(size_t delta) {
  shenandoah_assert_heaplocked_or_safepoint();
  assert(_affiliated_region_count >= delta, "Affiliated region count cannot be negative");

  _affiliated_region_count -= delta;
  assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
         (_used + _humongous_waste <= _affiliated_region_count * ShenandoahHeapRegion::region_size_bytes()),
         "used + humongous cannot exceed regions");
  return _affiliated_region_count;
}

void ShenandoahGeneration::establish_usage(size_t num_regions, size_t num_bytes, size_t humongous_waste) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
  _affiliated_region_count = num_regions;
  _used = num_bytes;
  _humongous_waste = humongous_waste;
}

void ShenandoahGeneration::increase_used(size_t bytes) {
  Atomic::add(&_used, bytes);
}

void ShenandoahGeneration::increase_humongous_waste(size_t bytes) {
  if (bytes > 0) {
    Atomic::add(&_humongous_waste, bytes);
  }
}

void ShenandoahGeneration::decrease_humongous_waste(size_t bytes) {
  if (bytes > 0) {
    assert(ShenandoahHeap::heap()->is_full_gc_in_progress() || (_humongous_waste >= bytes),
           "Waste (" SIZE_FORMAT ") cannot be negative (after subtracting " SIZE_FORMAT ")", _humongous_waste, bytes);
    Atomic::sub(&_humongous_waste, bytes);
  }
}

void ShenandoahGeneration::decrease_used(size_t bytes) {
  assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
         (_used >= bytes), "Cannot reduce bytes used by generation below zero");
  Atomic::sub(&_used, bytes);
}

size_t ShenandoahGeneration::used_regions() const {
  return _affiliated_region_count;
}

size_t ShenandoahGeneration::free_unaffiliated_regions() const {
  size_t result = max_capacity() / ShenandoahHeapRegion::region_size_bytes();
  if (_affiliated_region_count > result) {
    result = 0;
  } else {
    result -= _affiliated_region_count;
  }
  return result;
}

size_t ShenandoahGeneration::used_regions_size() const {
  return _affiliated_region_count * ShenandoahHeapRegion::region_size_bytes();
}

size_t ShenandoahGeneration::available() const {
  return available(max_capacity());
}

// For ShenandoahYoungGeneration, include the young available that may have been reserved for the Collector.
size_t ShenandoahGeneration::available_with_reserve() const {
  return available(max_capacity());
}

size_t ShenandoahGeneration::soft_available() const {
  return available(soft_max_capacity());
}

size_t ShenandoahGeneration::available(size_t capacity) const {
  size_t in_use = used() + get_humongous_waste();
  return in_use > capacity ? 0 : capacity - in_use;
}

size_t ShenandoahGeneration::increase_capacity(size_t increment) {
  shenandoah_assert_heaplocked_or_safepoint();

  // We do not enforce that new capacity >= heap->max_size_for(this). The maximum generation size is treated as a rule of thumb
  // which may be violated during certain transitions, such as when we are forcing transfers for the purpose of promoting regions
  // in place.
  assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
         (_max_capacity + increment <= ShenandoahHeap::heap()->max_capacity()), "Generation cannot be larger than heap size");
  assert(increment % ShenandoahHeapRegion::region_size_bytes() == 0, "Generation capacity must be multiple of region size");
  _max_capacity += increment;

  // This detects arithmetic wraparound on _used
  assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
         (_affiliated_region_count * ShenandoahHeapRegion::region_size_bytes() >= _used),
         "Affiliated regions must hold more than what is currently used");
  return _max_capacity;
}

size_t ShenandoahGeneration::set_capacity(size_t byte_size) {
  shenandoah_assert_heaplocked_or_safepoint();
  _max_capacity = byte_size;
  return _max_capacity;
}

size_t ShenandoahGeneration::decrease_capacity(size_t decrement) {
  shenandoah_assert_heaplocked_or_safepoint();

  // We do not enforce that new capacity >= heap->min_size_for(this). The minimum generation size is treated as a rule of thumb
  // which may be violated during certain transitions, such as when we are forcing transfers for the purpose of promoting regions
  // in place.
  assert(decrement % ShenandoahHeapRegion::region_size_bytes() == 0, "Generation capacity must be multiple of region size");
  assert(_max_capacity >= decrement, "Generation capacity cannot be negative");

  _max_capacity -= decrement;

  // This detects arithmetic wraparound on _used
  assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
         (_affiliated_region_count * ShenandoahHeapRegion::region_size_bytes() >= _used),
         "Affiliated regions must hold more than what is currently used");
  assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
         (_used <= _max_capacity), "Cannot use more than capacity");
  assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
         (_affiliated_region_count * ShenandoahHeapRegion::region_size_bytes() <= _max_capacity),
         "Cannot use more than capacity");
  return _max_capacity;
}

void ShenandoahGeneration::record_success_concurrent(bool abbreviated) {
  heuristics()->record_success_concurrent();
  ShenandoahHeap::heap()->shenandoah_policy()->record_success_concurrent(is_young(), abbreviated);
}