/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahCollectionSetPreselector.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahMarkClosures.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"

#include "utilities/quickSort.hpp"

class ShenandoahResetUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _ctx;
public:
  ShenandoahResetUpdateRegionStateClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(_heap->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) override {
    if (r->is_active()) {
      // Reset live data and set TAMS optimistically. We will recheck these under the pause
      // anyway, to capture any updates that happen between now and then.
      _ctx->capture_top_at_mark_start(r);
      r->clear_live_data();
    }
  }

  bool is_thread_safe() override { return true; }
};

class ShenandoahResetBitmapTask : public WorkerTask {
private:
  ShenandoahRegionIterator _regions;
  ShenandoahGeneration* _generation;

public:
  ShenandoahResetBitmapTask(ShenandoahGeneration* generation) :
    WorkerTask("Shenandoah Reset Bitmap"), _generation(generation) {}

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahMarkingContext* const ctx = heap->marking_context();
    while (region != nullptr) {
      auto const affiliation = region->affiliation();
      bool needs_reset = affiliation == FREE || _generation->contains(affiliation);
      if (needs_reset && heap->is_bitmap_slice_committed(region)) {
        ctx->clear_bitmap(region);
      }
      region = _regions.next();
    }
  }
};

// Copy the write-version of the card-table into the read-version, clearing the
// write-copy.
class ShenandoahMergeWriteTable : public ShenandoahHeapRegionClosure {
private:
  ShenandoahScanRemembered* _scanner;
public:
  ShenandoahMergeWriteTable(ShenandoahScanRemembered* scanner) : _scanner(scanner) {}

  void heap_region_do(ShenandoahHeapRegion* r) override {
    assert(r->is_old(), "Don't waste time doing this for non-old regions");
    _scanner->merge_write_table(r->bottom(), ShenandoahHeapRegion::region_size_words());
  }

  bool is_thread_safe() override {
    return true;
  }
};

class ShenandoahCopyWriteCardTableToRead : public ShenandoahHeapRegionClosure {
private:
  ShenandoahScanRemembered* _scanner;
public:
  ShenandoahCopyWriteCardTableToRead(ShenandoahScanRemembered* scanner) : _scanner(scanner) {}

  void heap_region_do(ShenandoahHeapRegion* region) override {
    assert(region->is_old(), "Don't waste time doing this for non-old regions");
    _scanner->reset_remset(region->bottom(), ShenandoahHeapRegion::region_size_words());
  }

  bool is_thread_safe() override { return true; }
};

void ShenandoahGeneration::confirm_heuristics_mode() {
  if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
    vm_exit_during_initialization(
            err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                    _heuristics->name()));
  }
  if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
    vm_exit_during_initialization(
            err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                    _heuristics->name()));
  }
}

ShenandoahHeuristics* ShenandoahGeneration::initialize_heuristics(ShenandoahMode* gc_mode) {
  _heuristics = gc_mode->initialize_heuristics(this);
  _heuristics->set_guaranteed_gc_interval(ShenandoahGuaranteedGCInterval);
  confirm_heuristics_mode();
  return _heuristics;
}

size_t ShenandoahGeneration::bytes_allocated_since_gc_start() const {
  return Atomic::load(&_bytes_allocated_since_gc_start);
}

void ShenandoahGeneration::reset_bytes_allocated_since_gc_start() {
  Atomic::store(&_bytes_allocated_since_gc_start, (size_t)0);
}

void ShenandoahGeneration::increase_allocated(size_t bytes) {
  Atomic::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed);
}

void ShenandoahGeneration::set_evacuation_reserve(size_t new_val) {
  _evacuation_reserve = new_val;
}

size_t ShenandoahGeneration::get_evacuation_reserve() const {
  return _evacuation_reserve;
}

void ShenandoahGeneration::augment_evacuation_reserve(size_t increment) {
  _evacuation_reserve += increment;
}

void ShenandoahGeneration::log_status(const char* msg) const {
  typedef LogTarget(Info, gc, ergo) LogGcInfo;

  if (!LogGcInfo::is_enabled()) {
    return;
  }

  // Not under a lock here, so read each of these once to make sure
  // byte size in proper unit and proper unit for byte size are consistent.
  size_t v_used = used();
  size_t v_used_regions = used_regions_size();
  size_t v_soft_max_capacity = soft_max_capacity();
  size_t v_max_capacity = max_capacity();
  size_t v_available = available();
  size_t v_humongous_waste = get_humongous_waste();
  LogGcInfo::print("%s: %s generation used: " SIZE_FORMAT "%s, used regions: " SIZE_FORMAT "%s, "
                   "humongous waste: " SIZE_FORMAT "%s, soft capacity: " SIZE_FORMAT "%s, max capacity: " SIZE_FORMAT "%s, "
                   "available: " SIZE_FORMAT "%s", msg, name(),
                   byte_size_in_proper_unit(v_used),              proper_unit_for_byte_size(v_used),
                   byte_size_in_proper_unit(v_used_regions),      proper_unit_for_byte_size(v_used_regions),
                   byte_size_in_proper_unit(v_humongous_waste),   proper_unit_for_byte_size(v_humongous_waste),
                   byte_size_in_proper_unit(v_soft_max_capacity), proper_unit_for_byte_size(v_soft_max_capacity),
                   byte_size_in_proper_unit(v_max_capacity),      proper_unit_for_byte_size(v_max_capacity),
                   byte_size_in_proper_unit(v_available),         proper_unit_for_byte_size(v_available));
}

void ShenandoahGeneration::reset_mark_bitmap() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  heap->assert_gc_workers(heap->workers()->active_workers());

  set_mark_incomplete();

  ShenandoahResetBitmapTask task(this);
  heap->workers()->run_task(&task);
}

// The ideal is to swap the remembered set so the safepoint effort is no more than a few pointer manipulations.
// However, limitations in the implementation of the mutator write-barrier make it difficult to simply change the
// location of the card table. So the interim implementation of swap_remembered_set will copy the write-table
// onto the read-table and will then clear the write-table.
void ShenandoahGeneration::swap_remembered_set() {
  // Must be sure that marking is complete before we swap remembered set.
  ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
  heap->assert_gc_workers(heap->workers()->active_workers());
  shenandoah_assert_safepoint();

  ShenandoahOldGeneration* old_generation = heap->old_generation();
  ShenandoahCopyWriteCardTableToRead task(old_generation->card_scan());
  old_generation->parallel_heap_region_iterate(&task);
}
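
// A minimal illustrative sketch (hypothetical names, not actual VM code) of the trade-off described
// above: the ideal swap would be a couple of pointer assignments at the safepoint, e.g.
//   CardValue* tmp = _read_table;
//   _read_table    = _write_table;
//   _write_table   = tmp;
// but because the mutator write-barrier bakes the write-table address into generated code, the interim
// implementation instead copies write -> read and re-cleans the write table, region by region and in
// parallel, via ShenandoahCopyWriteCardTableToRead above.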
// Copy the write-version of the card-table into the read-version, clearing the
// write-version. The work is done at a safepoint and in parallel by the GC
// worker threads.
void ShenandoahGeneration::merge_write_table() {
  // This should only happen for degenerated cycles
  ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
  heap->assert_gc_workers(heap->workers()->active_workers());
  shenandoah_assert_safepoint();

  ShenandoahOldGeneration* old_generation = heap->old_generation();
  ShenandoahMergeWriteTable task(old_generation->card_scan());
  old_generation->parallel_heap_region_iterate(&task);
}

void ShenandoahGeneration::prepare_gc() {
  reset_mark_bitmap();

  // Capture Top At Mark Start for this generation (typically young) and reset live data.
  ShenandoahResetUpdateRegionStateClosure cl;
  parallel_heap_region_iterate_free(&cl);
}

void ShenandoahGeneration::parallel_heap_region_iterate_free(ShenandoahHeapRegionClosure* cl) {
  ShenandoahHeap::heap()->parallel_heap_region_iterate(cl);
}

void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap) {
  shenandoah_assert_generational();

  ShenandoahOldGeneration* const old_generation = heap->old_generation();
  ShenandoahYoungGeneration* const young_generation = heap->young_generation();

  // During initialization and phase changes, it is more likely that fewer objects die young and old-gen
  // memory is not yet full (or is in the process of being replaced). During these times especially, it
  // is beneficial to loan memory from old-gen to young-gen during the evacuation and update-refs phases
  // of execution.

  // Calculate EvacuationReserve before PromotionReserve. Evacuation is more critical than promotion.
  // If we cannot evacuate old-gen, we will not be able to reclaim old-gen memory. Promotions are less
  // critical. If we cannot promote, there may be degradation of young-gen memory because old objects
  // accumulate there until they can be promoted. This increases the young-gen marking and evacuation work.

  // First priority is to reclaim the easy garbage out of young-gen.

  // maximum_young_evacuation_reserve is an upper bound on memory to be evacuated out of young
  const size_t maximum_young_evacuation_reserve = (young_generation->max_capacity() * ShenandoahEvacReserve) / 100;
  const size_t young_evacuation_reserve = MIN2(maximum_young_evacuation_reserve, young_generation->available_with_reserve());

  // maximum_old_evacuation_reserve is an upper bound on memory evacuated from old and evacuated to old (promoted),
  // clamped by the old generation space available.
  //
  // Here's the algebra.
  // Let SOEP = ShenandoahOldEvacRatioPercent,
  //       OE = old evac,
  //       YE = young evac, and
  //       TE = total evac = OE + YE
  // By definition:
  //             SOEP/100 = OE/TE
  //                      = OE/(OE+YE)
  //   => SOEP/(100-SOEP) = OE/((OE+YE)-OE)   // componendo-dividendo: If a/b = c/d, then a/(b-a) = c/(d-c)
  //                      = OE/YE
  //   =>              OE = YE*SOEP/(100-SOEP)

  // We have to be careful in the event that SOEP is set to 100 by the user.
  assert(ShenandoahOldEvacRatioPercent <= 100, "Error");
  const size_t old_available = old_generation->available();
  const size_t maximum_old_evacuation_reserve = (ShenandoahOldEvacRatioPercent == 100) ?
      old_available :
      MIN2((maximum_young_evacuation_reserve * ShenandoahOldEvacRatioPercent) / (100 - ShenandoahOldEvacRatioPercent),
           old_available);

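  // Worked example (illustrative values only): with ShenandoahOldEvacRatioPercent = 25 and a young
  // reserve of YE = 96 MB, the formula caps old evacuation at OE = 96 * 25 / (100 - 25) = 32 MB.
  // Total evacuation is then TE = OE + YE = 128 MB, and OE/TE = 32/128 = 25%, matching the
  // definition SOEP/100 = OE/TE (subject to the old_available clamp above).
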
  // Second priority is to reclaim garbage out of old-gen if there are old-gen collection candidates. Third priority
  // is to promote as much as we have room to promote. However, if old-gen memory is in short supply, this means young
  // GC is operating under "duress" and was unable to transfer the memory that we would normally expect. In this case,
  // old-gen will refrain from compacting itself in order to allow a quicker young-gen cycle (by avoiding the update-refs
  // through ALL of old-gen). If there is some memory available in old-gen, we will use this for promotions as promotions
  // do not add to the update-refs burden of GC.

  size_t old_evacuation_reserve, old_promo_reserve;
  if (is_global()) {
    // Global GC is typically triggered by user invocation of System.gc(), and typically indicates that there is lots
    // of garbage to be reclaimed because we are starting a new phase of execution. Marking for global GC may take
    // significantly longer than typical young marking because we must mark through all old objects. To expedite
    // evacuation and update-refs, we give emphasis to reclaiming garbage first, wherever that garbage is found.
    // Global GC will adjust generation sizes to accommodate the collection set it chooses.

    // Set old_promo_reserve to enforce that no regions are preselected for promotion. Such regions typically
    // have relatively high memory utilization. We still call select_aged_regions() because this will prepare for
    // promotions in place, if relevant.
    old_promo_reserve = 0;

    // Dedicate all available old memory to the old evacuation reserve. This may be small, because old-gen is only
    // expanded based on an existing mixed evacuation workload at the end of the previous GC cycle. We'll expand
    // the budget for evacuation of old during GLOBAL cset selection.
    old_evacuation_reserve = maximum_old_evacuation_reserve;
  } else if (old_generation->has_unprocessed_collection_candidates()) {
    // We reserved all old-gen memory at end of previous GC to hold anticipated evacuations to old-gen. If this is
    // a mixed evacuation, reserve all of this memory for compaction of old-gen and do not promote. Prioritize compaction
    // over promotion in order to defragment OLD so that it will be better prepared to efficiently receive promoted memory.
    old_evacuation_reserve = maximum_old_evacuation_reserve;
    old_promo_reserve = 0;
  } else {
    // Make all old-evacuation memory available for promotion, but if we can't use it all for promotion, we'll allow some evacuation.
    old_evacuation_reserve = 0;
    old_promo_reserve = maximum_old_evacuation_reserve;
  }
  assert(old_evacuation_reserve <= old_available, "Error");

  // We see too many old-evacuation failures if we force ourselves to evacuate into regions that are not initially empty.
  // So we limit the old-evacuation reserve to unfragmented memory. Even so, old-evacuation is free to fill in nooks and
  // crannies within existing partially used regions and it generally tries to do so.
  const size_t old_free_unfragmented = old_generation->free_unaffiliated_regions() * ShenandoahHeapRegion::region_size_bytes();
  if (old_evacuation_reserve > old_free_unfragmented) {
    const size_t delta = old_evacuation_reserve - old_free_unfragmented;
    old_evacuation_reserve -= delta;
    // Let promo consume fragments of old-gen memory if not global
    if (!is_global()) {
      old_promo_reserve += delta;
    }
  }

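  // Worked example (illustrative values only): with 4 MB regions and 10 unaffiliated old regions,
  // old_free_unfragmented = 40 MB. If old_evacuation_reserve were 46 MB, the 6 MB delta is trimmed
  // from the evacuation reserve and, on non-global cycles, shifted to old_promo_reserve: promotions
  // can tolerate the fragmented tails of partially used regions, while old evacuations cannot.
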
  // Preselect regions for promotion by evacuation (obtaining the live data to seed promoted_reserve),
  // and identify regions that will promote in place. These use the tenuring threshold.
  const size_t consumed_by_advance_promotion = select_aged_regions(old_promo_reserve);
  assert(consumed_by_advance_promotion <= maximum_old_evacuation_reserve, "Cannot promote more than available old-gen memory");

  // Note that old_promo_reserve might not be entirely consumed by advance promotion. Do not transfer the
  // unused portion to old_evacuation_reserve because this memory is likely very fragmented, and we do not
  // want to increase the likelihood of old evacuation failure.
  young_generation->set_evacuation_reserve(young_evacuation_reserve);
  old_generation->set_evacuation_reserve(old_evacuation_reserve);
  old_generation->set_promoted_reserve(consumed_by_advance_promotion);

  // There is no need to expand OLD because all memory used here was set aside at end of previous GC, except in the
  // case of a GLOBAL gc. During choose_collection_set() of GLOBAL, old will be expanded on demand.
}

// Having chosen the collection set, adjust the budgets for generational mode based on its composition. Note
// that young_generation->available() now knows about recently discovered immediate garbage.
//
void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap, ShenandoahCollectionSet* const collection_set) {
  shenandoah_assert_generational();
  // We may find that old_evacuation_reserve and/or loaned_for_young_evacuation are not fully consumed, in which case we may
  // be able to increase regions_available_to_loan.

  // The role of adjust_evacuation_budgets() is to compute the correct value of regions_available_to_loan and to make
  // effective use of this memory, including the remnant memory within these regions that may result from rounding the loan
  // to an integral number of regions. Excess memory that is available to be loaned is applied to an allocation supplement,
  // which allows mutators to allocate memory beyond the current capacity of young-gen on the promise that the loan
  // will be repaid as soon as we finish updating references for the recently evacuated collection set.

  // We cannot recalculate regions_available_to_loan by simply dividing old_generation->available() by region_size_bytes
  // because the available memory may be distributed between many partially occupied regions that are already holding old-gen
  // objects. Memory in partially occupied regions is not "available" to be loaned. Note that an increase in old-gen
  // available that results from a decrease in memory consumed by old evacuation is not necessarily available to be loaned
  // to young-gen.
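
  // Worked example (illustrative values only): if the collection set holds old_evacuated = 8 MB of
  // live old data and ShenandoahOldEvacWaste were 1.25, then old_evacuated_committed = 10 MB must be
  // set aside, the extra 2 MB covering alignment padding and fragmentation waste in the evacuation
  // destinations. The same pattern applies to ShenandoahPromoEvacWaste and ShenandoahEvacWaste below.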

  size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
  ShenandoahOldGeneration* const old_generation = heap->old_generation();
  ShenandoahYoungGeneration* const young_generation = heap->young_generation();

  size_t old_evacuated = collection_set->get_old_bytes_reserved_for_evacuation();
  size_t old_evacuated_committed = (size_t) (ShenandoahOldEvacWaste * double(old_evacuated));
  size_t old_evacuation_reserve = old_generation->get_evacuation_reserve();

  if (old_evacuated_committed > old_evacuation_reserve) {
    // This should only happen due to round-off errors when enforcing ShenandoahOldEvacWaste
    assert(old_evacuated_committed <= (33 * old_evacuation_reserve) / 32,
           "Round-off errors should be less than 3.125%%, committed: " SIZE_FORMAT ", reserved: " SIZE_FORMAT,
           old_evacuated_committed, old_evacuation_reserve);
    old_evacuated_committed = old_evacuation_reserve;
    // Leave old_evac_reserve as previously configured
  } else if (old_evacuated_committed < old_evacuation_reserve) {
    // This happens if the old-gen collection consumes less than the full budget.
    old_evacuation_reserve = old_evacuated_committed;
    old_generation->set_evacuation_reserve(old_evacuation_reserve);
  }

  size_t young_advance_promoted = collection_set->get_young_bytes_to_be_promoted();
  size_t young_advance_promoted_reserve_used = (size_t) (ShenandoahPromoEvacWaste * double(young_advance_promoted));

  size_t young_evacuated = collection_set->get_young_bytes_reserved_for_evacuation();
  size_t young_evacuated_reserve_used = (size_t) (ShenandoahEvacWaste * double(young_evacuated));

  size_t total_young_available = young_generation->available_with_reserve();
  assert(young_evacuated_reserve_used <= total_young_available, "Cannot evacuate more than is available in young");
  young_generation->set_evacuation_reserve(young_evacuated_reserve_used);

  size_t old_available = old_generation->available();
  // Now that we've established the collection set, we know how much memory is really required by old-gen for evacuation
  // and promotion reserves. Try shrinking OLD now in case that gives us a bit more runway for mutator allocations during
  // evac and update phases.
  size_t old_consumed = old_evacuated_committed + young_advance_promoted_reserve_used;

  if (old_available < old_consumed) {
    // This can happen due to round-off errors when adding the results of truncated integer arithmetic.
    // We've already truncated old_evacuated_committed. Truncate young_advance_promoted_reserve_used here.
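    // The 33/32 bound in these asserts equals 1.03125, i.e. the tolerated round-off slack is 3.125%
    // of the reserve: for example, a 32 MB reserve tolerates up to 33 MB committed before the assert
    // fires.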
    assert(young_advance_promoted_reserve_used <= (33 * (old_available - old_evacuated_committed)) / 32,
           "Round-off errors should be less than 3.125%%, committed: " SIZE_FORMAT ", reserved: " SIZE_FORMAT,
           young_advance_promoted_reserve_used, old_available - old_evacuated_committed);
    young_advance_promoted_reserve_used = old_available - old_evacuated_committed;
    old_consumed = old_evacuated_committed + young_advance_promoted_reserve_used;
  }

  assert(old_available >= old_consumed, "Cannot consume (" SIZE_FORMAT ") more than is available (" SIZE_FORMAT ")",
         old_consumed, old_available);
  size_t excess_old = old_available - old_consumed;
  size_t unaffiliated_old_regions = old_generation->free_unaffiliated_regions();
  size_t unaffiliated_old = unaffiliated_old_regions * region_size_bytes;
  assert(old_available >= unaffiliated_old, "Unaffiliated old is a subset of old available");

  // Make sure old_evac_committed is unaffiliated
  if (old_evacuated_committed > 0) {
    if (unaffiliated_old > old_evacuated_committed) {
      size_t giveaway = unaffiliated_old - old_evacuated_committed;
      size_t giveaway_regions = giveaway / region_size_bytes;  // round down
      if (giveaway_regions > 0) {
        excess_old = MIN2(excess_old, giveaway_regions * region_size_bytes);
      } else {
        excess_old = 0;
      }
    } else {
      excess_old = 0;
    }
  }
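
  // Worked example (illustrative values only): with 4 MB regions, unaffiliated_old = 40 MB and
  // old_evacuated_committed = 26 MB leave giveaway = 14 MB, which rounds down to 3 whole regions
  // (12 MB). excess_old is therefore capped at 12 MB, so the committed old evacuation budget is
  // always backed by whole empty (unaffiliated) regions.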

  // If we find that OLD has excess regions, give them back to YOUNG now to reduce likelihood we run out of allocation
  // runway during evacuation and update-refs.
  size_t regions_to_xfer = 0;
  if (excess_old > unaffiliated_old) {
    // We can give back all of unaffiliated_old (all of unaffiliated is excess).
    if (unaffiliated_old_regions > 0) {
      regions_to_xfer = unaffiliated_old_regions;
    }
  } else if (unaffiliated_old_regions > 0) {
    // excess_old < unaffiliated old: we can give back MIN(excess_old/region_size_bytes, unaffiliated_old_regions)
    size_t excess_regions = excess_old / region_size_bytes;
    regions_to_xfer = MIN2(excess_regions, unaffiliated_old_regions);
  }

  if (regions_to_xfer > 0) {
    bool result = ShenandoahGenerationalHeap::cast(heap)->generation_sizer()->transfer_to_young(regions_to_xfer);
    assert(excess_old >= regions_to_xfer * region_size_bytes,
           "Cannot transfer (" SIZE_FORMAT ", " SIZE_FORMAT ") more than excess old (" SIZE_FORMAT ")",
           regions_to_xfer, region_size_bytes, excess_old);
    excess_old -= regions_to_xfer * region_size_bytes;
    log_debug(gc, ergo)("%s transferred " SIZE_FORMAT " excess regions to young before start of evacuation",
                        result ? "Successfully" : "Unsuccessfully", regions_to_xfer);
  }

  // Add in the excess_old memory to hold unanticipated promotions, if any. If there are more unanticipated
  // promotions than fit in reserved memory, they will be deferred until a future GC pass.
  size_t total_promotion_reserve = young_advance_promoted_reserve_used + excess_old;
  old_generation->set_promoted_reserve(total_promotion_reserve);
  old_generation->reset_promoted_expended();
}

typedef struct {
  ShenandoahHeapRegion* _region;
  size_t _live_data;
} AgedRegionData;

static int compare_by_aged_live(AgedRegionData a, AgedRegionData b) {
  if (a._live_data < b._live_data) {
    return -1;
  } else if (a._live_data > b._live_data) {
    return 1;
  } else {
    return 0;
  }
}

inline void assert_no_in_place_promotions() {
#ifdef ASSERT
  class ShenandoahNoInPlacePromotions : public ShenandoahHeapRegionClosure {
  public:
    void heap_region_do(ShenandoahHeapRegion* r) override {
      assert(r->get_top_before_promote() == nullptr,
             "Region " SIZE_FORMAT " should not be ready for in-place promotion", r->index());
    }
  } cl;
  ShenandoahHeap::heap()->heap_region_iterate(&cl);
#endif
}

// Preselect for inclusion into the collection set regions whose age is at or above tenure age and which contain more than
// ShenandoahOldGarbageThreshold amounts of garbage. We identify these regions by setting the appropriate entry of
// the collection set's preselected regions array to true. All entries are initialized to false before calling this
// function.
//
// During the subsequent selection of the collection set, we give priority to these promotion set candidates.
// Without this prioritization, we found that the aged regions tend to be ignored because they typically have
// much less garbage and much more live data than the recently allocated "eden" regions. When aged regions are
// repeatedly excluded from the collection set, the amount of live memory within the young generation tends to
// accumulate and this has the undesirable side effect of causing young-generation collections to require much more
// CPU and wall-clock time.
//
// A second benefit of treating aged regions differently than other regions during collection set selection is
// that this allows us to more accurately budget memory to hold the results of evacuation. Memory for evacuation
// of aged regions must be reserved in the old generation. Memory for evacuation of all other regions must be
// reserved in the young generation.
size_t ShenandoahGeneration::select_aged_regions(size_t old_available) {
  // There should be no regions configured for subsequent in-place-promotions carried over from the previous cycle.
  assert_no_in_place_promotions();

  auto const heap = ShenandoahGenerationalHeap::heap();
  bool* const candidate_regions_for_promotion_by_copy = heap->collection_set()->preselected_regions();
  ShenandoahMarkingContext* const ctx = heap->marking_context();

  const uint tenuring_threshold = heap->age_census()->tenuring_threshold();
  const size_t old_garbage_threshold = (ShenandoahHeapRegion::region_size_bytes() * ShenandoahOldGarbageThreshold) / 100;

  size_t old_consumed = 0;
  size_t promo_potential = 0;
  size_t candidates = 0;

  // Tracks the padding of space above top in regions eligible for promotion in place
  size_t promote_in_place_pad = 0;

  // Sort the promotion-eligible regions in order of increasing live-data-bytes so that we can first reclaim regions that
  // require less evacuation effort. This prioritizes garbage first, expanding the allocation pool early before we reclaim
  // regions that have more live data.
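  // For example (illustrative): candidates with live data {8 MB, 1 MB, 3 MB} are visited in the order
  // {1 MB, 3 MB, 8 MB}, so the candidates with the least live data (and typically the most garbage)
  // are preselected first, while the promotion budget lasts.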
  const size_t num_regions = heap->num_regions();

  ResourceMark rm;
  AgedRegionData* sorted_regions = NEW_RESOURCE_ARRAY(AgedRegionData, num_regions);

  for (size_t i = 0; i < num_regions; i++) {
    ShenandoahHeapRegion* const r = heap->get_region(i);
    if (r->is_empty() || !r->has_live() || !r->is_young() || !r->is_regular()) {
      // Skip over regions that aren't regular young regions with some live data.
      continue;
    }
    if (r->age() >= tenuring_threshold) {
      if (r->garbage() < old_garbage_threshold) {
        // This tenure-worthy region has too little garbage, so we do not want to expend the copying effort to
        // reclaim the garbage; instead this region may be eligible for promotion-in-place to the
        // old generation.
        HeapWord* tams = ctx->top_at_mark_start(r);
        HeapWord* original_top = r->top();
        if (!heap->is_concurrent_old_mark_in_progress() && tams == original_top) {
          // No allocations from this region have been made during concurrent mark. It meets all the criteria
          // for in-place-promotion. Though we only need the value of top when we fill the end of the region,
          // we use this field to indicate that this region should be promoted in place during the evacuation
          // phase.
          r->save_top_before_promote();

          size_t remnant_size = r->free() / HeapWordSize;
          if (remnant_size > ShenandoahHeap::min_fill_size()) {
            ShenandoahHeap::fill_with_object(original_top, remnant_size);
            // Fill the remnant memory within this region to assure no allocations prior to promote in place. Otherwise,
            // newly allocated objects will not be parsable when promote in place tries to register them. Furthermore, any
            // new allocations would not necessarily be eligible for promotion. This addresses both issues.
            r->set_top(r->end());
            promote_in_place_pad += remnant_size * HeapWordSize;
          } else {
            // Since the remnant is so small that it cannot be filled, we don't have to worry about any accidental
            // allocations occurring within this region before the region is promoted in place.
          }
        }
        // Else, we do not promote this region (either in place or by copy) because it has received new allocations.

        // During evacuation, we exclude from promotion regions for which age > tenure threshold, garbage < garbage-threshold,
        // and get_top_before_promote() != tams.
      } else {
        // Record this promotion-eligible candidate region. After sorting and selecting the best candidates below,
        // we may still decide to exclude this promotion-eligible region from the current collection set. If this
        // happens, we will consider this region as part of the anticipated promotion potential for the next GC
        // pass; see further below.
        sorted_regions[candidates]._region = r;
        sorted_regions[candidates++]._live_data = r->get_live_data_bytes();
      }
    } else {
      // We only evacuate & promote objects from regular regions whose garbage() is above old-garbage-threshold.
      // Objects in tenure-worthy regions with less garbage are promoted in place. These take a different path to
      // old-gen. Regions excluded from promotion because their garbage content is too low (causing us to anticipate that
      // the region would be promoted in place) may be eligible for evacuation promotion by the time promotion takes
      // place during a subsequent GC pass because more garbage is found within the region between now and then. This
      // should not happen if we are properly adapting the tenure age. The theory behind the adaptive tenuring threshold
      // is to choose the youngest age that demonstrates no "significant" further loss of population since the previous
      // age. If not this, we expect the tenure age to demonstrate linear population decay for at least two population
      // samples, whereas we expect to observe exponential population decay for ages younger than the tenure age.
      //
      // In the case that certain regions which were anticipated to be promoted in place need to be promoted by
      // evacuation, it may be the case that there is not sufficient reserve within old-gen to hold evacuation of
      // these regions. The likely outcome is that these regions will not be selected for evacuation or promotion
      // in the current cycle and we will anticipate that they will be promoted in the next cycle. This will cause
      // us to reserve more old-gen memory so that these objects can be promoted in the subsequent cycle.
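      //
      // Illustrative census shape (example numbers only): cohort live volumes of {100, 40, 16, 15, 15} MB
      // for ages 0..4 decay roughly exponentially through age 2 and then flatten, so the adaptive policy
      // described above would settle on a tenuring threshold near age 3, the youngest age showing no
      // significant further population loss.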
      if (heap->is_aging_cycle() && (r->age() + 1 == tenuring_threshold)) {
        if (r->garbage() >= old_garbage_threshold) {
          promo_potential += r->get_live_data_bytes();
        }
      }
    }
    // Note that we keep going even if one region is excluded from selection.
    // Subsequent regions may be selected if they have smaller live data.
  }
  // Sort in increasing order according to live data bytes. Note that candidates represents the number of regions
  // that qualify to be promoted by evacuation.
  if (candidates > 0) {
    size_t selected_regions = 0;
    size_t selected_live = 0;
    QuickSort::sort<AgedRegionData>(sorted_regions, candidates, compare_by_aged_live);
    for (size_t i = 0; i < candidates; i++) {
      ShenandoahHeapRegion* const region = sorted_regions[i]._region;
      size_t region_live_data = sorted_regions[i]._live_data;
      size_t promotion_need = (size_t) (region_live_data * ShenandoahPromoEvacWaste);
      if (old_consumed + promotion_need <= old_available) {
        old_consumed += promotion_need;
        candidate_regions_for_promotion_by_copy[region->index()] = true;
        selected_regions++;
        selected_live += region_live_data;
      } else {
        // We rejected this promotable region from the collection set because we had no room to hold its copy.
        // Add this region to promo potential for next GC.
        promo_potential += region_live_data;
        assert(!candidate_regions_for_promotion_by_copy[region->index()], "Shouldn't be selected");
      }
      // We keep going even if one region is excluded from selection because we need to accumulate all eligible
      // regions that are not preselected into promo_potential.
    }
    log_debug(gc)("Preselected " SIZE_FORMAT " regions containing " SIZE_FORMAT " live bytes,"
                  " consuming: " SIZE_FORMAT " of budgeted: " SIZE_FORMAT,
                  selected_regions, selected_live, old_consumed, old_available);
  }

  heap->old_generation()->set_pad_for_promote_in_place(promote_in_place_pad);
  heap->old_generation()->set_promotion_potential(promo_potential);
  return old_consumed;
}

void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahCollectionSet* collection_set = heap->collection_set();
  bool is_generational = heap->mode()->is_generational();

  assert(!heap->is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
  assert(!is_old(), "Only YOUNG and GLOBAL GC perform evacuations");
  {
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_update_region_states :
                                         ShenandoahPhaseTimings::degen_gc_final_update_region_states);
    ShenandoahFinalMarkUpdateRegionStateClosure cl(complete_marking_context());
    parallel_heap_region_iterate(&cl);

    if (is_young()) {
      // We always need to update the watermark for old regions. If there
      // are mixed collections pending, we also need to synchronize the
      // pinned status for old regions. Since we are already visiting every
      // old region here, go ahead and sync the pin status too.
      ShenandoahFinalMarkUpdateRegionStateClosure old_cl(nullptr);
      heap->old_generation()->parallel_heap_region_iterate(&old_cl);
    }
  }

  // Tally the census counts and compute the adaptive tenuring threshold
  if (is_generational && ShenandoahGenerationalAdaptiveTenuring && !ShenandoahGenerationalCensusAtEvac) {
    // Objects above TAMS weren't included in the age census. Since they were all
    // allocated in this cycle they belong in the age 0 cohort. We walk over all
    // young regions and sum the volume of objects between TAMS and top.
    ShenandoahUpdateCensusZeroCohortClosure age0_cl(complete_marking_context());
    heap->young_generation()->heap_region_iterate(&age0_cl);
    size_t age0_pop = age0_cl.get_age0_population();

    // Update the global census, including the missed age 0 cohort above,
    // along with the census done during marking, and compute the tenuring threshold.
    ShenandoahAgeCensus* census = ShenandoahGenerationalHeap::heap()->age_census();
    census->update_census(age0_pop);
#ifndef PRODUCT
    size_t total_pop = age0_cl.get_total_population();
    size_t total_census = census->get_total();
    // Usually total_pop > total_census, but not by too much.
    // We use integer division so anything up to just less than 2 is considered
    // reasonable, and the "+1" is to avoid divide-by-zero.
    assert((total_pop+1)/(total_census+1) == 1, "Extreme divergence: "
           SIZE_FORMAT "/" SIZE_FORMAT, total_pop, total_census);
#endif
  }
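
  // Worked example of the divergence check above (illustrative values only): total_pop = 150 with
  // total_census = 100 gives (150+1)/(100+1) == 1 under integer division, which passes; total_pop = 250
  // with total_census = 100 gives 251/101 == 2, which trips the assert as extreme divergence.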

  {
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::choose_cset :
                                         ShenandoahPhaseTimings::degen_gc_choose_cset);

    collection_set->clear();
    ShenandoahHeapLocker locker(heap->lock());
    if (is_generational) {
      // Seed the collection set with resource area-allocated
      // preselected regions, which are removed when we exit this scope.
      ShenandoahCollectionSetPreselector preselector(collection_set, heap->num_regions());

      // Find the amount that will be promoted, regions that will be promoted in
      // place, and preselect older regions that will be promoted by evacuation.
      compute_evacuation_budgets(heap);

      // Choose the collection set, including the regions preselected above for
      // promotion into the old generation.
      _heuristics->choose_collection_set(collection_set);
      if (!collection_set->is_empty()) {
        // Only make use of evacuation budgets when we are evacuating.
        adjust_evacuation_budgets(heap, collection_set);
      }

      if (is_global()) {
        // We have just chosen a collection set for a global cycle. The mark bitmap covering old regions is complete, so
        // the remembered set scan can use that to avoid walking into garbage. When the next old mark begins, we will
        // use the mark bitmap to make the old regions parsable by coalescing and filling any unmarked objects. Thus,
        // we prepare for old collections by remembering which regions are old at this time. Note that any objects
        // promoted into old regions will be above TAMS, and so will be considered marked. However, free regions that
        // become old after this point will not be covered correctly by the mark bitmap, so we must be careful not to
        // coalesce those regions. Only the old regions which are not part of the collection set at this point are
        // eligible for coalescing. As implemented now, this has the side effect of possibly initiating mixed-evacuations
        // after a global cycle for old regions that were not included in this collection set.
        heap->old_generation()->prepare_for_mixed_collections_after_global_gc();
      }
    } else {
      _heuristics->choose_collection_set(collection_set);
    }
  }

  {
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset :
                                         ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
    ShenandoahHeapLocker locker(heap->lock());
    size_t young_cset_regions, old_cset_regions;

    // We are preparing for evacuation. At this time, we ignore cset region tallies.
    size_t first_old, last_old, num_old;
    heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
    // Free set construction uses reserve quantities, because they are known to be valid here.
    heap->free_set()->finish_rebuild(young_cset_regions, old_cset_regions, num_old, true);
  }
}

bool ShenandoahGeneration::is_bitmap_clear() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahMarkingContext* context = heap->marking_context();
  const size_t num_regions = heap->num_regions();
  for (size_t idx = 0; idx < num_regions; idx++) {
    ShenandoahHeapRegion* r = heap->get_region(idx);
    if (contains(r) && r->is_affiliated()) {
      if (heap->is_bitmap_slice_committed(r) && (context->top_at_mark_start(r) > r->bottom()) &&
          !context->is_bitmap_clear_range(r->bottom(), r->end())) {
        return false;
      }
    }
  }
  return true;
}

bool ShenandoahGeneration::is_mark_complete() {
  return _is_marking_complete.is_set();
}

void ShenandoahGeneration::set_mark_complete() {
  _is_marking_complete.set();
}

void ShenandoahGeneration::set_mark_incomplete() {
  _is_marking_complete.unset();
}

ShenandoahMarkingContext* ShenandoahGeneration::complete_marking_context() {
  assert(is_mark_complete(), "Marking must be completed.");
  return ShenandoahHeap::heap()->marking_context();
}

void ShenandoahGeneration::cancel_marking() {
  log_info(gc)("Cancel marking: %s", name());
  if (is_concurrent_mark_in_progress()) {
    set_mark_incomplete();
  }
  _task_queues->clear();
  ref_processor()->abandon_partial_discovery();
  set_concurrent_mark_in_progress(false);
}

ShenandoahGeneration::ShenandoahGeneration(ShenandoahGenerationType type,
                                           uint max_workers,
                                           size_t max_capacity,
                                           size_t soft_max_capacity) :
  _type(type),
  _task_queues(new ShenandoahObjToScanQueueSet(max_workers)),
  _ref_processor(new ShenandoahReferenceProcessor(MAX2(max_workers, 1U))),
  _affiliated_region_count(0), _humongous_waste(0), _evacuation_reserve(0),
  _used(0), _bytes_allocated_since_gc_start(0),
  _max_capacity(max_capacity), _soft_max_capacity(soft_max_capacity),
  _heuristics(nullptr)
{
  _is_marking_complete.set();
  assert(max_workers > 0, "At least one queue");
  for (uint i = 0; i < max_workers; ++i) {
    ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
    _task_queues->register_queue(i, task_queue);
  }
}

ShenandoahGeneration::~ShenandoahGeneration() {
  for (uint i = 0; i < _task_queues->size(); ++i) {
    ShenandoahObjToScanQueue* q = _task_queues->queue(i);
    delete q;
  }
  delete _task_queues;
}

void ShenandoahGeneration::reserve_task_queues(uint workers) {
  _task_queues->reserve(workers);
}

ShenandoahObjToScanQueueSet* ShenandoahGeneration::old_gen_task_queues() const {
  return nullptr;
}

void ShenandoahGeneration::scan_remembered_set(bool is_concurrent) {
  assert(is_young(), "Should only scan remembered set for young generation.");

  ShenandoahGenerationalHeap* const heap = ShenandoahGenerationalHeap::heap();
  uint nworkers = heap->workers()->active_workers();
  reserve_task_queues(nworkers);

  ShenandoahReferenceProcessor* rp = ref_processor();
  ShenandoahRegionChunkIterator work_list(nworkers);
  ShenandoahScanRememberedTask task(task_queues(), old_gen_task_queues(), rp, &work_list, is_concurrent);
  heap->assert_gc_workers(nworkers);
  heap->workers()->run_task(&task);
  if (ShenandoahEnableCardStats) {
    ShenandoahScanRemembered* scanner = heap->old_generation()->card_scan();
    assert(scanner != nullptr, "Not generational");
    scanner->log_card_stats(nworkers, CARD_STAT_SCAN_RS);
  }
}

size_t ShenandoahGeneration::increment_affiliated_region_count() {
  shenandoah_assert_heaplocked_or_safepoint();
  // During full gc, multiple GC worker threads may change region affiliations without a lock. No lock is enforced
  // on read and write of _affiliated_region_count. At the end of full gc, a single thread overwrites the count with
  // a coherent value.
  _affiliated_region_count++;
  return _affiliated_region_count;
}

size_t ShenandoahGeneration::decrement_affiliated_region_count() {
  shenandoah_assert_heaplocked_or_safepoint();
  // During full gc, multiple GC worker threads may change region affiliations without a lock. No lock is enforced
  // on read and write of _affiliated_region_count. At the end of full gc, a single thread overwrites the count with
  // a coherent value.
  _affiliated_region_count--;
  assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
         (_used + _humongous_waste <= _affiliated_region_count * ShenandoahHeapRegion::region_size_bytes()),
         "used + humongous cannot exceed regions");
  return _affiliated_region_count;
}

size_t ShenandoahGeneration::increase_affiliated_region_count(size_t delta) {
  shenandoah_assert_heaplocked_or_safepoint();
  _affiliated_region_count += delta;
  return _affiliated_region_count;
}

size_t ShenandoahGeneration::decrease_affiliated_region_count(size_t delta) {
  shenandoah_assert_heaplocked_or_safepoint();
  assert(_affiliated_region_count >= delta, "Affiliated region count cannot be negative");

  _affiliated_region_count -= delta;
  assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
         (_used + _humongous_waste <= _affiliated_region_count * ShenandoahHeapRegion::region_size_bytes()),
         "used + humongous cannot exceed regions");
  return _affiliated_region_count;
}

void ShenandoahGeneration::establish_usage(size_t num_regions, size_t num_bytes, size_t humongous_waste) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
  _affiliated_region_count = num_regions;
  _used = num_bytes;
  _humongous_waste = humongous_waste;
}

void ShenandoahGeneration::increase_used(size_t bytes) {
  Atomic::add(&_used, bytes);
}

void ShenandoahGeneration::increase_humongous_waste(size_t bytes) {
  if (bytes > 0) {
    Atomic::add(&_humongous_waste, bytes);
  }
}

void ShenandoahGeneration::decrease_humongous_waste(size_t bytes) {
  if (bytes > 0) {
    assert(ShenandoahHeap::heap()->is_full_gc_in_progress() || (_humongous_waste >= bytes),
           "Waste (" SIZE_FORMAT ") cannot be negative (after subtracting " SIZE_FORMAT ")", _humongous_waste, bytes);
    Atomic::sub(&_humongous_waste, bytes);
  }
}

void ShenandoahGeneration::decrease_used(size_t bytes) {
  assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
         (_used >= bytes), "cannot reduce bytes used by generation below zero");
  Atomic::sub(&_used, bytes);
}

size_t ShenandoahGeneration::used_regions() const {
  return _affiliated_region_count;
}

size_t ShenandoahGeneration::free_unaffiliated_regions() const {
  size_t result = max_capacity() / ShenandoahHeapRegion::region_size_bytes();
  if (_affiliated_region_count > result) {
    result = 0;
  } else {
    result -= _affiliated_region_count;
  }
  return result;
}

size_t ShenandoahGeneration::used_regions_size() const {
  return _affiliated_region_count * ShenandoahHeapRegion::region_size_bytes();
}

size_t ShenandoahGeneration::available() const {
  return available(max_capacity());
}

// For ShenandoahYoungGeneration, include the young available that may have been reserved for the Collector.
size_t ShenandoahGeneration::available_with_reserve() const {
  return available(max_capacity());
}

size_t ShenandoahGeneration::soft_available() const {
  return available(soft_max_capacity());
}

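// Worked example for available(size_t) below (illustrative values only): with capacity = 100 MB,
// used() = 60 MB and humongous waste = 5 MB, in_use = 65 MB and the result is 35 MB. If in_use
// exceeds capacity, the result clamps to 0 rather than wrapping the unsigned subtraction.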
size_t ShenandoahGeneration::available(size_t capacity) const {
  size_t in_use = used() + get_humongous_waste();
  return in_use > capacity ? 0 : capacity - in_use;
}

size_t ShenandoahGeneration::increase_capacity(size_t increment) {
  shenandoah_assert_heaplocked_or_safepoint();

  // We do not enforce that new capacity >= heap->max_size_for(this). The maximum generation size is treated as a rule of thumb
  // which may be violated during certain transitions, such as when we are forcing transfers for the purpose of promoting regions
  // in place.
  assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
         (_max_capacity + increment <= ShenandoahHeap::heap()->max_capacity()), "Generation cannot be larger than heap size");
  assert(increment % ShenandoahHeapRegion::region_size_bytes() == 0, "Generation capacity must be multiple of region size");
  _max_capacity += increment;

  // This detects arithmetic wraparound on _used
  assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
         (_affiliated_region_count * ShenandoahHeapRegion::region_size_bytes() >= _used),
         "Affiliated regions must hold no less than what is currently used");
  return _max_capacity;
}

size_t ShenandoahGeneration::set_capacity(size_t byte_size) {
  shenandoah_assert_heaplocked_or_safepoint();
  _max_capacity = byte_size;
  return _max_capacity;
}

size_t ShenandoahGeneration::decrease_capacity(size_t decrement) {
  shenandoah_assert_heaplocked_or_safepoint();

  // We do not enforce that new capacity >= heap->min_size_for(this). The minimum generation size is treated as a rule of thumb
  // which may be violated during certain transitions, such as when we are forcing transfers for the purpose of promoting regions
  // in place.
  assert(decrement % ShenandoahHeapRegion::region_size_bytes() == 0, "Generation capacity must be multiple of region size");
  assert(_max_capacity >= decrement, "Generation capacity cannot be negative");

  _max_capacity -= decrement;

  // This detects arithmetic wraparound on _used
  assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
         (_affiliated_region_count * ShenandoahHeapRegion::region_size_bytes() >= _used),
         "Affiliated regions must hold no less than what is currently used");
  assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
         (_used <= _max_capacity), "Cannot use more than capacity");
  assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
         (_affiliated_region_count * ShenandoahHeapRegion::region_size_bytes() <= _max_capacity),
         "Cannot use more than capacity");
  return _max_capacity;
}

void ShenandoahGeneration::record_success_concurrent(bool abbreviated) {
  heuristics()->record_success_concurrent();
  ShenandoahHeap::heap()->shenandoah_policy()->record_success_concurrent(is_young(), abbreviated);
}