1 /*
   2  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
   3  * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
  27 #include "gc/shenandoah/shenandoahCollectionSetPreselector.hpp"
  28 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  29 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  30 #include "gc/shenandoah/shenandoahGeneration.hpp"
  31 #include "gc/shenandoah/shenandoahGenerationalHeap.inline.hpp"
  32 #include "gc/shenandoah/shenandoahHeapRegionClosures.hpp"
  33 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  34 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  35 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
  36 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
  37 #include "gc/shenandoah/shenandoahUtils.hpp"
  38 #include "gc/shenandoah/shenandoahVerifier.hpp"
  39 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  40 #include "utilities/quickSort.hpp"
  41 
  42 template <bool PREPARE_FOR_CURRENT_CYCLE, bool FULL_GC = false>
  43 class ShenandoahResetBitmapClosure final : public ShenandoahHeapRegionClosure {
  44 private:
  45   ShenandoahHeap*           _heap;
  46   ShenandoahMarkingContext* _ctx;
  47 
  48 public:
  49   explicit ShenandoahResetBitmapClosure() :
  50     ShenandoahHeapRegionClosure(), _heap(ShenandoahHeap::heap()), _ctx(_heap->marking_context()) {}
  51 
  52   void heap_region_do(ShenandoahHeapRegion* region) override {
  53     assert(!_heap->is_uncommit_in_progress(), "Cannot uncommit bitmaps while resetting them.");
  54     if (PREPARE_FOR_CURRENT_CYCLE) {
  55       if (region->need_bitmap_reset() && _heap->is_bitmap_slice_committed(region)) {
  56         _ctx->clear_bitmap(region);
  57       } else {
  58         region->set_needs_bitmap_reset();
  59       }
  60       // Capture Top At Mark Start for this generation.
  61       if (FULL_GC || region->is_active()) {
  62         // Reset live data and set TAMS optimistically. We will recheck these under the pause
  63         // anyway, to capture any updates that happen between now and then.
  64         _ctx->capture_top_at_mark_start(region);
  65         region->clear_live_data();
  66       }
  67     } else {
  68       if (_heap->is_bitmap_slice_committed(region)) {
  69         _ctx->clear_bitmap(region);
  70         region->unset_needs_bitmap_reset();
  71       } else {
  72         region->set_needs_bitmap_reset();
  73       }
  74     }
  75   }
  76 
  77   bool is_thread_safe() override { return true; }
  78 };
  79 
  80 // Copy the write-version of the card-table into the read-version, clearing the
  81 // write-version.
  82 class ShenandoahMergeWriteTable: public ShenandoahHeapRegionClosure {
  83 private:
  84   ShenandoahScanRemembered* _scanner;
  85 public:
  86   ShenandoahMergeWriteTable(ShenandoahScanRemembered* scanner) : _scanner(scanner) {}
  87 
  88   void heap_region_do(ShenandoahHeapRegion* r) override {
  89     assert(r->is_old(), "Don't waste time doing this for non-old regions");
  90     _scanner->merge_write_table(r->bottom(), ShenandoahHeapRegion::region_size_words());
  91   }
  92 
  93   bool is_thread_safe() override {
  94     return true;
  95   }
  96 };
  97 
  98 // Add [TAMS, top) volume over young regions. Used to correct age 0 cohort census
  99 // for adaptive tenuring when census is taken during marking.
 100 // In non-product builds, for verification purposes, we also collect the total
 101 // live data in young regions.
 102 class ShenandoahUpdateCensusZeroCohortClosure : public ShenandoahHeapRegionClosure {
 103 private:
 104   ShenandoahMarkingContext* const _ctx;
 105   // Population size units are words (not bytes)
 106   size_t _age0_pop;                // running tally of age0 population size
 107   size_t _total_pop;               // total live population size
 108 public:
 109   explicit ShenandoahUpdateCensusZeroCohortClosure(ShenandoahMarkingContext* ctx)
 110     : _ctx(ctx), _age0_pop(0), _total_pop(0) {}
 111 
 112   void heap_region_do(ShenandoahHeapRegion* r) override {
 113     if (_ctx != nullptr && r->is_active()) {
 114       assert(r->is_young(), "Young regions only");
 115       HeapWord* tams = _ctx->top_at_mark_start(r);
 116       HeapWord* top  = r->top();
 117       if (top > tams) {
 118         _age0_pop += pointer_delta(top, tams);
 119       }
 120       // TODO: check significance of _ctx != nullptr above, can that
 121       // spoof _total_pop in some corner cases?
 122       NOT_PRODUCT(_total_pop += r->get_live_data_words();)
 123     }
 124   }
 125 
 126   size_t get_age0_population()  const { return _age0_pop; }
 127   size_t get_total_population() const { return _total_pop; }
 128 };
 129 
 130 void ShenandoahGeneration::confirm_heuristics_mode() {
 131   if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 132     vm_exit_during_initialization(
 133             err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 134                     _heuristics->name()));
 135   }
 136   if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
 137     vm_exit_during_initialization(
 138             err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 139                     _heuristics->name()));
 140   }
 141 }
 142 
 143 ShenandoahHeuristics* ShenandoahGeneration::initialize_heuristics(ShenandoahMode* gc_mode) {
 144   _heuristics = gc_mode->initialize_heuristics(this);
 145   _heuristics->set_guaranteed_gc_interval(ShenandoahGuaranteedGCInterval);
 146   confirm_heuristics_mode();
 147   return _heuristics;
 148 }
 149 
 150 size_t ShenandoahGeneration::bytes_allocated_since_gc_start() const {
 151   return Atomic::load(&_bytes_allocated_since_gc_start);
 152 }
 153 
 154 void ShenandoahGeneration::reset_bytes_allocated_since_gc_start() {
 155   Atomic::store(&_bytes_allocated_since_gc_start, (size_t)0);
 156 }
 157 
 158 void ShenandoahGeneration::increase_allocated(size_t bytes) {
 159   Atomic::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed);
 160 }
 161 
 162 void ShenandoahGeneration::set_evacuation_reserve(size_t new_val) {
 163   _evacuation_reserve = new_val;
 164 }
 165 
 166 size_t ShenandoahGeneration::get_evacuation_reserve() const {
 167   return _evacuation_reserve;
 168 }
 169 
 170 void ShenandoahGeneration::augment_evacuation_reserve(size_t increment) {
 171   _evacuation_reserve += increment;
 172 }
 173 
 174 void ShenandoahGeneration::log_status(const char *msg) const {
 175   typedef LogTarget(Info, gc, ergo) LogGcInfo;
 176 
 177   if (!LogGcInfo::is_enabled()) {
 178     return;
 179   }
 180 
 181   // Not under a lock here, so read each of these values once to make sure
 182   // the reported byte size and the unit chosen for it are consistent.
 183   const size_t v_used = used();
 184   const size_t v_used_regions = used_regions_size();
 185   const size_t v_soft_max_capacity = soft_max_capacity();
 186   const size_t v_max_capacity = max_capacity();
 187   const size_t v_available = available();
 188   const size_t v_humongous_waste = get_humongous_waste();
 189 
 190   const LogGcInfo target;
 191   LogStream ls(target);
 192   ls.print("%s: ", msg);
 193   if (_type != NON_GEN) {
 194     ls.print("%s generation ", name());
 195   }
 196 
 197   ls.print_cr("used: " PROPERFMT ", used regions: " PROPERFMT ", humongous waste: " PROPERFMT
 198               ", soft capacity: " PROPERFMT ", max capacity: " PROPERFMT ", available: " PROPERFMT,
 199               PROPERFMTARGS(v_used), PROPERFMTARGS(v_used_regions), PROPERFMTARGS(v_humongous_waste),
 200               PROPERFMTARGS(v_soft_max_capacity), PROPERFMTARGS(v_max_capacity), PROPERFMTARGS(v_available));
 201 }
 202 
 203 template <bool PREPARE_FOR_CURRENT_CYCLE, bool FULL_GC>
 204 void ShenandoahGeneration::reset_mark_bitmap() {
 205   ShenandoahHeap* heap = ShenandoahHeap::heap();
 206   heap->assert_gc_workers(heap->workers()->active_workers());
 207 
 208   set_mark_incomplete();
 209 
 210   ShenandoahResetBitmapClosure<PREPARE_FOR_CURRENT_CYCLE, FULL_GC> closure;
 211   parallel_heap_region_iterate_free(&closure);
 212 }
 213 // Explicit specializations
 214 template void ShenandoahGeneration::reset_mark_bitmap<true, false>();
 215 template void ShenandoahGeneration::reset_mark_bitmap<true, true>();
 216 template void ShenandoahGeneration::reset_mark_bitmap<false, false>();
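// Note on the specializations above (the mapping to call sites outside this file is an assumption):
// <true, false> backs prepare_gc() below via reset_mark_bitmap<true>(), resetting bitmaps for the
// cycle about to start; <true, true> additionally captures TAMS and clears live data even for
// inactive regions, presumably for full GC; <false, false> only clears committed bitmap slices,
// presumably as an end-of-cycle cleanup, recording which regions still need a reset.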
 217 
 218 // Swap the read and write card table pointers prior to the next remset scan.
 219 // This avoids the need to synchronize reads of the table by the GC workers
 220 // doing remset scanning, on the one hand, with the dirtying of the table by
 221 // mutators on the other.
 222 void ShenandoahGeneration::swap_card_tables() {
 223   // Must be sure that marking is complete before we swap remembered set.
 224   ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
 225   heap->assert_gc_workers(heap->workers()->active_workers());
 226   shenandoah_assert_safepoint();
 227 
 228   ShenandoahOldGeneration* old_generation = heap->old_generation();
 229   old_generation->card_scan()->swap_card_tables();
 230 }
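// Illustrative timeline (a restatement of the intent above, no additional machinery assumed):
// mutators keep dirtying the write table while marking runs; at this safepoint the roles are
// swapped, so the subsequent remembered-set scan reads a stable table while new dirtying lands
// in the other (now write) table.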
 231 
 232 // Copy the write-version of the card-table into the read-version, clearing the
 233 // write-version. The work is done at a safepoint and in parallel by the GC
 234 // worker threads.
 235 void ShenandoahGeneration::merge_write_table() {
 236   // This should only happen for degenerated cycles
 237   ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
 238   heap->assert_gc_workers(heap->workers()->active_workers());
 239   shenandoah_assert_safepoint();
 240 
 241   ShenandoahOldGeneration* old_generation = heap->old_generation();
 242   ShenandoahMergeWriteTable task(old_generation->card_scan());
 243   old_generation->parallel_heap_region_iterate(&task);
 244 }
 245 
 246 void ShenandoahGeneration::prepare_gc() {
 247   reset_mark_bitmap<true>();
 248 }
 249 
 250 void ShenandoahGeneration::parallel_heap_region_iterate_free(ShenandoahHeapRegionClosure* cl) {
 251   ShenandoahHeap::heap()->parallel_heap_region_iterate(cl);
 252 }
 253 
 254 void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap) {
 255   shenandoah_assert_generational();
 256 
 257   ShenandoahOldGeneration* const old_generation = heap->old_generation();
 258   ShenandoahYoungGeneration* const young_generation = heap->young_generation();
 259 
 260   // During initialization and phase changes, it is more likely that fewer objects die young and old-gen
 261   // memory is not yet full (or is in the process of being replaced).  During these times especially, it
 262   // is beneficial to loan memory from old-gen to young-gen during the evacuation and update-refs phases
 263   // of execution.
 264 
 265   // Calculate EvacuationReserve before PromotionReserve.  Evacuation is more critical than promotion.
 266   // If we cannot evacuate old-gen, we will not be able to reclaim old-gen memory.  Promotions are less
 267   // critical.  If we cannot promote, there may be degradation of young-gen memory because old objects
 268   // accumulate there until they can be promoted.  This increases the young-gen marking and evacuation work.
 269 
 270   // First priority is to reclaim the easy garbage out of young-gen.
 271 
 272   // maximum_young_evacuation_reserve is an upper bound on the memory to be evacuated out of young
 273   const size_t maximum_young_evacuation_reserve = (young_generation->max_capacity() * ShenandoahEvacReserve) / 100;
 274   const size_t young_evacuation_reserve = MIN2(maximum_young_evacuation_reserve, young_generation->available_with_reserve());
 275 
 276   // maximum_old_evacuation_reserve is an upper bound on memory evacuated from old and evacuated to old (promoted),
 277   // clamped by the old generation space available.
 278   //
 279   // Here's the algebra.
 280   // Let SOEP = ShenandoahOldEvacRatioPercent,
 281   //     OE = old evac,
 282   //     YE = young evac, and
 283   //     TE = total evac = OE + YE
 284   // By definition:
 285   //            SOEP/100 = OE/TE
 286   //                     = OE/(OE+YE)
 287   //  => SOEP/(100-SOEP) = OE/((OE+YE)-OE)         // componendo-dividendo: If a/b = c/d, then a/(b-a) = c/(d-c)
 288   //                     = OE/YE
 289   //  =>              OE = YE*SOEP/(100-SOEP)
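  // A worked instance of the algebra above, with illustrative numbers (not tied to any real
  // configuration): YE = 100 and SOEP = 20 give OE = 100 * 20 / (100 - 20) = 25, i.e. old
  // evacuation may consume up to a quarter of the young evacuation budget before clamping
  // against old_available.
  static_assert(100 * 20 / (100 - 20) == 25, "illustrative componendo-dividendo example");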
 290 
 291   // We have to be careful in the event that SOEP is set to 100 by the user.
 292   assert(ShenandoahOldEvacRatioPercent <= 100, "Error");
 293   const size_t old_available = old_generation->available();
 294   const size_t maximum_old_evacuation_reserve = (ShenandoahOldEvacRatioPercent == 100) ?
 295     old_available : MIN2((maximum_young_evacuation_reserve * ShenandoahOldEvacRatioPercent) / (100 - ShenandoahOldEvacRatioPercent),
 296                           old_available);
 297 
 298 
 299   // Second priority is to reclaim garbage out of old-gen if there are old-gen collection candidates.  Third priority
 300   // is to promote as much as we have room to promote.  However, if old-gen memory is in short supply, this means young
 301   // GC is operating under "duress" and was unable to transfer the memory that we would normally expect.  In this case,
 302   // old-gen will refrain from compacting itself in order to allow a quicker young-gen cycle (by avoiding the update-refs
 303   // through ALL of old-gen).  If there is some memory available in old-gen, we will use this for promotions as promotions
 304   // do not add to the update-refs burden of GC.
 305 
 306   size_t old_evacuation_reserve, old_promo_reserve;
 307   if (is_global()) {
 308     // Global GC is typically triggered by user invocation of System.gc(), and typically indicates that there is lots
 309     // of garbage to be reclaimed because we are starting a new phase of execution.  Marking for global GC may take
 310     // significantly longer than typical young marking because we must mark through all old objects.  To expedite
 311     // evacuation and update-refs, we give emphasis to reclaiming garbage first, wherever that garbage is found.
 312     // Global GC will adjust generation sizes to accommodate the collection set it chooses.
 313 
 314     // Set old_promo_reserve to enforce that no regions are preselected for promotion.  Such regions typically
 315     // have relatively high memory utilization.  We still call select_aged_regions() because this will prepare for
 316     // promotions in place, if relevant.
 317     old_promo_reserve = 0;
 318 
 319     // Dedicate all available old memory to old_evacuation reserve.  This may be small, because old-gen is only
 320     // expanded based on an existing mixed evacuation workload at the end of the previous GC cycle.  We'll expand
 321     // the budget for evacuation of old during GLOBAL cset selection.
 322     old_evacuation_reserve = maximum_old_evacuation_reserve;
 323   } else if (old_generation->has_unprocessed_collection_candidates()) {
 324     // We reserved all old-gen memory at end of previous GC to hold anticipated evacuations to old-gen.  If this is
 325     // mixed evacuation, reserve all of this memory for compaction of old-gen and do not promote.  Prioritize compaction
 326     // over promotion in order to defragment OLD so that it will be better prepared to efficiently receive promoted memory.
 327     old_evacuation_reserve = maximum_old_evacuation_reserve;
 328     old_promo_reserve = 0;
 329   } else {
 330     // Make all old-evacuation memory available for promotion; if we can't use it all for promotion, we'll allow some evacuation.
 331     old_evacuation_reserve = 0;
 332     old_promo_reserve = maximum_old_evacuation_reserve;
 333   }
 334   assert(old_evacuation_reserve <= old_available, "Error");
 335 
 336   // We see too many old-evacuation failures if we force ourselves to evacuate into regions that are not initially empty.
 337   // So we limit the old-evacuation reserve to unfragmented memory.  Even so, old-evacuation is free to fill in nooks and
 338   // crannies within existing partially used regions and it generally tries to do so.
 339   const size_t old_free_unfragmented = old_generation->free_unaffiliated_regions() * ShenandoahHeapRegion::region_size_bytes();
 340   if (old_evacuation_reserve > old_free_unfragmented) {
 341     const size_t delta = old_evacuation_reserve - old_free_unfragmented;
 342     old_evacuation_reserve -= delta;
 343     // Let promo consume fragments of old-gen memory if not global
 344     if (!is_global()) {
 345       old_promo_reserve += delta;
 346     }
 347   }
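  // For illustration (hypothetical numbers): with a 40 MB old_evacuation_reserve but only
  // 32 MB of unaffiliated old regions, the reserve is clamped to 32 MB and, unless this is a
  // global cycle, the remaining 8 MB is shifted into old_promo_reserve, since promotion can
  // tolerate the fragmented remainder of partially used old regions.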
 348 
 349   // Preselect regions for promotion by evacuation (obtaining the live data to seed promoted_reserve),
 350   // and identify regions that will promote in place. These use the tenuring threshold.
 351   const size_t consumed_by_advance_promotion = select_aged_regions(old_promo_reserve);
 352   assert(consumed_by_advance_promotion <= maximum_old_evacuation_reserve, "Cannot promote more than available old-gen memory");
 353 
 354   // Note that old_promo_reserve might not be entirely consumed by advance promotion (i.e. consumed_by_advance_promotion
 355   // may be smaller).  Do not transfer the unused portion to old_evacuation_reserve because this memory is likely very
 356   // fragmented, and we do not want to increase the likelihood of old-evacuation failure.
 357   young_generation->set_evacuation_reserve(young_evacuation_reserve);
 358   old_generation->set_evacuation_reserve(old_evacuation_reserve);
 359   old_generation->set_promoted_reserve(consumed_by_advance_promotion);
 360 
 361   // There is no need to expand OLD because all memory used here was set aside at end of previous GC, except in the
 362   // case of a GLOBAL gc.  During choose_collection_set() of GLOBAL, old will be expanded on demand.
 363 }
 364 
 365 // Having chosen the collection set, adjust the budgets for generational mode based on its composition.  Note
 366 // that young_generation->available() now knows about recently discovered immediate garbage.
 367 //
 368 void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap, ShenandoahCollectionSet* const collection_set) {
 369   shenandoah_assert_generational();
 370   // We may find that old_evacuation_reserve and/or loaned_for_young_evacuation are not fully consumed, in which case
 371   // we may be able to increase regions_available_to_loan.
 372 
 373   // The role of adjust_evacuation_budgets() is to compute the correct value of regions_available_to_loan and to make
 374   // effective use of this memory, including the remnant memory within these regions that may result from rounding loan to
 375   // integral number of regions.  Excess memory that is available to be loaned is applied to an allocation supplement,
 376   // which allows mutators to allocate memory beyond the current capacity of young-gen on the promise that the loan
 377   // will be repaid as soon as we finish updating references for the recently evacuated collection set.
 378 
 379   // We cannot recalculate regions_available_to_loan by simply dividing old_generation->available() by region_size_bytes
 380   // because the available memory may be distributed between many partially occupied regions that are already holding old-gen
 381   // objects.  Memory in partially occupied regions is not "available" to be loaned.  Note that an increase in old-gen
 382   // available that results from a decrease in memory consumed by old evacuation is not necessarily available to be loaned
 383   // to young-gen.
 384 
 385   size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 386   ShenandoahOldGeneration* const old_generation = heap->old_generation();
 387   ShenandoahYoungGeneration* const young_generation = heap->young_generation();
 388 
 389   size_t old_evacuated = collection_set->get_old_bytes_reserved_for_evacuation();
 390   size_t old_evacuated_committed = (size_t) (ShenandoahOldEvacWaste * double(old_evacuated));
 391   size_t old_evacuation_reserve = old_generation->get_evacuation_reserve();
 392 
 393   if (old_evacuated_committed > old_evacuation_reserve) {
 394     // This should only happen due to round-off errors when enforcing ShenandoahOldEvacWaste
 395     assert(old_evacuated_committed <= (33 * old_evacuation_reserve) / 32,
 396            "Round-off errors should be less than 3.125%%, committed: %zu, reserved: %zu",
 397            old_evacuated_committed, old_evacuation_reserve);
 398     old_evacuated_committed = old_evacuation_reserve;
 399     // Leave old_evac_reserve as previously configured
 400   } else if (old_evacuated_committed < old_evacuation_reserve) {
 401     // This happens if the old-gen collection consumes less than full budget.
 402     old_evacuation_reserve = old_evacuated_committed;
 403     old_generation->set_evacuation_reserve(old_evacuation_reserve);
 404   }
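  // The 33/32 bound above allows at most 1/32 (3.125%) of slack. As a hedged illustration with
  // made-up numbers: a 32 MB old_evacuation_reserve tolerates an old_evacuated_committed of up
  // to 33 MB before the assert fires; anything within that band is treated as round-off and
  // clamped back down to the reserve.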
 405 
 406   size_t young_advance_promoted = collection_set->get_young_bytes_to_be_promoted();
 407   size_t young_advance_promoted_reserve_used = (size_t) (ShenandoahPromoEvacWaste * double(young_advance_promoted));
 408 
 409   size_t young_evacuated = collection_set->get_young_bytes_reserved_for_evacuation();
 410   size_t young_evacuated_reserve_used = (size_t) (ShenandoahEvacWaste * double(young_evacuated));
 411 
 412   size_t total_young_available = young_generation->available_with_reserve();
 413   assert(young_evacuated_reserve_used <= total_young_available, "Cannot evacuate more than is available in young");
 414   young_generation->set_evacuation_reserve(young_evacuated_reserve_used);
 415 
 416   size_t old_available = old_generation->available();
 417   // Now that we've established the collection set, we know how much memory is really required by old-gen for evacuation
 418   // and promotion reserves.  Try shrinking OLD now in case that gives us a bit more runway for mutator allocations during
 419   // evac and update phases.
 420   size_t old_consumed = old_evacuated_committed + young_advance_promoted_reserve_used;
 421 
 422   if (old_available < old_consumed) {
 423     // This can happen due to round-off errors when adding the results of truncated integer arithmetic.
 424     // We've already truncated old_evacuated_committed.  Truncate young_advance_promoted_reserve_used here.
 425     assert(young_advance_promoted_reserve_used <= (33 * (old_available - old_evacuated_committed)) / 32,
 426            "Round-off errors should be less than 3.125%%, committed: %zu, reserved: %zu",
 427            young_advance_promoted_reserve_used, old_available - old_evacuated_committed);
 428     young_advance_promoted_reserve_used = old_available - old_evacuated_committed;
 429     old_consumed = old_evacuated_committed + young_advance_promoted_reserve_used;
 430   }
 431 
 432   assert(old_available >= old_consumed, "Cannot consume (%zu) more than is available (%zu)",
 433          old_consumed, old_available);
 434   size_t excess_old = old_available - old_consumed;
 435   size_t unaffiliated_old_regions = old_generation->free_unaffiliated_regions();
 436   size_t unaffiliated_old = unaffiliated_old_regions * region_size_bytes;
 437   assert(old_available >= unaffiliated_old, "Unaffiliated old is a subset of old available");
 438 
 439   // Make sure old_evacuated_committed remains backed by unaffiliated old regions
 440   if (old_evacuated_committed > 0) {
 441     if (unaffiliated_old > old_evacuated_committed) {
 442       size_t giveaway = unaffiliated_old - old_evacuated_committed;
 443       size_t giveaway_regions = giveaway / region_size_bytes;  // round down
 444       if (giveaway_regions > 0) {
 445         excess_old = MIN2(excess_old, giveaway_regions * region_size_bytes);
 446       } else {
 447         excess_old = 0;
 448       }
 449     } else {
 450       excess_old = 0;
 451     }
 452   }
 453 
 454   // If we find that OLD has excess regions, give them back to YOUNG now to reduce likelihood we run out of allocation
 455   // runway during evacuation and update-refs.
 456   size_t regions_to_xfer = 0;
 457   if (excess_old > unaffiliated_old) {
 458     // we can give back unaffiliated_old (all of unaffiliated is excess)
 459     if (unaffiliated_old_regions > 0) {
 460       regions_to_xfer = unaffiliated_old_regions;
 461     }
 462   } else if (unaffiliated_old_regions > 0) {
 463     // excess_old < unaffiliated old: we can give back MIN(excess_old/region_size_bytes, unaffiliated_old_regions)
 464     size_t excess_regions = excess_old / region_size_bytes;
 465     regions_to_xfer = MIN2(excess_regions, unaffiliated_old_regions);
 466   }
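  // Hypothetical example of the transfer sizing above: with excess_old = 10 MB, 4 MB regions,
  // and 5 unaffiliated old regions, excess_old < unaffiliated_old, so we transfer
  // MIN2(10 MB / 4 MB, 5) = 2 regions; the 2 MB remainder stays behind and becomes part of the
  // promotion reserve computed below.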
 467 
 468   if (regions_to_xfer > 0) {
 469     bool result = ShenandoahGenerationalHeap::cast(heap)->generation_sizer()->transfer_to_young(regions_to_xfer);
 470     assert(excess_old >= regions_to_xfer * region_size_bytes,
 471            "Cannot transfer (%zu, %zu) more than excess old (%zu)",
 472            regions_to_xfer, region_size_bytes, excess_old);
 473     excess_old -= regions_to_xfer * region_size_bytes;
 474     log_debug(gc, ergo)("%s transferred %zu excess regions to young before start of evacuation",
 475                        result? "Successfully": "Unsuccessfully", regions_to_xfer);
 476   }
 477 
 478   // Add in the excess_old memory to hold unanticipated promotions, if any.  If there are more unanticipated
 479   // promotions than fit in reserved memory, they will be deferred until a future GC pass.
 480   size_t total_promotion_reserve = young_advance_promoted_reserve_used + excess_old;
 481   old_generation->set_promoted_reserve(total_promotion_reserve);
 482   old_generation->reset_promoted_expended();
 483 }
 484 
 485 struct AgedRegionData {
 486   ShenandoahHeapRegion* _region;
 487   size_t _live_data;
 488 };
 489 
 490 static int compare_by_aged_live(AgedRegionData a, AgedRegionData b) {
 491   if (a._live_data < b._live_data)
 492     return -1;
 493   else if (a._live_data > b._live_data)
 494     return 1;
 495   else return 0;
 496 }
 497 
 498 inline void assert_no_in_place_promotions() {
 499 #ifdef ASSERT
 500   class ShenandoahNoInPlacePromotions : public ShenandoahHeapRegionClosure {
 501   public:
 502     void heap_region_do(ShenandoahHeapRegion *r) override {
 503       assert(r->get_top_before_promote() == nullptr,
 504              "Region %zu should not be ready for in-place promotion", r->index());
 505     }
 506   } cl;
 507   ShenandoahHeap::heap()->heap_region_iterate(&cl);
 508 #endif
 509 }
 510 
 511 // Preselect for inclusion into the collection set those regions whose age is at or above the tenure age and which
 512 // contain more than ShenandoahOldGarbageThreshold percent of garbage.  We identify these regions by setting the appropriate entry of
 513 // the collection set's preselected regions array to true.  All entries are initialized to false before calling this
 514 // function.
 515 //
 516 // During the subsequent selection of the collection set, we give priority to these promotion set candidates.
 517 // Without this prioritization, we found that the aged regions tend to be ignored because they typically have
 518 // much less garbage and much more live data than the recently allocated "eden" regions.  When aged regions are
 519 // repeatedly excluded from the collection set, the amount of live memory within the young generation tends to
 520 // accumulate and this has the undesirable side effect of causing young-generation collections to require much more
 521 // CPU and wall-clock time.
 522 //
 523 // A second benefit of treating aged regions differently than other regions during collection set selection is
 524 // that this allows us to more accurately budget memory to hold the results of evacuation.  Memory for evacuation
 525 // of aged regions must be reserved in the old generation.  Memory for evacuation of all other regions must be
 526 // reserved in the young generation.
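// As a concrete (hypothetical) example of the threshold used below: with 4 MB regions and
// ShenandoahOldGarbageThreshold = 25, old_garbage_threshold is 1 MB, so a tenure-aged region
// with at least 1 MB of garbage becomes a candidate for promotion by evacuation, while a
// tenure-aged region with less garbage than that is considered for promotion in place.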
 527 size_t ShenandoahGeneration::select_aged_regions(size_t old_available) {
 528 
 529   // There should be no regions configured for subsequent in-place-promotions carried over from the previous cycle.
 530   assert_no_in_place_promotions();
 531 
 532   auto const heap = ShenandoahGenerationalHeap::heap();
 533   bool* const candidate_regions_for_promotion_by_copy = heap->collection_set()->preselected_regions();
 534   ShenandoahMarkingContext* const ctx = heap->marking_context();
 535 
 536   const size_t old_garbage_threshold = (ShenandoahHeapRegion::region_size_bytes() * ShenandoahOldGarbageThreshold) / 100;
 537 
 538   size_t old_consumed = 0;
 539   size_t promo_potential = 0;
 540   size_t candidates = 0;
 541 
 542   // Tracks the padding of space above top in regions eligible for promotion in place
 543   size_t promote_in_place_pad = 0;
 544 
 545   // Sort the promotion-eligible regions in order of increasing live-data-bytes so that we can first reclaim regions that require
 546   // less evacuation effort.  This prioritizes garbage first, expanding the allocation pool early before we reclaim regions that
 547   // have more live data.
 548   const size_t num_regions = heap->num_regions();
 549 
 550   ResourceMark rm;
 551   AgedRegionData* sorted_regions = NEW_RESOURCE_ARRAY(AgedRegionData, num_regions);
 552 
 553   for (size_t i = 0; i < num_regions; i++) {
 554     ShenandoahHeapRegion* const r = heap->get_region(i);
 555     if (r->is_empty() || !r->has_live() || !r->is_young() || !r->is_regular()) {
 556       // skip over regions that aren't regular young with some live data
 557       continue;
 558     }
 559     if (heap->is_tenurable(r)) {
 560       if (r->garbage() < old_garbage_threshold) {
 561         // This tenure-worthy region has too little garbage, so we do not want to expend the copying effort to
 562         // reclaim the garbage; instead this region may be eligible for promotion-in-place to the
 563         // old generation.
 564         HeapWord* tams = ctx->top_at_mark_start(r);
 565         HeapWord* original_top = r->top();
 566         if (!heap->is_concurrent_old_mark_in_progress() && tams == original_top) {
 567           // No allocations from this region have been made during concurrent mark. It meets all the criteria
 568           // for in-place-promotion. Though we only need the value of top when we fill the end of the region,
 569           // we use this field to indicate that this region should be promoted in place during the evacuation
 570           // phase.
 571           r->save_top_before_promote();
 572 
 573           size_t remnant_size = r->free() / HeapWordSize;
 574           if (remnant_size > ShenandoahHeap::min_fill_size()) {
 575             ShenandoahHeap::fill_with_object(original_top, remnant_size);
 576             // Fill the remnant memory within this region to assure no allocations prior to promote in place.  Otherwise,
 577             // newly allocated objects will not be parsable when promote in place tries to register them.  Furthermore, any
 578             // new allocations would not necessarily be eligible for promotion.  This addresses both issues.
 579             r->set_top(r->end());
 580             promote_in_place_pad += remnant_size * HeapWordSize;
 581           } else {
 582             // Since the remnant is so small that it cannot be filled, we don't have to worry about any accidental
 583             // allocations occurring within this region before the region is promoted in place.
 584           }
 585         }
 586         // Else, we do not promote this region (either in place or by copy) because it has received new allocations.
 587 
 588         // During evacuation, we exclude from promotion regions for which age > tenure threshold, garbage < garbage-threshold,
 589         //  and get_top_before_promote() != tams
 590       } else {
 591         // Record this promotion-eligible candidate region. After sorting and selecting the best candidates below,
 592         // we may still decide to exclude this promotion-eligible region from the current collection set.  If this
 593         // happens, we will consider this region as part of the anticipated promotion potential for the next GC
 594         // pass; see further below.
 595         sorted_regions[candidates]._region = r;
 596         sorted_regions[candidates++]._live_data = r->get_live_data_bytes();
 597       }
 598     } else {
 599       // We only evacuate & promote objects from regular regions whose garbage() is above old-garbage-threshold.
 600       // Objects in tenure-worthy regions with less garbage are promoted in place. These take a different path to
 601       // old-gen.  Regions excluded from promotion because their garbage content is too low (causing us to anticipate that
 602       // the region would be promoted in place) may be eligible for evacuation promotion by the time promotion takes
 603       // place during a subsequent GC pass because more garbage is found within the region between now and then.  This
 604       // should not happen if we are properly adapting the tenure age.  The theory behind adaptive tenuring threshold
 605       // is to choose the youngest age that demonstrates no "significant" further loss of population since the previous
 606       // age.  Failing that, we expect the tenure age to demonstrate linear population decay for at least two population
 607       // samples, whereas we expect to observe exponential population decay for ages younger than the tenure age.
 608       //
 609       // In the case that certain regions which were anticipated to be promoted in place need to be promoted by
 610       // evacuation, it may be the case that there is not sufficient reserve within old-gen to hold evacuation of
 611       // these regions.  The likely outcome is that these regions will not be selected for evacuation or promotion
 612       // in the current cycle and we will anticipate that they will be promoted in the next cycle.  This will cause
 613       // us to reserve more old-gen memory so that these objects can be promoted in the subsequent cycle.
 614       if (heap->is_aging_cycle() && heap->age_census()->is_tenurable(r->age() + 1)) {
 615         if (r->garbage() >= old_garbage_threshold) {
 616           promo_potential += r->get_live_data_bytes();
 617         }
 618       }
 619     }
 620     // Note that we keep going even if one region is excluded from selection.
 621     // Subsequent regions may be selected if they have smaller live data.
 622   }
 623   // Sort in increasing order according to live data bytes.  Note that candidates represents the number of regions
 624   // that qualify to be promoted by evacuation.
 625   if (candidates > 0) {
 626     size_t selected_regions = 0;
 627     size_t selected_live = 0;
 628     QuickSort::sort<AgedRegionData>(sorted_regions, candidates, compare_by_aged_live);
 629     for (size_t i = 0; i < candidates; i++) {
 630       ShenandoahHeapRegion* const region = sorted_regions[i]._region;
 631       size_t region_live_data = sorted_regions[i]._live_data;
 632       size_t promotion_need = (size_t) (region_live_data * ShenandoahPromoEvacWaste);
 633       if (old_consumed + promotion_need <= old_available) {
 634         old_consumed += promotion_need;
 635         candidate_regions_for_promotion_by_copy[region->index()] = true;
 636         selected_regions++;
 637         selected_live += region_live_data;
 638       } else {
 639         // We rejected this promotable region from the collection set because we had no room to hold its copy.
 640         // Add this region to promo potential for next GC.
 641         promo_potential += region_live_data;
 642         assert(!candidate_regions_for_promotion_by_copy[region->index()], "Shouldn't be selected");
 643       }
 644       // We keep going even if one region is excluded from selection because we need to accumulate all eligible
 645       // regions that are not preselected into promo_potential
 646     }
 647     log_debug(gc)("Preselected %zu regions containing %zu live bytes,"
 648                  " consuming: %zu of budgeted: %zu",
 649                  selected_regions, selected_live, old_consumed, old_available);
 650   }
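  // Budget check illustration (made-up values): a candidate with 3 MB of live data and
  // ShenandoahPromoEvacWaste = 1.2 needs 3.6 MB of the old budget. Candidates are admitted in
  // increasing order of live data, so once one no longer fits, the remaining (larger) candidates
  // cannot fit either and each is added to promo_potential for a later cycle instead.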
 651 
 652   heap->old_generation()->set_pad_for_promote_in_place(promote_in_place_pad);
 653   heap->old_generation()->set_promotion_potential(promo_potential);
 654   return old_consumed;
 655 }
 656 
 657 void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) {
 658   ShenandoahHeap* heap = ShenandoahHeap::heap();
 659   ShenandoahCollectionSet* collection_set = heap->collection_set();
 660   bool is_generational = heap->mode()->is_generational();
 661 
 662   assert(!heap->is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
 663   assert(!is_old(), "Only YOUNG and GLOBAL GC perform evacuations");
 664   {
 665     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_update_region_states :
 666                             ShenandoahPhaseTimings::degen_gc_final_update_region_states);
 667     ShenandoahFinalMarkUpdateRegionStateClosure cl(complete_marking_context());
 668     parallel_heap_region_iterate(&cl);
 669 
 670     if (is_young()) {
 671       // We always need to update the watermark for old regions. If there
 672       // are mixed collections pending, we also need to synchronize the
 673       // pinned status for old regions. Since we are already visiting every
 674       // old region here, go ahead and sync the pin status too.
 675       ShenandoahFinalMarkUpdateRegionStateClosure old_cl(nullptr);
 676       heap->old_generation()->parallel_heap_region_iterate(&old_cl);
 677     }
 678   }
 679 
 680   // Tally the census counts and compute the adaptive tenuring threshold
 681   if (is_generational && ShenandoahGenerationalAdaptiveTenuring && !ShenandoahGenerationalCensusAtEvac) {
 682     // Objects above TAMS weren't included in the age census. Since they were all
 683     // allocated in this cycle they belong in the age 0 cohort. We walk over all
 684     // young regions and sum the volume of objects between TAMS and top.
 685     ShenandoahUpdateCensusZeroCohortClosure age0_cl(complete_marking_context());
 686     heap->young_generation()->heap_region_iterate(&age0_cl);
 687     size_t age0_pop = age0_cl.get_age0_population();
 688 
 689     // Update the global census, including the missed age 0 cohort above,
 690     // along with the census done during marking, and compute the tenuring threshold.
 691     ShenandoahAgeCensus* census = ShenandoahGenerationalHeap::heap()->age_census();
 692     census->update_census(age0_pop);
 693 #ifndef PRODUCT
 694     size_t total_pop = age0_cl.get_total_population();
 695     size_t total_census = census->get_total();
 696     // Usually total_pop > total_census, but not by too much.
 697     // We use integer division so anything up to just less than 2 is considered
 698     // reasonable, and the "+1" is to avoid divide-by-zero.
 699     assert((total_pop+1)/(total_census+1) ==  1, "Extreme divergence: "
 700            "%zu/%zu", total_pop, total_census);
 701 #endif
 702   }
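  // For example (illustrative only): if roughly 8 MB worth of words was allocated above TAMS in
  // young regions during this cycle, that volume is credited to the age-0 cohort via
  // update_census() before the adaptive tenuring threshold is recomputed.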
 703 
 704   {
 705     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::choose_cset :
 706                             ShenandoahPhaseTimings::degen_gc_choose_cset);
 707 
 708     collection_set->clear();
 709     ShenandoahHeapLocker locker(heap->lock());
 710     if (is_generational) {
 711       // Seed the collection set with resource area-allocated
 712       // preselected regions, which are removed when we exit this scope.
 713       ShenandoahCollectionSetPreselector preselector(collection_set, heap->num_regions());
 714 
 715       // Find the amount that will be promoted, regions that will be promoted in
 716       // place, and preselect older regions that will be promoted by evacuation.
 717       compute_evacuation_budgets(heap);
 718 
 719       // Choose the collection set, including the regions preselected above for
 720       // promotion into the old generation.
 721       _heuristics->choose_collection_set(collection_set);
 722       if (!collection_set->is_empty()) {
 723         // only make use of evacuation budgets when we are evacuating
 724         adjust_evacuation_budgets(heap, collection_set);
 725       }
 726 
 727       if (is_global()) {
 728         // We have just chosen a collection set for a global cycle. The mark bitmap covering old regions is complete, so
 729         // the remembered set scan can use that to avoid walking into garbage. When the next old mark begins, we will
 730         // use the mark bitmap to make the old regions parsable by coalescing and filling any unmarked objects. Thus,
 731         // we prepare for old collections by remembering which regions are old at this time. Note that any objects
 732         // promoted into old regions will be above TAMS, and so will be considered marked. However, free regions that
 733         // become old after this point will not be covered correctly by the mark bitmap, so we must be careful not to
 734         // coalesce those regions. Only the old regions which are not part of the collection set at this point are
 735         // eligible for coalescing. As implemented now, this has the side effect of possibly initiating mixed-evacuations
 736         // after a global cycle for old regions that were not included in this collection set.
 737         heap->old_generation()->prepare_for_mixed_collections_after_global_gc();
 738       }
 739     } else {
 740       _heuristics->choose_collection_set(collection_set);
 741     }
 742   }
 743 
 744 
 745   {
 746     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset :
 747                             ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
 748     ShenandoahHeapLocker locker(heap->lock());
 749     size_t young_cset_regions, old_cset_regions;
 750 
 751     // We are preparing for evacuation.  At this time, we ignore cset region tallies.
 752     size_t first_old, last_old, num_old;
 753     heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
 754     // Free set construction uses reserve quantities, because they are known to be valid here
 755     heap->free_set()->finish_rebuild(young_cset_regions, old_cset_regions, num_old, true);
 756   }
 757 }
 758 
 759 bool ShenandoahGeneration::is_bitmap_clear() {
 760   ShenandoahHeap* heap = ShenandoahHeap::heap();
 761   ShenandoahMarkingContext* context = heap->marking_context();
 762   const size_t num_regions = heap->num_regions();
 763   for (size_t idx = 0; idx < num_regions; idx++) {
 764     ShenandoahHeapRegion* r = heap->get_region(idx);
 765     if (contains(r) && r->is_affiliated()) {
 766       if (heap->is_bitmap_slice_committed(r) && (context->top_at_mark_start(r) > r->bottom()) &&
 767           !context->is_bitmap_range_within_region_clear(r->bottom(), r->end())) {
 768         return false;
 769       }
 770     }
 771   }
 772   return true;
 773 }
 774 
 775 void ShenandoahGeneration::set_mark_complete() {
 776   _is_marking_complete.set();
 777 }
 778 
 779 void ShenandoahGeneration::set_mark_incomplete() {
 780   _is_marking_complete.unset();
 781 }
 782 
 783 ShenandoahMarkingContext* ShenandoahGeneration::complete_marking_context() {
 784   assert(is_mark_complete(), "Marking must be completed.");
 785   return ShenandoahHeap::heap()->marking_context();
 786 }
 787 
 788 void ShenandoahGeneration::cancel_marking() {
 789   log_info(gc)("Cancel marking: %s", name());
 790   if (is_concurrent_mark_in_progress()) {
 791     set_mark_incomplete();
 792   }
 793   _task_queues->clear();
 794   ref_processor()->abandon_partial_discovery();
 795   set_concurrent_mark_in_progress(false);
 796 }
 797 
 798 ShenandoahGeneration::ShenandoahGeneration(ShenandoahGenerationType type,
 799                                            uint max_workers,
 800                                            size_t max_capacity,
 801                                            size_t soft_max_capacity) :
 802   _type(type),
 803   _task_queues(new ShenandoahObjToScanQueueSet(max_workers)),
 804   _ref_processor(new ShenandoahReferenceProcessor(MAX2(max_workers, 1U))),
 805   _affiliated_region_count(0), _humongous_waste(0), _evacuation_reserve(0),
 806   _used(0), _bytes_allocated_since_gc_start(0),
 807   _max_capacity(max_capacity), _soft_max_capacity(soft_max_capacity),
 808   _heuristics(nullptr)
 809 {
 810   _is_marking_complete.set();
 811   assert(max_workers > 0, "At least one queue");
 812   for (uint i = 0; i < max_workers; ++i) {
 813     ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
 814     _task_queues->register_queue(i, task_queue);
 815   }
 816 }
 817 
 818 ShenandoahGeneration::~ShenandoahGeneration() {
 819   for (uint i = 0; i < _task_queues->size(); ++i) {
 820     ShenandoahObjToScanQueue* q = _task_queues->queue(i);
 821     delete q;
 822   }
 823   delete _task_queues;
 824 }
 825 
 826 void ShenandoahGeneration::reserve_task_queues(uint workers) {
 827   _task_queues->reserve(workers);
 828 }
 829 
 830 ShenandoahObjToScanQueueSet* ShenandoahGeneration::old_gen_task_queues() const {
 831   return nullptr;
 832 }
 833 
 834 void ShenandoahGeneration::scan_remembered_set(bool is_concurrent) {
 835   assert(is_young(), "Should only scan remembered set for young generation.");
 836 
 837   ShenandoahGenerationalHeap* const heap = ShenandoahGenerationalHeap::heap();
 838   uint nworkers = heap->workers()->active_workers();
 839   reserve_task_queues(nworkers);
 840 
 841   ShenandoahReferenceProcessor* rp = ref_processor();
 842   ShenandoahRegionChunkIterator work_list(nworkers);
 843   ShenandoahScanRememberedTask task(task_queues(), old_gen_task_queues(), rp, &work_list, is_concurrent);
 844   heap->assert_gc_workers(nworkers);
 845   heap->workers()->run_task(&task);
 846   if (ShenandoahEnableCardStats) {
 847     ShenandoahScanRemembered* scanner = heap->old_generation()->card_scan();
 848     assert(scanner != nullptr, "Not generational");
 849     scanner->log_card_stats(nworkers, CARD_STAT_SCAN_RS);
 850   }
 851 }
 852 
 853 size_t ShenandoahGeneration::increment_affiliated_region_count() {
 854   shenandoah_assert_heaplocked_or_safepoint();
 855   // During full gc, multiple GC worker threads may change region affiliations without a lock.  No lock is enforced
 856   // on read and write of _affiliated_region_count.  At the end of full gc, a single thread overwrites the count with
 857   // a coherent value.
 858   return Atomic::add(&_affiliated_region_count, (size_t) 1);
 859 }
 860 
 861 size_t ShenandoahGeneration::decrement_affiliated_region_count() {
 862   shenandoah_assert_heaplocked_or_safepoint();
 863   // During full gc, multiple GC worker threads may change region affiliations without a lock.  No lock is enforced
 864   // on read and write of _affiliated_region_count.  At the end of full gc, a single thread overwrites the count with
 865   // a coherent value.
 866   auto affiliated_region_count = Atomic::sub(&_affiliated_region_count, (size_t) 1);
 867   assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
 868          (used() + _humongous_waste <= affiliated_region_count * ShenandoahHeapRegion::region_size_bytes()),
 869          "used + humongous cannot exceed regions");
 870   return affiliated_region_count;
 871 }
 872 
 873 size_t ShenandoahGeneration::decrement_affiliated_region_count_without_lock() {
 874   return Atomic::sub(&_affiliated_region_count, (size_t) 1);
 875 }
 876 
 877 size_t ShenandoahGeneration::increase_affiliated_region_count(size_t delta) {
 878   shenandoah_assert_heaplocked_or_safepoint();
 879   return Atomic::add(&_affiliated_region_count, delta);
 880 }
 881 
 882 size_t ShenandoahGeneration::decrease_affiliated_region_count(size_t delta) {
 883   shenandoah_assert_heaplocked_or_safepoint();
 884   assert(Atomic::load(&_affiliated_region_count) >= delta, "Affiliated region count cannot be negative");
 885 
 886   auto const affiliated_region_count = Atomic::sub(&_affiliated_region_count, delta);
 887   assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
 888          (_used + _humongous_waste <= affiliated_region_count * ShenandoahHeapRegion::region_size_bytes()),
 889          "used + humongous cannot exceed regions");
 890   return affiliated_region_count;
 891 }
 892 
 893 void ShenandoahGeneration::establish_usage(size_t num_regions, size_t num_bytes, size_t humongous_waste) {
 894   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
 895   Atomic::store(&_affiliated_region_count, num_regions);
 896   Atomic::store(&_used, num_bytes);
 897   _humongous_waste = humongous_waste;
 898 }
 899 
 900 void ShenandoahGeneration::increase_used(size_t bytes) {
 901   Atomic::add(&_used, bytes);
 902 }
 903 
 904 void ShenandoahGeneration::increase_humongous_waste(size_t bytes) {
 905   if (bytes > 0) {
 906     Atomic::add(&_humongous_waste, bytes);
 907   }
 908 }
 909 
 910 void ShenandoahGeneration::decrease_humongous_waste(size_t bytes) {
 911   if (bytes > 0) {
 912     assert(ShenandoahHeap::heap()->is_full_gc_in_progress() || (_humongous_waste >= bytes),
 913            "Waste (%zu) cannot be negative (after subtracting %zu)", _humongous_waste, bytes);
 914     Atomic::sub(&_humongous_waste, bytes);
 915   }
 916 }
 917 
 918 void ShenandoahGeneration::decrease_used(size_t bytes) {
 919   assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
 920          (_used >= bytes), "cannot reduce bytes used by generation below zero");
 921   Atomic::sub(&_used, bytes);
 922 }
 923 
 924 size_t ShenandoahGeneration::used_regions() const {
 925   return Atomic::load(&_affiliated_region_count);
 926 }
 927 
 928 size_t ShenandoahGeneration::free_unaffiliated_regions() const {
 929   size_t result = max_capacity() / ShenandoahHeapRegion::region_size_bytes();
 930   auto const used_regions = this->used_regions();
 931   if (used_regions > result) {
 932     result = 0;
 933   } else {
 934     result -= used_regions;
 935   }
 936   return result;
 937 }
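// Example with hypothetical numbers: a 512 MB max capacity and 4 MB regions give 128 regions in
// total; if 100 regions are currently affiliated, 28 remain unaffiliated. The result saturates
// at zero rather than going negative when used_regions() temporarily exceeds the capacity-derived
// count.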
 938 
 939 size_t ShenandoahGeneration::used_regions_size() const {
 940   return used_regions() * ShenandoahHeapRegion::region_size_bytes();
 941 }
 942 
 943 size_t ShenandoahGeneration::available() const {
 944   return available(max_capacity());
 945 }
 946 
 947 // For ShenandoahYoungGeneration, this includes the young available memory that may have been reserved for the Collector.
 948 size_t ShenandoahGeneration::available_with_reserve() const {
 949   return available(max_capacity());
 950 }
 951 
 952 size_t ShenandoahGeneration::soft_available() const {
 953   return available(soft_max_capacity());
 954 }
 955 
 956 size_t ShenandoahGeneration::available(size_t capacity) const {
 957   size_t in_use = used() + get_humongous_waste();
 958   return in_use > capacity ? 0 : capacity - in_use;
 959 }
 960 
 961 size_t ShenandoahGeneration::increase_capacity(size_t increment) {
 962   shenandoah_assert_heaplocked_or_safepoint();
 963 
 964   // We do not enforce that new capacity >= heap->max_size_for(this).  The maximum generation size is treated as a rule of thumb
 965   // which may be violated during certain transitions, such as when we are forcing transfers for the purpose of promoting regions
 966   // in place.
 967   assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
 968          (_max_capacity + increment <= ShenandoahHeap::heap()->max_capacity()), "Generation cannot be larger than heap size");
 969   assert(increment % ShenandoahHeapRegion::region_size_bytes() == 0, "Generation capacity must be multiple of region size");
 970   _max_capacity += increment;
 971 
 972   // This detects arithmetic wraparound on _used
 973   assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
 974          (used_regions_size() >= used()),
 975          "Affiliated regions must hold more than what is currently used");
 976   return _max_capacity;
 977 }
 978 
 979 size_t ShenandoahGeneration::set_capacity(size_t byte_size) {
 980   shenandoah_assert_heaplocked_or_safepoint();
 981   _max_capacity = byte_size;
 982   return _max_capacity;
 983 }
 984 
 985 size_t ShenandoahGeneration::decrease_capacity(size_t decrement) {
 986   shenandoah_assert_heaplocked_or_safepoint();
 987 
 988   // We do not enforce that new capacity >= heap->min_size_for(this).  The minimum generation size is treated as a rule of thumb
 989   // which may be violated during certain transitions, such as when we are forcing transfers for the purpose of promoting regions
 990   // in place.
 991   assert(decrement % ShenandoahHeapRegion::region_size_bytes() == 0, "Generation capacity must be multiple of region size");
 992   assert(_max_capacity >= decrement, "Generation capacity cannot be negative");
 993 
 994   _max_capacity -= decrement;
 995 
 996   // This detects arithmetic wraparound on _used
 997   assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
 998          (used_regions_size() >= used()),
 999          "Affiliated regions must hold more than what is currently used");
1000   assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
1001          (_used <= _max_capacity), "Cannot use more than capacity");
1002   assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
1003          (used_regions_size() <= _max_capacity),
1004          "Cannot use more than capacity");
1005   return _max_capacity;
1006 }
1007 
1008 void ShenandoahGeneration::record_success_concurrent(bool abbreviated) {
1009   heuristics()->record_success_concurrent();
1010   ShenandoahHeap::heap()->shenandoah_policy()->record_success_concurrent(is_young(), abbreviated);
1011 }