/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahCollectionSetPreselector.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegionClosures.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"

#include "utilities/quickSort.hpp"

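// Closure that resets marking bitmaps. Behavior is selected at compile time:
//  - PREPARE_FOR_CURRENT_CYCLE: when true, regions are also readied for the
//    marking cycle that is about to start (TAMS captured, live data cleared);
//    when false, only the bitmaps themselves are cleared.
//  - FULL_GC: when true, TAMS and live data are reset for every region rather
//    than only the active ones.
// A region whose bitmap slice is not committed cannot be cleared here; it is
// tagged with set_needs_bitmap_reset() so that the clear happens later.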
template <bool PREPARE_FOR_CURRENT_CYCLE, bool FULL_GC = false>
class ShenandoahResetBitmapClosure final : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap*           _heap;
  ShenandoahMarkingContext* _ctx;

public:
  explicit ShenandoahResetBitmapClosure() :
    ShenandoahHeapRegionClosure(), _heap(ShenandoahHeap::heap()), _ctx(_heap->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* region) override {
    assert(!_heap->is_uncommit_in_progress(), "Cannot uncommit bitmaps while resetting them.");
    if (PREPARE_FOR_CURRENT_CYCLE) {
      if (region->need_bitmap_reset() && _heap->is_bitmap_slice_committed(region)) {
        _ctx->clear_bitmap(region);
      } else {
        region->set_needs_bitmap_reset();
      }
      // Capture Top At Mark Start for this generation.
      if (FULL_GC || region->is_active()) {
        // Reset live data and set TAMS optimistically. We would recheck these under the pause
        // anyway to capture any updates that happened since now.
        _ctx->capture_top_at_mark_start(region);
        region->clear_live_data();
      }
    } else {
      if (_heap->is_bitmap_slice_committed(region)) {
        _ctx->clear_bitmap(region);
        region->unset_needs_bitmap_reset();
      } else {
        region->set_needs_bitmap_reset();
      }
    }
  }

  bool is_thread_safe() override { return true; }
};

// Merge the write-version of the card-table into the read-version, preserving
// cards that are already dirty in the read-version. (Contrast with
// ShenandoahCopyWriteCardTableToRead below, which replaces the read-version.)
class ShenandoahMergeWriteTable: public ShenandoahHeapRegionClosure {
private:
  ShenandoahScanRemembered* _scanner;
public:
  ShenandoahMergeWriteTable(ShenandoahScanRemembered* scanner) : _scanner(scanner) {}

  void heap_region_do(ShenandoahHeapRegion* r) override {
    assert(r->is_old(), "Don't waste time doing this for non-old regions");
    _scanner->merge_write_table(r->bottom(), ShenandoahHeapRegion::region_size_words());
  }

  bool is_thread_safe() override {
    return true;
  }
};

class ShenandoahCopyWriteCardTableToRead: public ShenandoahHeapRegionClosure {
private:
  ShenandoahScanRemembered* _scanner;
public:
  ShenandoahCopyWriteCardTableToRead(ShenandoahScanRemembered* scanner) : _scanner(scanner) {}

  void heap_region_do(ShenandoahHeapRegion* region) override {
    assert(region->is_old(), "Don't waste time doing this for non-old regions");
    _scanner->reset_remset(region->bottom(), ShenandoahHeapRegion::region_size_words());
  }

  bool is_thread_safe() override { return true; }
};

// Add [TAMS, top) volume over young regions. Used to correct the age 0 cohort
// census for adaptive tenuring when the census is taken during marking.
// In non-product builds, for the purposes of verification, we also collect the
// total live objects in young regions.
class ShenandoahUpdateCensusZeroCohortClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
  // Population size units are words (not bytes)
  size_t _age0_pop;                // running tally of age0 population size
  size_t _total_pop;               // total live population size
public:
  explicit ShenandoahUpdateCensusZeroCohortClosure(ShenandoahMarkingContext* ctx)
    : _ctx(ctx), _age0_pop(0), _total_pop(0) {}

  void heap_region_do(ShenandoahHeapRegion* r) override {
    if (_ctx != nullptr && r->is_active()) {
      assert(r->is_young(), "Young regions only");
      HeapWord* tams = _ctx->top_at_mark_start(r);
      HeapWord* top  = r->top();
      if (top > tams) {
        _age0_pop += pointer_delta(top, tams);
      }
      // TODO: check significance of _ctx != nullptr above, can that
      // spoof _total_pop in some corner cases?
      NOT_PRODUCT(_total_pop += r->get_live_data_words();)
    }
  }

  size_t get_age0_population()  const { return _age0_pop; }
  size_t get_total_population() const { return _total_pop; }
};

void ShenandoahGeneration::confirm_heuristics_mode() {
  if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
    vm_exit_during_initialization(
            err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                    _heuristics->name()));
  }
  if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
    vm_exit_during_initialization(
            err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                    _heuristics->name()));
  }
}

ShenandoahHeuristics* ShenandoahGeneration::initialize_heuristics(ShenandoahMode* gc_mode) {
  _heuristics = gc_mode->initialize_heuristics(this);
  _heuristics->set_guaranteed_gc_interval(ShenandoahGuaranteedGCInterval);
  confirm_heuristics_mode();
  return _heuristics;
}

size_t ShenandoahGeneration::bytes_allocated_since_gc_start() const {
  return Atomic::load(&_bytes_allocated_since_gc_start);
}

void ShenandoahGeneration::reset_bytes_allocated_since_gc_start() {
  Atomic::store(&_bytes_allocated_since_gc_start, (size_t)0);
}

void ShenandoahGeneration::increase_allocated(size_t bytes) {
  Atomic::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed);
}

void ShenandoahGeneration::set_evacuation_reserve(size_t new_val) {
  _evacuation_reserve = new_val;
}

size_t ShenandoahGeneration::get_evacuation_reserve() const {
  return _evacuation_reserve;
}

void ShenandoahGeneration::augment_evacuation_reserve(size_t increment) {
  _evacuation_reserve += increment;
}

void ShenandoahGeneration::log_status(const char *msg) const {
  typedef LogTarget(Info, gc, ergo) LogGcInfo;

  if (!LogGcInfo::is_enabled()) {
    return;
  }

  // Not under a lock here, so read each value once into a local, so that the
  // printed byte size and the unit chosen for it come from the same snapshot.
  const size_t v_used = used();
  const size_t v_used_regions = used_regions_size();
  const size_t v_soft_max_capacity = soft_max_capacity();
  const size_t v_max_capacity = max_capacity();
  const size_t v_available = available();
  const size_t v_humongous_waste = get_humongous_waste();

  const LogGcInfo target;
  LogStream ls(target);
  ls.print("%s: ", msg);
  if (_type != NON_GEN) {
    ls.print("%s generation ", name());
  }

  ls.print_cr("used: " PROPERFMT ", used regions: " PROPERFMT ", humongous waste: " PROPERFMT
              ", soft capacity: " PROPERFMT ", max capacity: " PROPERFMT ", available: " PROPERFMT,
              PROPERFMTARGS(v_used), PROPERFMTARGS(v_used_regions), PROPERFMTARGS(v_humongous_waste),
              PROPERFMTARGS(v_soft_max_capacity), PROPERFMTARGS(v_max_capacity), PROPERFMTARGS(v_available));
}

template <bool PREPARE_FOR_CURRENT_CYCLE, bool FULL_GC>
void ShenandoahGeneration::reset_mark_bitmap() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  heap->assert_gc_workers(heap->workers()->active_workers());

  set_mark_incomplete();

  ShenandoahResetBitmapClosure<PREPARE_FOR_CURRENT_CYCLE, FULL_GC> closure;
  parallel_heap_region_iterate_free(&closure);
}
// Explicit instantiations for the template combinations used by callers
template void ShenandoahGeneration::reset_mark_bitmap<true, false>();
template void ShenandoahGeneration::reset_mark_bitmap<true, true>();
template void ShenandoahGeneration::reset_mark_bitmap<false, false>();

// The ideal is to swap the remembered set so the safepoint effort is no more than a few pointer manipulations.
// However, limitations in the implementation of the mutator write-barrier make it difficult to simply change the
// location of the card table.  So the interim implementation of swap_remembered_set will copy the write-table
// onto the read-table and will then clear the write-table.
void ShenandoahGeneration::swap_remembered_set() {
  // Must be sure that marking is complete before we swap remembered set.
  ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
  heap->assert_gc_workers(heap->workers()->active_workers());
  shenandoah_assert_safepoint();

  ShenandoahOldGeneration* old_generation = heap->old_generation();
  ShenandoahCopyWriteCardTableToRead task(old_generation->card_scan());
  old_generation->parallel_heap_region_iterate(&task);
}

// Merge the write-version of the card-table into the read-version, preserving
// cards that are already dirty in the read-version. The work is done at a
// safepoint and in parallel by the GC worker threads.
void ShenandoahGeneration::merge_write_table() {
  // This should only happen for degenerated cycles
  ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
  heap->assert_gc_workers(heap->workers()->active_workers());
  shenandoah_assert_safepoint();

  ShenandoahOldGeneration* old_generation = heap->old_generation();
  ShenandoahMergeWriteTable task(old_generation->card_scan());
  old_generation->parallel_heap_region_iterate(&task);
}

void ShenandoahGeneration::prepare_gc() {
  reset_mark_bitmap<true>();
}

void ShenandoahGeneration::parallel_heap_region_iterate_free(ShenandoahHeapRegionClosure* cl) {
  ShenandoahHeap::heap()->parallel_heap_region_iterate(cl);
}

void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap) {
  shenandoah_assert_generational();

  ShenandoahOldGeneration* const old_generation = heap->old_generation();
  ShenandoahYoungGeneration* const young_generation = heap->young_generation();

  // During initialization and phase changes, it is more likely that fewer objects die young and old-gen
  // memory is not yet full (or is in the process of being replaced).  During these times especially, it
  // is beneficial to loan memory from old-gen to young-gen during the evacuation and update-refs phases
  // of execution.

  // Calculate EvacuationReserve before PromotionReserve.  Evacuation is more critical than promotion.
  // If we cannot evacuate old-gen, we will not be able to reclaim old-gen memory.  Promotions are less
  // critical.  If we cannot promote, there may be degradation of young-gen memory because old objects
  // accumulate there until they can be promoted.  This increases the young-gen marking and evacuation work.

  // First priority is to reclaim the easy garbage out of young-gen.

  // maximum_young_evacuation_reserve is upper bound on memory to be evacuated out of young
  const size_t maximum_young_evacuation_reserve = (young_generation->max_capacity() * ShenandoahEvacReserve) / 100;
  const size_t young_evacuation_reserve = MIN2(maximum_young_evacuation_reserve, young_generation->available_with_reserve());
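  // For example (illustrative numbers only): with a 1024 MB young generation and
  // ShenandoahEvacReserve=5, the upper bound is roughly 51 MB; the reserve is that
  // bound or the memory currently available with reserves, whichever is smaller.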

  // maximum_old_evacuation_reserve is an upper bound on memory evacuated from old and evacuated to old (promoted),
  // clamped by the old generation space available.
  //
  // Here's the algebra.
  // Let SOEP = ShenandoahOldEvacRatioPercent,
  //     OE = old evac,
  //     YE = young evac, and
  //     TE = total evac = OE + YE
  // By definition:
  //            SOEP/100 = OE/TE
  //                     = OE/(OE+YE)
  //  => SOEP/(100-SOEP) = OE/((OE+YE)-OE)         // componendo-dividendo: If a/b = c/d, then a/(b-a) = c/(d-c)
  //                     = OE/YE
  //  =>              OE = YE*SOEP/(100-SOEP)
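  //
  // For example, with SOEP = 25: OE = YE*25/75 = YE/3, i.e. old evacuation is
  // budgeted a third of the young evacuation reserve, which is 25% of the
  // total: OE/TE = (YE/3)/(YE + YE/3) = 1/4.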

  // We have to be careful in the event that SOEP is set to 100 by the user.
  assert(ShenandoahOldEvacRatioPercent <= 100, "Error");
  const size_t old_available = old_generation->available();
  const size_t maximum_old_evacuation_reserve = (ShenandoahOldEvacRatioPercent == 100) ?
    old_available : MIN2((maximum_young_evacuation_reserve * ShenandoahOldEvacRatioPercent) / (100 - ShenandoahOldEvacRatioPercent),
                          old_available);

  // Second priority is to reclaim garbage out of old-gen if there are old-gen collection candidates.  Third priority
  // is to promote as much as we have room to promote.  However, if old-gen memory is in short supply, this means young
  // GC is operating under "duress" and was unable to transfer the memory that we would normally expect.  In this case,
  // old-gen will refrain from compacting itself in order to allow a quicker young-gen cycle (by avoiding the update-refs
  // through ALL of old-gen).  If there is some memory available in old-gen, we will use this for promotions as promotions
  // do not add to the update-refs burden of GC.

  size_t old_evacuation_reserve, old_promo_reserve;
  if (is_global()) {
    // Global GC is typically triggered by user invocation of System.gc(), and typically indicates that there is lots
    // of garbage to be reclaimed because we are starting a new phase of execution.  Marking for global GC may take
    // significantly longer than typical young marking because we must mark through all old objects.  To expedite
    // evacuation and update-refs, we give emphasis to reclaiming garbage first, wherever that garbage is found.
    // Global GC will adjust generation sizes to accommodate the collection set it chooses.

    // Set old_promo_reserve to enforce that no regions are preselected for promotion.  Such regions typically
    // have relatively high memory utilization.  We still call select_aged_regions() because this will prepare for
    // promotions in place, if relevant.
    old_promo_reserve = 0;

    // Dedicate all available old memory to old_evacuation reserve.  This may be small, because old-gen is only
    // expanded based on an existing mixed evacuation workload at the end of the previous GC cycle.  We'll expand
    // the budget for evacuation of old during GLOBAL cset selection.
    old_evacuation_reserve = maximum_old_evacuation_reserve;
  } else if (old_generation->has_unprocessed_collection_candidates()) {
    // We reserved all old-gen memory at end of previous GC to hold anticipated evacuations to old-gen.  If this is
    // mixed evacuation, reserve all of this memory for compaction of old-gen and do not promote.  Prioritize compaction
    // over promotion in order to defragment OLD so that it will be better prepared to efficiently receive promoted memory.
    old_evacuation_reserve = maximum_old_evacuation_reserve;
    old_promo_reserve = 0;
  } else {
    // There are no old-gen collection candidates, so dedicate all of this memory to promotion.
    old_evacuation_reserve = 0;
    old_promo_reserve = maximum_old_evacuation_reserve;
  }
  assert(old_evacuation_reserve <= old_available, "Error");

  // We see too many old-evacuation failures if we force ourselves to evacuate into regions that are not initially empty.
  // So we limit the old-evacuation reserve to unfragmented memory.  Even so, old-evacuation is free to fill in nooks and
  // crannies within existing partially used regions and it generally tries to do so.
  const size_t old_free_unfragmented = old_generation->free_unaffiliated_regions() * ShenandoahHeapRegion::region_size_bytes();
  if (old_evacuation_reserve > old_free_unfragmented) {
    const size_t delta = old_evacuation_reserve - old_free_unfragmented;
    old_evacuation_reserve -= delta;
    // Let promo consume fragments of old-gen memory if not global
    if (!is_global()) {
      old_promo_reserve += delta;
    }
  }

  // Preselect regions for promotion by evacuation (obtaining the live data to seed promoted_reserve),
  // and identify regions that will promote in place. These use the tenuring threshold.
  const size_t consumed_by_advance_promotion = select_aged_regions(old_promo_reserve);
  assert(consumed_by_advance_promotion <= maximum_old_evacuation_reserve, "Cannot promote more than available old-gen memory");

  // Note that old_promo_reserve might not be entirely consumed by advance promotion (i.e. consumed_by_advance_promotion
  // may be smaller).  Do not transfer the unused portion to old_evacuation_reserve, because this memory is likely very
  // fragmented, and we do not want to increase the likelihood of old evacuation failure.
  young_generation->set_evacuation_reserve(young_evacuation_reserve);
  old_generation->set_evacuation_reserve(old_evacuation_reserve);
  old_generation->set_promoted_reserve(consumed_by_advance_promotion);

  // There is no need to expand OLD because all memory used here was set aside at end of previous GC, except in the
  // case of a GLOBAL gc.  During choose_collection_set() of GLOBAL, old will be expanded on demand.
}

// Having chosen the collection set, adjust the budgets for generational mode based on its composition.  Note
// that young_generation->available() now knows about recently discovered immediate garbage.
//
void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap, ShenandoahCollectionSet* const collection_set) {
  shenandoah_assert_generational();
  // We may find that old_evacuation_reserve and/or loaned_for_young_evacuation are not fully consumed, in which case we may
  // be able to increase regions_available_to_loan

  // The role of adjust_evacuation_budgets() is to compute the correct value of regions_available_to_loan and to make
  // effective use of this memory, including the remnant memory within these regions that may result from rounding loan to
  // integral number of regions.  Excess memory that is available to be loaned is applied to an allocation supplement,
  // which allows mutators to allocate memory beyond the current capacity of young-gen on the promise that the loan
  // will be repaid as soon as we finish updating references for the recently evacuated collection set.

  // We cannot recalculate regions_available_to_loan by simply dividing old_generation->available() by region_size_bytes
  // because the available memory may be distributed between many partially occupied regions that are already holding old-gen
  // objects.  Memory in partially occupied regions is not "available" to be loaned.  Note that an increase in old-gen
  // available that results from a decrease in memory consumed by old evacuation is not necessarily available to be loaned
  // to young-gen.

  size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
  ShenandoahOldGeneration* const old_generation = heap->old_generation();
  ShenandoahYoungGeneration* const young_generation = heap->young_generation();

  size_t old_evacuated = collection_set->get_old_bytes_reserved_for_evacuation();
  size_t old_evacuated_committed = (size_t) (ShenandoahOldEvacWaste * double(old_evacuated));
  size_t old_evacuation_reserve = old_generation->get_evacuation_reserve();

  if (old_evacuated_committed > old_evacuation_reserve) {
    // This should only happen due to round-off errors when enforcing ShenandoahOldEvacWaste
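    // (33/32 == 1.03125, so the assert below tolerates up to 3.125% of overshoot.)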
    assert(old_evacuated_committed <= (33 * old_evacuation_reserve) / 32,
           "Round-off errors should be less than 3.125%%, committed: " SIZE_FORMAT ", reserved: " SIZE_FORMAT,
           old_evacuated_committed, old_evacuation_reserve);
    old_evacuated_committed = old_evacuation_reserve;
    // Leave old_evac_reserve as previously configured
  } else if (old_evacuated_committed < old_evacuation_reserve) {
    // This happens if the old-gen collection consumes less than full budget.
    old_evacuation_reserve = old_evacuated_committed;
    old_generation->set_evacuation_reserve(old_evacuation_reserve);
  }

  size_t young_advance_promoted = collection_set->get_young_bytes_to_be_promoted();
  size_t young_advance_promoted_reserve_used = (size_t) (ShenandoahPromoEvacWaste * double(young_advance_promoted));

  size_t young_evacuated = collection_set->get_young_bytes_reserved_for_evacuation();
  size_t young_evacuated_reserve_used = (size_t) (ShenandoahEvacWaste * double(young_evacuated));

  size_t total_young_available = young_generation->available_with_reserve();
  assert(young_evacuated_reserve_used <= total_young_available, "Cannot evacuate more than is available in young");
  young_generation->set_evacuation_reserve(young_evacuated_reserve_used);

  size_t old_available = old_generation->available();
  // Now that we've established the collection set, we know how much memory is really required by old-gen for evacuation
  // and promotion reserves.  Try shrinking OLD now in case that gives us a bit more runway for mutator allocations during
  // evac and update phases.
  size_t old_consumed = old_evacuated_committed + young_advance_promoted_reserve_used;

  if (old_available < old_consumed) {
    // This can happen due to round-off errors when adding the results of truncated integer arithmetic.
    // We've already truncated old_evacuated_committed.  Truncate young_advance_promoted_reserve_used here.
    assert(young_advance_promoted_reserve_used <= (33 * (old_available - old_evacuated_committed)) / 32,
           "Round-off errors should be less than 3.125%%, committed: " SIZE_FORMAT ", reserved: " SIZE_FORMAT,
           young_advance_promoted_reserve_used, old_available - old_evacuated_committed);
    young_advance_promoted_reserve_used = old_available - old_evacuated_committed;
    old_consumed = old_evacuated_committed + young_advance_promoted_reserve_used;
  }

  assert(old_available >= old_consumed, "Cannot consume (" SIZE_FORMAT ") more than is available (" SIZE_FORMAT ")",
         old_consumed, old_available);
  size_t excess_old = old_available - old_consumed;
  size_t unaffiliated_old_regions = old_generation->free_unaffiliated_regions();
  size_t unaffiliated_old = unaffiliated_old_regions * region_size_bytes;
  assert(old_available >= unaffiliated_old, "Unaffiliated old is a subset of old available");

  // Make sure old_evac_committed is unaffiliated
  if (old_evacuated_committed > 0) {
    if (unaffiliated_old > old_evacuated_committed) {
      size_t giveaway = unaffiliated_old - old_evacuated_committed;
      size_t giveaway_regions = giveaway / region_size_bytes;  // round down
      if (giveaway_regions > 0) {
        excess_old = MIN2(excess_old, giveaway_regions * region_size_bytes);
      } else {
        excess_old = 0;
      }
    } else {
      excess_old = 0;
    }
  }

  // If we find that OLD has excess regions, give them back to YOUNG now to reduce likelihood we run out of allocation
  // runway during evacuation and update-refs.
  size_t regions_to_xfer = 0;
  if (excess_old > unaffiliated_old) {
    // we can give back unaffiliated_old (all of unaffiliated is excess)
    if (unaffiliated_old_regions > 0) {
      regions_to_xfer = unaffiliated_old_regions;
    }
  } else if (unaffiliated_old_regions > 0) {
    // excess_old < unaffiliated old: we can give back MIN(excess_old/region_size_bytes, unaffiliated_old_regions)
    size_t excess_regions = excess_old / region_size_bytes;
    regions_to_xfer = MIN2(excess_regions, unaffiliated_old_regions);
  }

  if (regions_to_xfer > 0) {
    bool result = ShenandoahGenerationalHeap::cast(heap)->generation_sizer()->transfer_to_young(regions_to_xfer);
    assert(excess_old >= regions_to_xfer * region_size_bytes,
           "Cannot transfer (" SIZE_FORMAT ", " SIZE_FORMAT ") more than excess old (" SIZE_FORMAT ")",
           regions_to_xfer, region_size_bytes, excess_old);
    excess_old -= regions_to_xfer * region_size_bytes;
    log_debug(gc, ergo)("%s transferred " SIZE_FORMAT " excess regions to young before start of evacuation",
                       result? "Successfully": "Unsuccessfully", regions_to_xfer);
  }

  // Add in the excess_old memory to hold unanticipated promotions, if any.  If there are more unanticipated
  // promotions than fit in reserved memory, they will be deferred until a future GC pass.
  size_t total_promotion_reserve = young_advance_promoted_reserve_used + excess_old;
  old_generation->set_promoted_reserve(total_promotion_reserve);
  old_generation->reset_promoted_expended();
}

typedef struct {
  ShenandoahHeapRegion* _region;
  size_t _live_data;
} AgedRegionData;

static int compare_by_aged_live(AgedRegionData a, AgedRegionData b) {
  if (a._live_data < b._live_data)
    return -1;
  else if (a._live_data > b._live_data)
    return 1;
  else return 0;
}
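// The comparator orders aged regions by increasing live data, so that regions
// cheapest to evacuate are preselected first (see select_aged_regions() below).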

inline void assert_no_in_place_promotions() {
#ifdef ASSERT
  class ShenandoahNoInPlacePromotions : public ShenandoahHeapRegionClosure {
  public:
    void heap_region_do(ShenandoahHeapRegion *r) override {
      assert(r->get_top_before_promote() == nullptr,
             "Region " SIZE_FORMAT " should not be ready for in-place promotion", r->index());
    }
  } cl;
  ShenandoahHeap::heap()->heap_region_iterate(&cl);
#endif
}

// Preselect for inclusion into the collection set regions whose age is at or above tenure age which contain more than
// ShenandoahOldGarbageThreshold amounts of garbage.  We identify these regions by setting the appropriate entry of
// the collection set's preselected regions array to true.  All entries are initialized to false before calling this
// function.
//
// During the subsequent selection of the collection set, we give priority to these promotion set candidates.
// Without this prioritization, we found that the aged regions tend to be ignored because they typically have
// much less garbage and much more live data than the recently allocated "eden" regions.  When aged regions are
// repeatedly excluded from the collection set, the amount of live memory within the young generation tends to
// accumulate and this has the undesirable side effect of causing young-generation collections to require much more
// CPU and wall-clock time.
//
// A second benefit of treating aged regions differently than other regions during collection set selection is
// that this allows us to more accurately budget memory to hold the results of evacuation.  Memory for evacuation
// of aged regions must be reserved in the old generation.  Memory for evacuation of all other regions must be
// reserved in the young generation.
size_t ShenandoahGeneration::select_aged_regions(size_t old_available) {

  // There should be no regions configured for subsequent in-place-promotions carried over from the previous cycle.
  assert_no_in_place_promotions();

  auto const heap = ShenandoahGenerationalHeap::heap();
  bool* const candidate_regions_for_promotion_by_copy = heap->collection_set()->preselected_regions();
  ShenandoahMarkingContext* const ctx = heap->marking_context();

  const uint tenuring_threshold = heap->age_census()->tenuring_threshold();
  const size_t old_garbage_threshold = (ShenandoahHeapRegion::region_size_bytes() * ShenandoahOldGarbageThreshold) / 100;
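  // E.g., with 2 MB regions and ShenandoahOldGarbageThreshold=25 (illustrative
  // values), only tenured regions holding at least 512 KB of garbage are
  // preselected for promotion by evacuation; tenured regions with less garbage
  // are considered for promotion in place instead.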

  size_t old_consumed = 0;
  size_t promo_potential = 0;
  size_t candidates = 0;

  // Tracks the padding of space above top in regions eligible for promotion in place
  size_t promote_in_place_pad = 0;

  // Sort the promotion-eligible regions in order of increasing live-data-bytes so that we can first reclaim regions that require
  // less evacuation effort.  This prioritizes garbage first, expanding the allocation pool early before we reclaim regions that
  // have more live data.
  const size_t num_regions = heap->num_regions();

  ResourceMark rm;
  AgedRegionData* sorted_regions = NEW_RESOURCE_ARRAY(AgedRegionData, num_regions);

  for (size_t i = 0; i < num_regions; i++) {
    ShenandoahHeapRegion* const r = heap->get_region(i);
    if (r->is_empty() || !r->has_live() || !r->is_young() || !r->is_regular()) {
      // skip over regions that aren't regular young with some live data
      continue;
    }
    if (r->age() >= tenuring_threshold) {
      if (r->garbage() < old_garbage_threshold) {
        // This tenure-worthy region has too little garbage, so we do not want to expend the copying effort to
        // reclaim the garbage; instead this region may be eligible for promotion-in-place to the
        // old generation.
        HeapWord* tams = ctx->top_at_mark_start(r);
        HeapWord* original_top = r->top();
        if (!heap->is_concurrent_old_mark_in_progress() && tams == original_top) {
          // No allocations from this region have been made during concurrent mark. It meets all the criteria
          // for in-place-promotion. Though we only need the value of top when we fill the end of the region,
          // we use this field to indicate that this region should be promoted in place during the evacuation
          // phase.
          r->save_top_before_promote();

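          // r->free() is in bytes; min_fill_size() and fill_with_object() work in
          // heap words, hence the division by HeapWordSize.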
          size_t remnant_size = r->free() / HeapWordSize;
          if (remnant_size > ShenandoahHeap::min_fill_size()) {
            ShenandoahHeap::fill_with_object(original_top, remnant_size);
            // Fill the remnant memory within this region to assure no allocations prior to promote in place.  Otherwise,
            // newly allocated objects will not be parsable when promote in place tries to register them.  Furthermore, any
            // new allocations would not necessarily be eligible for promotion.  This addresses both issues.
            r->set_top(r->end());
            promote_in_place_pad += remnant_size * HeapWordSize;
          } else {
            // Since the remnant is so small that it cannot be filled, we don't have to worry about any accidental
            // allocations occurring within this region before the region is promoted in place.
          }
        }
        // Else, we do not promote this region (either in place or by copy) because it has received new allocations.

        // During evacuation, we exclude from promotion regions for which age > tenure threshold, garbage < garbage-threshold,
        //  and get_top_before_promote() != tams
      } else {
        // Record this promotion-eligible candidate region. After sorting and selecting the best candidates below,
        // we may still decide to exclude this promotion-eligible region from the current collection set.  If this
        // happens, we will consider this region as part of the anticipated promotion potential for the next GC
        // pass; see further below.
        sorted_regions[candidates]._region = r;
        sorted_regions[candidates++]._live_data = r->get_live_data_bytes();
      }
    } else {
      // We only evacuate & promote objects from regular regions whose garbage() is above old-garbage-threshold.
      // Objects in tenure-worthy regions with less garbage are promoted in place. These take a different path to
      // old-gen.  Regions excluded from promotion because their garbage content is too low (causing us to anticipate that
      // the region would be promoted in place) may be eligible for evacuation promotion by the time promotion takes
      // place during a subsequent GC pass because more garbage is found within the region between now and then.  This
      // should not happen if we are properly adapting the tenure age.  The theory behind adaptive tenuring threshold
      // is to choose the youngest age that demonstrates no "significant" further loss of population since the previous
      // age.  If not this, we expect the tenure age to demonstrate linear population decay for at least two population
      // samples, whereas we expect to observe exponential population decay for ages younger than the tenure age.
      //
      // In the case that certain regions which were anticipated to be promoted in place need to be promoted by
      // evacuation, it may be the case that there is not sufficient reserve within old-gen to hold evacuation of
      // these regions.  The likely outcome is that these regions will not be selected for evacuation or promotion
      // in the current cycle and we will anticipate that they will be promoted in the next cycle.  This will cause
      // us to reserve more old-gen memory so that these objects can be promoted in the subsequent cycle.
      if (heap->is_aging_cycle() && (r->age() + 1 == tenuring_threshold)) {
        if (r->garbage() >= old_garbage_threshold) {
          promo_potential += r->get_live_data_bytes();
        }
      }
    }
    // Note that we keep going even if one region is excluded from selection.
    // Subsequent regions may be selected if they have smaller live data.
  }
  // Sort in increasing order according to live data bytes.  Note that candidates represents the number of regions
  // that qualify to be promoted by evacuation.
  if (candidates > 0) {
    size_t selected_regions = 0;
    size_t selected_live = 0;
    QuickSort::sort<AgedRegionData>(sorted_regions, candidates, compare_by_aged_live, false);
    for (size_t i = 0; i < candidates; i++) {
      ShenandoahHeapRegion* const region = sorted_regions[i]._region;
      size_t region_live_data = sorted_regions[i]._live_data;
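      // Scale live data by the promotion waste factor to get the old-gen memory that must be
      // reserved to evacuate this region.  E.g., with a waste factor of 1.2 (illustrative),
      // 10 MB of live data requires a 12 MB reservation.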
      size_t promotion_need = (size_t) (region_live_data * ShenandoahPromoEvacWaste);
      if (old_consumed + promotion_need <= old_available) {
        old_consumed += promotion_need;
        candidate_regions_for_promotion_by_copy[region->index()] = true;
        selected_regions++;
        selected_live += region_live_data;
      } else {
        // We rejected this promotable region from the collection set because we had no room to hold its copy.
        // Add this region to promo potential for next GC.
        promo_potential += region_live_data;
        assert(!candidate_regions_for_promotion_by_copy[region->index()], "Shouldn't be selected");
      }
      // We keep going even if one region is excluded from selection because we need to accumulate all eligible
      // regions that are not preselected into promo_potential
    }
    log_debug(gc)("Preselected " SIZE_FORMAT " regions containing " SIZE_FORMAT " live bytes,"
                 " consuming: " SIZE_FORMAT " of budgeted: " SIZE_FORMAT,
                 selected_regions, selected_live, old_consumed, old_available);
  }

  heap->old_generation()->set_pad_for_promote_in_place(promote_in_place_pad);
  heap->old_generation()->set_promotion_potential(promo_potential);
  return old_consumed;
}

void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahCollectionSet* collection_set = heap->collection_set();
  bool is_generational = heap->mode()->is_generational();

  assert(!heap->is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
  assert(!is_old(), "Only YOUNG and GLOBAL GC perform evacuations");
  {
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_update_region_states :
                            ShenandoahPhaseTimings::degen_gc_final_update_region_states);
    ShenandoahFinalMarkUpdateRegionStateClosure cl(complete_marking_context());
    parallel_heap_region_iterate(&cl);

    if (is_young()) {
      // We always need to update the watermark for old regions. If there
      // are mixed collections pending, we also need to synchronize the
      // pinned status for old regions. Since we are already visiting every
      // old region here, go ahead and sync the pin status too.
      ShenandoahFinalMarkUpdateRegionStateClosure old_cl(nullptr);
      heap->old_generation()->parallel_heap_region_iterate(&old_cl);
    }
  }

  // Tally the census counts and compute the adaptive tenuring threshold
  if (is_generational && ShenandoahGenerationalAdaptiveTenuring && !ShenandoahGenerationalCensusAtEvac) {
    // Objects above TAMS weren't included in the age census. Since they were all
    // allocated in this cycle they belong in the age 0 cohort. We walk over all
    // young regions and sum the volume of objects between TAMS and top.
    ShenandoahUpdateCensusZeroCohortClosure age0_cl(complete_marking_context());
    heap->young_generation()->heap_region_iterate(&age0_cl);
    size_t age0_pop = age0_cl.get_age0_population();

    // Update the global census, including the missed age 0 cohort above,
    // along with the census done during marking, and compute the tenuring threshold.
    ShenandoahAgeCensus* census = ShenandoahGenerationalHeap::heap()->age_census();
    census->update_census(age0_pop);
#ifndef PRODUCT
    size_t total_pop = age0_cl.get_total_population();
    size_t total_census = census->get_total();
    // Usually total_pop > total_census, but not by too much.
    // We use integer division so anything up to just less than 2 is considered
    // reasonable, and the "+1" is to avoid divide-by-zero.
    assert((total_pop+1)/(total_census+1) == 1, "Extreme divergence: "
           SIZE_FORMAT "/" SIZE_FORMAT, total_pop, total_census);
#endif
  }

  {
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::choose_cset :
                            ShenandoahPhaseTimings::degen_gc_choose_cset);

    collection_set->clear();
    ShenandoahHeapLocker locker(heap->lock());
    if (is_generational) {
      // Seed the collection set with resource area-allocated
      // preselected regions, which are removed when we exit this scope.
      ShenandoahCollectionSetPreselector preselector(collection_set, heap->num_regions());

      // Find the amount that will be promoted, regions that will be promoted in
      // place, and preselect older regions that will be promoted by evacuation.
      compute_evacuation_budgets(heap);

      // Choose the collection set, including the regions preselected above for
      // promotion into the old generation.
      _heuristics->choose_collection_set(collection_set);
      if (!collection_set->is_empty()) {
        // only make use of evacuation budgets when we are evacuating
        adjust_evacuation_budgets(heap, collection_set);
      }

      if (is_global()) {
        // We have just chosen a collection set for a global cycle. The mark bitmap covering old regions is complete, so
        // the remembered set scan can use that to avoid walking into garbage. When the next old mark begins, we will
        // use the mark bitmap to make the old regions parsable by coalescing and filling any unmarked objects. Thus,
        // we prepare for old collections by remembering which regions are old at this time. Note that any objects
        // promoted into old regions will be above TAMS, and so will be considered marked. However, free regions that
        // become old after this point will not be covered correctly by the mark bitmap, so we must be careful not to
        // coalesce those regions. Only the old regions which are not part of the collection set at this point are
        // eligible for coalescing. As implemented now, this has the side effect of possibly initiating mixed-evacuations
        // after a global cycle for old regions that were not included in this collection set.
        heap->old_generation()->prepare_for_mixed_collections_after_global_gc();
      }
    } else {
      _heuristics->choose_collection_set(collection_set);
    }
  }

  {
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset :
                            ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
    ShenandoahHeapLocker locker(heap->lock());
    size_t young_cset_regions, old_cset_regions;

    // We are preparing for evacuation.  At this time, we ignore cset region tallies.
    size_t first_old, last_old, num_old;
    heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
    // Free set construction uses reserve quantities, because they are known to be valid here
    heap->free_set()->finish_rebuild(young_cset_regions, old_cset_regions, num_old, true);
  }
}

bool ShenandoahGeneration::is_bitmap_clear() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahMarkingContext* context = heap->marking_context();
  const size_t num_regions = heap->num_regions();
  for (size_t idx = 0; idx < num_regions; idx++) {
    ShenandoahHeapRegion* r = heap->get_region(idx);
    if (contains(r) && r->is_affiliated()) {
      if (heap->is_bitmap_slice_committed(r) && (context->top_at_mark_start(r) > r->bottom()) &&
          !context->is_bitmap_range_within_region_clear(r->bottom(), r->end())) {
        return false;
      }
    }
  }
  return true;
}

bool ShenandoahGeneration::is_mark_complete() {
  return _is_marking_complete.is_set();
}

void ShenandoahGeneration::set_mark_complete() {
  _is_marking_complete.set();
}

void ShenandoahGeneration::set_mark_incomplete() {
  _is_marking_complete.unset();
}

ShenandoahMarkingContext* ShenandoahGeneration::complete_marking_context() {
  assert(is_mark_complete(), "Marking must be completed.");
  return ShenandoahHeap::heap()->marking_context();
}

void ShenandoahGeneration::cancel_marking() {
  log_info(gc)("Cancel marking: %s", name());
  if (is_concurrent_mark_in_progress()) {
    set_mark_incomplete();
  }
  _task_queues->clear();
  ref_processor()->abandon_partial_discovery();
  set_concurrent_mark_in_progress(false);
}

ShenandoahGeneration::ShenandoahGeneration(ShenandoahGenerationType type,
                                           uint max_workers,
                                           size_t max_capacity,
                                           size_t soft_max_capacity) :
  _type(type),
  _task_queues(new ShenandoahObjToScanQueueSet(max_workers)),
  _ref_processor(new ShenandoahReferenceProcessor(MAX2(max_workers, 1U))),
  _affiliated_region_count(0), _humongous_waste(0), _evacuation_reserve(0),
  _used(0), _bytes_allocated_since_gc_start(0),
  _max_capacity(max_capacity), _soft_max_capacity(soft_max_capacity),
  _heuristics(nullptr)
{
  _is_marking_complete.set();
  assert(max_workers > 0, "At least one queue");
  for (uint i = 0; i < max_workers; ++i) {
    ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
    _task_queues->register_queue(i, task_queue);
  }
}

ShenandoahGeneration::~ShenandoahGeneration() {
  for (uint i = 0; i < _task_queues->size(); ++i) {
    ShenandoahObjToScanQueue* q = _task_queues->queue(i);
    delete q;
  }
  delete _task_queues;
}

void ShenandoahGeneration::reserve_task_queues(uint workers) {
  _task_queues->reserve(workers);
}

ShenandoahObjToScanQueueSet* ShenandoahGeneration::old_gen_task_queues() const {
  return nullptr;
}

void ShenandoahGeneration::scan_remembered_set(bool is_concurrent) {
  assert(is_young(), "Should only scan remembered set for young generation.");

  ShenandoahGenerationalHeap* const heap = ShenandoahGenerationalHeap::heap();
  uint nworkers = heap->workers()->active_workers();
  reserve_task_queues(nworkers);

  ShenandoahReferenceProcessor* rp = ref_processor();
  ShenandoahRegionChunkIterator work_list(nworkers);
  ShenandoahScanRememberedTask task(task_queues(), old_gen_task_queues(), rp, &work_list, is_concurrent);
  heap->assert_gc_workers(nworkers);
  heap->workers()->run_task(&task);
  if (ShenandoahEnableCardStats) {
    ShenandoahScanRemembered* scanner = heap->old_generation()->card_scan();
    assert(scanner != nullptr, "Not generational");
    scanner->log_card_stats(nworkers, CARD_STAT_SCAN_RS);
  }
}

size_t ShenandoahGeneration::increment_affiliated_region_count() {
  shenandoah_assert_heaplocked_or_safepoint();
  // During full gc, multiple GC worker threads may change region affiliations without a lock.  No lock is enforced
  // on read and write of _affiliated_region_count.  At the end of full gc, a single thread overwrites the count with
  // a coherent value.
  return Atomic::add(&_affiliated_region_count, (size_t) 1);
}

size_t ShenandoahGeneration::decrement_affiliated_region_count() {
  shenandoah_assert_heaplocked_or_safepoint();
  // During full gc, multiple GC worker threads may change region affiliations without a lock.  No lock is enforced
  // on read and write of _affiliated_region_count.  At the end of full gc, a single thread overwrites the count with
  // a coherent value.
  auto affiliated_region_count = Atomic::sub(&_affiliated_region_count, (size_t) 1);
  assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
         (used() + _humongous_waste <= affiliated_region_count * ShenandoahHeapRegion::region_size_bytes()),
         "used + humongous cannot exceed regions");
  return affiliated_region_count;
}

size_t ShenandoahGeneration::decrement_affiliated_region_count_without_lock() {
  return Atomic::sub(&_affiliated_region_count, (size_t) 1);
}

size_t ShenandoahGeneration::increase_affiliated_region_count(size_t delta) {
  shenandoah_assert_heaplocked_or_safepoint();
  return Atomic::add(&_affiliated_region_count, delta);
}

size_t ShenandoahGeneration::decrease_affiliated_region_count(size_t delta) {
  shenandoah_assert_heaplocked_or_safepoint();
  assert(Atomic::load(&_affiliated_region_count) >= delta, "Affiliated region count cannot be negative");

  auto const affiliated_region_count = Atomic::sub(&_affiliated_region_count, delta);
  assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
         (_used + _humongous_waste <= affiliated_region_count * ShenandoahHeapRegion::region_size_bytes()),
         "used + humongous cannot exceed regions");
  return affiliated_region_count;
}

void ShenandoahGeneration::establish_usage(size_t num_regions, size_t num_bytes, size_t humongous_waste) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
  Atomic::store(&_affiliated_region_count, num_regions);
  Atomic::store(&_used, num_bytes);
  _humongous_waste = humongous_waste;
}

void ShenandoahGeneration::increase_used(size_t bytes) {
  Atomic::add(&_used, bytes);
}

void ShenandoahGeneration::increase_humongous_waste(size_t bytes) {
  if (bytes > 0) {
    Atomic::add(&_humongous_waste, bytes);
  }
}

void ShenandoahGeneration::decrease_humongous_waste(size_t bytes) {
  if (bytes > 0) {
    assert(ShenandoahHeap::heap()->is_full_gc_in_progress() || (_humongous_waste >= bytes),
           "Waste (" SIZE_FORMAT ") cannot be negative (after subtracting " SIZE_FORMAT ")", _humongous_waste, bytes);
    Atomic::sub(&_humongous_waste, bytes);
  }
}

void ShenandoahGeneration::decrease_used(size_t bytes) {
  assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
         (_used >= bytes), "cannot reduce bytes used by generation below zero");
  Atomic::sub(&_used, bytes);
}

size_t ShenandoahGeneration::used_regions() const {
  return Atomic::load(&_affiliated_region_count);
}

size_t ShenandoahGeneration::free_unaffiliated_regions() const {
  size_t result = max_capacity() / ShenandoahHeapRegion::region_size_bytes();
  auto const used_regions = this->used_regions();
  if (used_regions > result) {
    result = 0;
  } else {
    result -= used_regions;
  }
  return result;
}

size_t ShenandoahGeneration::used_regions_size() const {
  return used_regions() * ShenandoahHeapRegion::region_size_bytes();
}

size_t ShenandoahGeneration::available() const {
  return available(max_capacity());
}

// For ShenandoahYoungGeneration, include the young available that may have been reserved for the Collector.
size_t ShenandoahGeneration::available_with_reserve() const {
  return available(max_capacity());
}

size_t ShenandoahGeneration::soft_available() const {
  return available(soft_max_capacity());
}

size_t ShenandoahGeneration::available(size_t capacity) const {
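  // Clamp at zero rather than underflow: in_use can transiently exceed capacity,
  // e.g., while regions are being transferred between generations.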
  size_t in_use = used() + get_humongous_waste();
  return in_use > capacity ? 0 : capacity - in_use;
}

size_t ShenandoahGeneration::increase_capacity(size_t increment) {
  shenandoah_assert_heaplocked_or_safepoint();

  // We do not enforce that new capacity >= heap->max_size_for(this).  The maximum generation size is treated as a rule of thumb
  // which may be violated during certain transitions, such as when we are forcing transfers for the purpose of promoting regions
  // in place.
  assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
         (_max_capacity + increment <= ShenandoahHeap::heap()->max_capacity()), "Generation cannot be larger than heap size");
  assert(increment % ShenandoahHeapRegion::region_size_bytes() == 0, "Generation capacity must be multiple of region size");
  _max_capacity += increment;

  // This detects arithmetic wraparound on _used
  assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
         (used_regions_size() >= used()),
         "Affiliated regions must hold more than what is currently used");
  return _max_capacity;
}

size_t ShenandoahGeneration::set_capacity(size_t byte_size) {
  shenandoah_assert_heaplocked_or_safepoint();
  _max_capacity = byte_size;
  return _max_capacity;
}

size_t ShenandoahGeneration::decrease_capacity(size_t decrement) {
  shenandoah_assert_heaplocked_or_safepoint();

  // We do not enforce that new capacity >= heap->min_size_for(this).  The minimum generation size is treated as a rule of thumb
  // which may be violated during certain transitions, such as when we are forcing transfers for the purpose of promoting regions
  // in place.
  assert(decrement % ShenandoahHeapRegion::region_size_bytes() == 0, "Generation capacity must be multiple of region size");
  assert(_max_capacity >= decrement, "Generation capacity cannot be negative");

  _max_capacity -= decrement;

  // This detects arithmetic wraparound on _used
  assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
         (used_regions_size() >= used()),
         "Affiliated regions must hold more than what is currently used");
  assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
         (_used <= _max_capacity), "Cannot use more than capacity");
  assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
         (used_regions_size() <= _max_capacity),
         "Cannot use more than capacity");
  return _max_capacity;
}

void ShenandoahGeneration::record_success_concurrent(bool abbreviated) {
  heuristics()->record_success_concurrent();
  ShenandoahHeap::heap()->shenandoah_policy()->record_success_concurrent(is_young(), abbreviated);
}