1 /*
   2  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  27 #include "gc/shenandoah/shenandoahCollectionSetPreselector.hpp"
  28 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  29 #include "gc/shenandoah/shenandoahGeneration.hpp"
  30 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
  31 #include "gc/shenandoah/shenandoahMarkClosures.hpp"
  32 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  33 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  34 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  35 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
  36 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
  37 #include "gc/shenandoah/shenandoahUtils.hpp"
  38 #include "gc/shenandoah/shenandoahVerifier.hpp"
  39 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  40 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
  41 
  42 #include "utilities/quickSort.hpp"
  43 
  44 
  45 class ShenandoahResetUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
  46  private:
  47   ShenandoahHeap* _heap;
  48   ShenandoahMarkingContext* const _ctx;
  49  public:
  50   ShenandoahResetUpdateRegionStateClosure() :
  51     _heap(ShenandoahHeap::heap()),
  52     _ctx(_heap->marking_context()) {}
  53 
  54   void heap_region_do(ShenandoahHeapRegion* r) override {
  55     if (_heap->is_bitmap_slice_committed(r)) {
  56       _ctx->clear_bitmap(r);
  57     }
  58 
  59     if (r->is_active()) {
  60       // Reset live data and set TAMS optimistically. We would recheck these under the pause
  61       // anyway to capture any updates that happen between now and then.
  62       _ctx->capture_top_at_mark_start(r);
  63       r->clear_live_data();
  64     }
  65   }
  66 
  67   bool is_thread_safe() override { return true; }
  68 };
  69 
  70 class ShenandoahResetBitmapTask : public ShenandoahHeapRegionClosure {
  71  private:
  72   ShenandoahHeap* _heap;
  73   ShenandoahMarkingContext* const _ctx;
  74  public:
  75   ShenandoahResetBitmapTask() :
  76     _heap(ShenandoahHeap::heap()),
  77     _ctx(_heap->marking_context()) {}
  78 
  79   void heap_region_do(ShenandoahHeapRegion* region) override {
  80     if (_heap->is_bitmap_slice_committed(region)) {
  81       _ctx->clear_bitmap(region);
  82     }
  83   }
  84 
  85   bool is_thread_safe() override { return true; }
  86 };
  87 
  88 // Copy the write-version of the card-table into the read-version, clearing the
  89 // write-copy.
  90 class ShenandoahMergeWriteTable: public ShenandoahHeapRegionClosure {
  91  private:
  92   ShenandoahHeap* _heap;
  93   RememberedScanner* _scanner;
  94  public:
  95   ShenandoahMergeWriteTable() : _heap(ShenandoahHeap::heap()), _scanner(_heap->card_scan()) {}
  96 
  97   virtual void heap_region_do(ShenandoahHeapRegion* r) override {
  98     assert(r->is_old(), "Don't waste time doing this for non-old regions");
  99     _scanner->merge_write_table(r->bottom(), ShenandoahHeapRegion::region_size_words());
 100   }
 101 
 102   virtual bool is_thread_safe() override {
 103     return true;
 104   }
 105 };
 106 
 107 class ShenandoahSquirrelAwayCardTable: public ShenandoahHeapRegionClosure {
 108  private:
 109   ShenandoahHeap* _heap;
 110   RememberedScanner* _scanner;
 111  public:
 112   ShenandoahSquirrelAwayCardTable() :
 113     _heap(ShenandoahHeap::heap()),
 114     _scanner(_heap->card_scan()) {}
 115 
 116   void heap_region_do(ShenandoahHeapRegion* region) override {
 117     assert(region->is_old(), "Don't waste time doing this for non-old regions");
 118     _scanner->reset_remset(region->bottom(), ShenandoahHeapRegion::region_size_words());
 119   }
 120 
 121   bool is_thread_safe() override { return true; }
 122 };
 123 
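     // Exit during VM initialization if the selected heuristics is diagnostic or experimental and the corresponding
     // unlock flag (-XX:+UnlockDiagnosticVMOptions or -XX:+UnlockExperimentalVMOptions) was not supplied.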
 124 void ShenandoahGeneration::confirm_heuristics_mode() {
 125   if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 126     vm_exit_during_initialization(
 127             err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 128                     _heuristics->name()));
 129   }
 130   if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
 131     vm_exit_during_initialization(
 132             err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 133                     _heuristics->name()));
 134   }
 135 }
 136 
 137 ShenandoahHeuristics* ShenandoahGeneration::initialize_heuristics(ShenandoahMode* gc_mode) {
 138   _heuristics = gc_mode->initialize_heuristics(this);
 139   _heuristics->set_guaranteed_gc_interval(ShenandoahGuaranteedGCInterval);
 140   confirm_heuristics_mode();
 141   return _heuristics;
 142 }
 143 
 144 size_t ShenandoahGeneration::bytes_allocated_since_gc_start() const {
 145   return Atomic::load(&_bytes_allocated_since_gc_start);
 146 }
 147 
 148 void ShenandoahGeneration::reset_bytes_allocated_since_gc_start() {
 149   Atomic::store(&_bytes_allocated_since_gc_start, (size_t)0);
 150 }
 151 
 152 void ShenandoahGeneration::increase_allocated(size_t bytes) {
 153   Atomic::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed);
 154 }
 155 
 156 void ShenandoahGeneration::set_evacuation_reserve(size_t new_val) {
 157   _evacuation_reserve = new_val;
 158 }
 159 
 160 size_t ShenandoahGeneration::get_evacuation_reserve() const {
 161   return _evacuation_reserve;
 162 }
 163 
 164 void ShenandoahGeneration::augment_evacuation_reserve(size_t increment) {
 165   _evacuation_reserve += increment;
 166 }
 167 
 168 void ShenandoahGeneration::log_status(const char *msg) const {
 169   typedef LogTarget(Info, gc, ergo) LogGcInfo;
 170 
 171   if (!LogGcInfo::is_enabled()) {
 172     return;
 173   }
 174 
 175   // Not under a lock here, so read each of these once to make sure
 176   // byte size in proper unit and proper unit for byte size are consistent.
 177   size_t v_used = used();
 178   size_t v_used_regions = used_regions_size();
 179   size_t v_soft_max_capacity = soft_max_capacity();
 180   size_t v_max_capacity = max_capacity();
 181   size_t v_available = available();
 182   size_t v_humongous_waste = get_humongous_waste();
 183   LogGcInfo::print("%s: %s generation used: " SIZE_FORMAT "%s, used regions: " SIZE_FORMAT "%s, "
 184                    "humongous waste: " SIZE_FORMAT "%s, soft capacity: " SIZE_FORMAT "%s, max capacity: " SIZE_FORMAT "%s, "
 185                    "available: " SIZE_FORMAT "%s", msg, name(),
 186                    byte_size_in_proper_unit(v_used),              proper_unit_for_byte_size(v_used),
 187                    byte_size_in_proper_unit(v_used_regions),      proper_unit_for_byte_size(v_used_regions),
 188                    byte_size_in_proper_unit(v_humongous_waste),   proper_unit_for_byte_size(v_humongous_waste),
 189                    byte_size_in_proper_unit(v_soft_max_capacity), proper_unit_for_byte_size(v_soft_max_capacity),
 190                    byte_size_in_proper_unit(v_max_capacity),      proper_unit_for_byte_size(v_max_capacity),
 191                    byte_size_in_proper_unit(v_available),         proper_unit_for_byte_size(v_available));
 192 }
 193 
 194 void ShenandoahGeneration::reset_mark_bitmap() {
 195   ShenandoahHeap* heap = ShenandoahHeap::heap();
 196   heap->assert_gc_workers(heap->workers()->active_workers());
 197 
 198   set_mark_incomplete();
 199 
 200   ShenandoahResetBitmapTask task;
 201   parallel_heap_region_iterate(&task);
 202 }
 203 
 204 // The ideal is to swap the remembered set so the safepoint effort is no more than a few pointer manipulations.
 205 // However, limitations in the implementation of the mutator write-barrier make it difficult to simply change the
 206 // location of the card table.  So the interim implementation of swap_remembered_set will copy the write-table
 207 // onto the read-table and will then clear the write-table.
 208 void ShenandoahGeneration::swap_remembered_set() {
 209   // Must be sure that marking is complete before we swap remembered set.
 210   ShenandoahHeap* heap = ShenandoahHeap::heap();
 211   heap->assert_gc_workers(heap->workers()->active_workers());
 212   shenandoah_assert_safepoint();
 213 
 214   // TODO: Eventually, we want to replace this with a constant-time exchange of pointers.
 215   ShenandoahSquirrelAwayCardTable task;
 216   heap->old_generation()->parallel_heap_region_iterate(&task);
 217 }
 218 
 219 // Copy the write-version of the card-table into the read-version, clearing the
 220 // write-version. The work is done at a safepoint and in parallel by the GC
 221 // worker threads.
 222 void ShenandoahGeneration::merge_write_table() {
 223   // This should only happen for degenerated cycles
 224   ShenandoahHeap* heap = ShenandoahHeap::heap();
 225   heap->assert_gc_workers(heap->workers()->active_workers());
 226   shenandoah_assert_safepoint();
 227 
 228   ShenandoahMergeWriteTable task;
 229   heap->old_generation()->parallel_heap_region_iterate(&task);
 230 }
 231 
 232 void ShenandoahGeneration::prepare_gc() {
 233   // Invalidate the marking context
 234   set_mark_incomplete();
 235 
 236   // Capture Top At Mark Start for this generation (typically young) and reset mark bitmap.
 237   ShenandoahResetUpdateRegionStateClosure cl;
 238   parallel_region_iterate_free(&cl);
 239 }
 240 
 241 void ShenandoahGeneration::parallel_region_iterate_free(ShenandoahHeapRegionClosure* cl) {
 242   ShenandoahHeap::heap()->parallel_heap_region_iterate(cl);
 243 }
 244 
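     // Establish the reserves that govern this cycle's evacuation and promotion: how much young memory may receive
     // evacuated young objects, and how much old memory may receive evacuated old objects and promoted aged objects.
     // This runs before the collection set is chosen; adjust_evacuation_budgets() trims the reserves afterwards to
     // match the collection set that was actually selected.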
 245 void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap) {
 246 
 247   ShenandoahOldGeneration* const old_generation = heap->old_generation();
 248   ShenandoahYoungGeneration* const young_generation = heap->young_generation();
 249 
 250   // During initialization and phase changes, it is more likely that fewer objects die young and old-gen
 251   // memory is not yet full (or is in the process of being replaced).  During these times especially, it
 252   // is beneficial to loan memory from old-gen to young-gen during the evacuation and update-refs phases
 253   // of execution.
 254 
 255   // Calculate EvacuationReserve before PromotionReserve.  Evacuation is more critical than promotion.
 256   // If we cannot evacuate old-gen, we will not be able to reclaim old-gen memory.  Promotions are less
 257   // critical.  If we cannot promote, there may be degradation of young-gen memory because old objects
 258   // accumulate there until they can be promoted.  This increases the young-gen marking and evacuation work.
 259 
 260   // First priority is to reclaim the easy garbage out of young-gen.
 261 
 262   // maximum_young_evacuation_reserve is an upper bound on the memory to be evacuated out of young
 263   const size_t maximum_young_evacuation_reserve = (young_generation->max_capacity() * ShenandoahEvacReserve) / 100;
 264   const size_t young_evacuation_reserve = MIN2(maximum_young_evacuation_reserve, young_generation->available_with_reserve());
 265 
 266   // maximum_old_evacuation_reserve is an upper bound on memory evacuated from old and evacuated to old (promoted),
 267   // clamped by the old generation space available.
 268   //
 269   // Here's the algebra.
 270   // Let SOEP = ShenandoahOldEvacRatioPercent,
 271   //     OE = old evac,
 272   //     YE = young evac, and
 273   //     TE = total evac = OE + YE
 274   // By definition:
 275   //            SOEP/100 = OE/TE
 276   //                     = OE/(OE+YE)
 277   //  => SOEP/(100-SOEP) = OE/((OE+YE)-OE)         // componendo-dividendo: If a/b = c/d, then a/(b-a) = c/(d-c)
 278   //                     = OE/YE
 279   //  =>              OE = YE*SOEP/(100-SOEP)
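       //
       // As an illustrative example (numbers chosen only for the arithmetic): with SOEP = 20 and YE = 400 MB,
       // OE = 400 * 20 / (100 - 20) = 100 MB, so TE = 500 MB and OE/TE = 20%, as intended.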
 280 
 281   // We have to be careful in the event that SOEP is set to 100 by the user.
 282   assert(ShenandoahOldEvacRatioPercent <= 100, "Error");
 283   const size_t old_available = old_generation->available();
 284   const size_t maximum_old_evacuation_reserve = (ShenandoahOldEvacRatioPercent == 100) ?
 285     old_available : MIN2((maximum_young_evacuation_reserve * ShenandoahOldEvacRatioPercent) / (100 - ShenandoahOldEvacRatioPercent),
 286                           old_available);
 287 
 288 
 289   // Second priority is to reclaim garbage out of old-gen if there are old-gen collection candidates.  Third priority
 290   // is to promote as much as we have room to promote.  However, if old-gen memory is in short supply, this means young
 291   // GC is operating under "duress" and was unable to transfer the memory that we would normally expect.  In this case,
 292   // old-gen will refrain from compacting itself in order to allow a quicker young-gen cycle (by avoiding the update-refs
 293   // through ALL of old-gen).  If there is some memory available in old-gen, we will use this for promotions as promotions
 294   // do not add to the update-refs burden of GC.
 295 
 296   size_t old_evacuation_reserve, old_promo_reserve;
 297   if (is_global()) {
 298     // Global GC is typically triggered by user invocation of System.gc(), and typically indicates that there is lots
 299     // of garbage to be reclaimed because we are starting a new phase of execution.  Marking for global GC may take
 300     // significantly longer than typical young marking because we must mark through all old objects.  To expedite
 301     // evacuation and update-refs, we give emphasis to reclaiming garbage first, wherever that garbage is found.
 302     // Global GC will adjust generation sizes to accommodate the collection set it chooses.
 303 
 304     // Set old_promo_reserve to enforce that no regions are preselected for promotion.  Such regions typically
 305     // have relatively high memory utilization.  We still call select_aged_regions() because this will prepare for
 306     // promotions in place, if relevant.
 307     old_promo_reserve = 0;
 308 
 309     // Dedicate all available old memory to old_evacuation reserve.  This may be small, because old-gen is only
 310     // expanded based on an existing mixed evacuation workload at the end of the previous GC cycle.  We'll expand
 311     // the budget for evacuation of old during GLOBAL cset selection.
 312     old_evacuation_reserve = maximum_old_evacuation_reserve;
 313   } else if (old_generation->has_unprocessed_collection_candidates()) {
 314     // We reserved all old-gen memory at end of previous GC to hold anticipated evacuations to old-gen.  If this is
 315     // mixed evacuation, reserve all of this memory for compaction of old-gen and do not promote.  Prioritize compaction
 316     // over promotion in order to defragment OLD so that it will be better prepared to efficiently receive promoted memory.
 317     old_evacuation_reserve = maximum_old_evacuation_reserve;
 318     old_promo_reserve = 0;
 319   } else {
 320     // Dedicate all of the old-evacuation memory to promotion, but if we can't use it all for promotion, we'll allow some evacuation.
 321     old_evacuation_reserve = 0;
 322     old_promo_reserve = maximum_old_evacuation_reserve;
 323   }
 324   assert(old_evacuation_reserve <= old_available, "Error");
 325 
 326   // We see too many old-evacuation failures if we force ourselves to evacuate into regions that are not initially empty.
 327   // So we limit the old-evacuation reserve to unfragmented memory.  Even so, old-evacuation is free to fill in nooks and
 328   // crannies within existing partially used regions and it generally tries to do so.
 329   const size_t old_free_unfragmented = old_generation->free_unaffiliated_regions() * ShenandoahHeapRegion::region_size_bytes();
 330   if (old_evacuation_reserve > old_free_unfragmented) {
 331     const size_t delta = old_evacuation_reserve - old_free_unfragmented;
 332     old_evacuation_reserve -= delta;
 333     // Let promo consume fragments of old-gen memory if not global
 334     if (!is_global()) {
 335       old_promo_reserve += delta;
 336     }
 337   }
 338 
 339   // Preselect regions for promotion by evacuation (obtaining the live data to seed promoted_reserve),
 340   // and identify regions that will promote in place. These use the tenuring threshold.
 341   const size_t consumed_by_advance_promotion = select_aged_regions(old_promo_reserve);
 342   assert(consumed_by_advance_promotion <= maximum_old_evacuation_reserve, "Cannot promote more than available old-gen memory");
 343 
 344   // Note that old_promo_reserve may exceed consumed_by_advance_promotion.  Do not transfer the unused remainder
 345   // to old_evacuation_reserve, because this memory is likely very fragmented and we do not want to increase the
 346   // likelihood of old-evacuation failure.
 347   young_generation->set_evacuation_reserve(young_evacuation_reserve);
 348   old_generation->set_evacuation_reserve(old_evacuation_reserve);
 349   old_generation->set_promoted_reserve(consumed_by_advance_promotion);
 350 
 351   // There is no need to expand OLD because all memory used here was set aside at end of previous GC, except in the
 352   // case of a GLOBAL gc.  During choose_collection_set() of GLOBAL, old will be expanded on demand.
 353 }
 354 
 355 // Having chosen the collection set, adjust the budgets for generational mode based on its composition.  Note
 356 // that young_generation->available() now knows about recently discovered immediate garbage.
 357 //
 358 void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap, ShenandoahCollectionSet* const collection_set) {
 359   // We may find that old_evacuation_reserve and/or loaned_for_young_evacuation are not fully consumed, in which case we may
 360   //  be able to increase regions_available_to_loan
 361 
 362   // The role of adjust_evacuation_budgets() is to compute the correct value of regions_available_to_loan and to make
 363   // effective use of this memory, including the remnant memory within these regions that may result from rounding loan to
 364   // integral number of regions.  Excess memory that is available to be loaned is applied to an allocation supplement,
 365   // which allows mutators to allocate memory beyond the current capacity of young-gen on the promise that the loan
 366   // will be repaid as soon as we finish updating references for the recently evacuated collection set.
 367 
 368   // We cannot recalculate regions_available_to_loan by simply dividing old_generation->available() by region_size_bytes
 369   // because the available memory may be distributed between many partially occupied regions that are already holding old-gen
 370   // objects.  Memory in partially occupied regions is not "available" to be loaned.  Note that an increase in old-gen
 371   // available that results from a decrease in memory consumed by old evacuation is not necessarily available to be loaned
 372   // to young-gen.
 373 
 374   size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 375   ShenandoahOldGeneration* const old_generation = heap->old_generation();
 376   ShenandoahYoungGeneration* const young_generation = heap->young_generation();
 377 
 378   size_t old_evacuated = collection_set->get_old_bytes_reserved_for_evacuation();
 379   size_t old_evacuated_committed = (size_t) (ShenandoahOldEvacWaste * old_evacuated);
 380   size_t old_evacuation_reserve = old_generation->get_evacuation_reserve();
 381 
 382   if (old_evacuated_committed > old_evacuation_reserve) {
 383     // This should only happen due to round-off errors when enforcing ShenandoahOldEvacWaste
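         // (The 33/32 bound corresponds to a tolerance of 1/32, i.e. 3.125%, matching the message below.)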
 384     assert(old_evacuated_committed <= (33 * old_evacuation_reserve) / 32,
 385            "Round-off errors should be less than 3.125%%, committed: " SIZE_FORMAT ", reserved: " SIZE_FORMAT,
 386            old_evacuated_committed, old_evacuation_reserve);
 387     old_evacuated_committed = old_evacuation_reserve;
 388     // Leave old_evac_reserve as previously configured
 389   } else if (old_evacuated_committed < old_evacuation_reserve) {
 390     // This happens if the old-gen collection consumes less than full budget.
 391     old_evacuation_reserve = old_evacuated_committed;
 392     old_generation->set_evacuation_reserve(old_evacuation_reserve);
 393   }
 394 
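       // Live bytes in the preselected aged regions that will be promoted by evacuation; the reserve they consume
       // is inflated by ShenandoahPromoEvacWaste, just as ShenandoahOldEvacWaste and ShenandoahEvacWaste inflate
       // the old and young evacuation tallies.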
 395   size_t young_advance_promoted = collection_set->get_young_bytes_to_be_promoted();
 396   size_t young_advance_promoted_reserve_used = (size_t) (ShenandoahPromoEvacWaste * young_advance_promoted);
 397 
 398   size_t young_evacuated = collection_set->get_young_bytes_reserved_for_evacuation();
 399   size_t young_evacuated_reserve_used = (size_t) (ShenandoahEvacWaste * young_evacuated);
 400 
 401   size_t total_young_available = young_generation->available_with_reserve();
 402   assert(young_evacuated_reserve_used <= total_young_available, "Cannot evacuate more than is available in young");
 403   young_generation->set_evacuation_reserve(young_evacuated_reserve_used);
 404 
 405   size_t old_available = old_generation->available();
 406   // Now that we've established the collection set, we know how much memory is really required by old-gen for evacuation
 407   // and promotion reserves.  Try shrinking OLD now in case that gives us a bit more runway for mutator allocations during
 408   // evac and update phases.
 409   size_t old_consumed = old_evacuated_committed + young_advance_promoted_reserve_used;
 410 
 411   if (old_available < old_consumed) {
 412     // This can happen due to round-off errors when adding the results of truncated integer arithmetic.
 413     // We've already truncated old_evacuated_committed.  Truncate young_advance_promoted_reserve_used here.
 414     assert(young_advance_promoted_reserve_used <= (33 * (old_available - old_evacuated_committed)) / 32,
 415            "Round-off errors should be less than 3.125%%, committed: " SIZE_FORMAT ", reserved: " SIZE_FORMAT,
 416            young_advance_promoted_reserve_used, old_available - old_evacuated_committed);
 417     young_advance_promoted_reserve_used = old_available - old_evacuated_committed;
 418     old_consumed = old_evacuated_committed + young_advance_promoted_reserve_used;
 419   }
 420 
 421   assert(old_available >= old_consumed, "Cannot consume (" SIZE_FORMAT ") more than is available (" SIZE_FORMAT ")",
 422          old_consumed, old_available);
 423   size_t excess_old = old_available - old_consumed;
 424   size_t unaffiliated_old_regions = old_generation->free_unaffiliated_regions();
 425   size_t unaffiliated_old = unaffiliated_old_regions * region_size_bytes;
 426   assert(old_available >= unaffiliated_old, "Unaffiliated old is a subset of old available");
 427 
 428   // Make sure old_evac_committed is unaffiliated
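       // Old evacuation is restricted to unaffiliated (empty) regions, so clamp excess_old to the whole unaffiliated
       // regions that remain after setting aside old_evacuated_committed.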
 429   if (old_evacuated_committed > 0) {
 430     if (unaffiliated_old > old_evacuated_committed) {
 431       size_t giveaway = unaffiliated_old - old_evacuated_committed;
 432       size_t giveaway_regions = giveaway / region_size_bytes;  // round down
 433       if (giveaway_regions > 0) {
 434         excess_old = MIN2(excess_old, giveaway_regions * region_size_bytes);
 435       } else {
 436         excess_old = 0;
 437       }
 438     } else {
 439       excess_old = 0;
 440     }
 441   }
 442 
 443   // If we find that OLD has excess regions, give them back to YOUNG now to reduce likelihood we run out of allocation
 444   // runway during evacuation and update-refs.
 445   size_t regions_to_xfer = 0;
 446   if (excess_old > unaffiliated_old) {
 447     // we can give back unaffiliated_old (all of unaffiliated is excess)
 448     if (unaffiliated_old_regions > 0) {
 449       regions_to_xfer = unaffiliated_old_regions;
 450     }
 451   } else if (unaffiliated_old_regions > 0) {
 452     // excess_old < unaffiliated old: we can give back MIN(excess_old/region_size_bytes, unaffiliated_old_regions)
 453     size_t excess_regions = excess_old / region_size_bytes;
 454     regions_to_xfer = MIN2(excess_regions, unaffiliated_old_regions);
 455   }
 456 
 457   if (regions_to_xfer > 0) {
 458     bool result = heap->generation_sizer()->transfer_to_young(regions_to_xfer);
 459     assert(excess_old >= regions_to_xfer * region_size_bytes, "Cannot xfer more than excess old");
 460     excess_old -= regions_to_xfer * region_size_bytes;
 461     log_info(gc, ergo)("%s transferred " SIZE_FORMAT " excess regions to young before start of evacuation",
 462                        result? "Successfully": "Unsuccessfully", regions_to_xfer);
 463   }
 464 
 465   // Add in the excess_old memory to hold unanticipated promotions, if any.  If there are more unanticipated
 466   // promotions than fit in reserved memory, they will be deferred until a future GC pass.
 467   size_t total_promotion_reserve = young_advance_promoted_reserve_used + excess_old;
 468   old_generation->set_promoted_reserve(total_promotion_reserve);
 469   old_generation->reset_promoted_expended();
 470 }
 471 
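     // Pairs a promotion-eligible region with its live-data size so that promotion candidates can be sorted by the
     // evacuation effort they would require.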
 472 typedef struct {
 473   ShenandoahHeapRegion* _region;
 474   size_t _live_data;
 475 } AgedRegionData;
 476 
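     // Comparator for the QuickSort call in select_aged_regions(): orders candidate regions by increasing live data.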
 477 static int compare_by_aged_live(AgedRegionData a, AgedRegionData b) {
 478   if (a._live_data < b._live_data)
 479     return -1;
 480   else if (a._live_data > b._live_data)
 481     return 1;
 482   else return 0;
 483 }
 484 
 485 inline void assert_no_in_place_promotions() {
 486 #ifdef ASSERT
 487   class ShenandoahNoInPlacePromotions : public ShenandoahHeapRegionClosure {
 488   public:
 489     void heap_region_do(ShenandoahHeapRegion *r) override {
 490       assert(r->get_top_before_promote() == nullptr,
 491              "Region " SIZE_FORMAT " should not be ready for in-place promotion", r->index());
 492     }
 493   } cl;
 494   ShenandoahHeap::heap()->heap_region_iterate(&cl);
 495 #endif
 496 }
 497 
 498 // Preselect for inclusion into the collection set regions whose age is at or above tenure age which contain more than
 499 // ShenandoahOldGarbageThreshold amounts of garbage.  We identify these regions by setting the appropriate entry of
 500 // the collection set's preselected regions array to true.  All entries are initialized to false before calling this
 501 // function.
 502 //
 503 // During the subsequent selection of the collection set, we give priority to these promotion set candidates.
 504 // Without this prioritization, we found that the aged regions tend to be ignored because they typically have
 505 // much less garbage and much more live data than the recently allocated "eden" regions.  When aged regions are
 506 // repeatedly excluded from the collection set, the amount of live memory within the young generation tends to
 507 // accumulate and this has the undesirable side effect of causing young-generation collections to require much more
 508 // CPU and wall-clock time.
 509 //
 510 // A second benefit of treating aged regions differently than other regions during collection set selection is
 511 // that this allows us to more accurately budget memory to hold the results of evacuation.  Memory for evacuation
 512 // of aged regions must be reserved in the old generation.  Memory for evacuation of all other regions must be
 513 // reserved in the young generation.
 514 size_t ShenandoahGeneration::select_aged_regions(size_t old_available) {
 515 
 516   // There should be no regions configured for subsequent in-place-promotions carried over from the previous cycle.
 517   assert_no_in_place_promotions();
 518 
 519   auto const heap = ShenandoahGenerationalHeap::heap();
 520   bool* const candidate_regions_for_promotion_by_copy = heap->collection_set()->preselected_regions();
 521   ShenandoahMarkingContext* const ctx = heap->marking_context();
 522 
 523   const uint tenuring_threshold = heap->age_census()->tenuring_threshold();
 524   const size_t old_garbage_threshold = (ShenandoahHeapRegion::region_size_bytes() * ShenandoahOldGarbageThreshold) / 100;
 525 
 526   size_t old_consumed = 0;
 527   size_t promo_potential = 0;
 528   size_t candidates = 0;
 529 
 530   // Tracks the padding of space above top in regions eligible for promotion in place
 531   size_t promote_in_place_pad = 0;
 532 
 533   // Sort the promotion-eligible regions in order of increasing live-data-bytes so that we can first reclaim regions that require
 534   // less evacuation effort.  This prioritizes garbage first, expanding the allocation pool early before we reclaim regions that
 535   // have more live data.
 536   const size_t num_regions = heap->num_regions();
 537 
 538   ResourceMark rm;
 539   AgedRegionData* sorted_regions = NEW_RESOURCE_ARRAY(AgedRegionData, num_regions);
 540 
 541   for (size_t i = 0; i < num_regions; i++) {
 542     ShenandoahHeapRegion* const r = heap->get_region(i);
 543     if (r->is_empty() || !r->has_live() || !r->is_young() || !r->is_regular()) {
 544       // skip over regions that aren't regular young with some live data
 545       continue;
 546     }
 547     if (r->age() >= tenuring_threshold) {
 548       if (r->garbage() < old_garbage_threshold) {
 549         // This tenure-worthy region has too little garbage, so we do not want to expend the copying effort to
 550         // reclaim the garbage; instead this region may be eligible for promotion-in-place to the
 551         // old generation.
 552         HeapWord* tams = ctx->top_at_mark_start(r);
 553         HeapWord* original_top = r->top();
 554         if (!heap->is_concurrent_old_mark_in_progress() && tams == original_top) {
 555           // No allocations from this region have been made during concurrent mark. It meets all the criteria
 556           // for in-place-promotion. Though we only need the value of top when we fill the end of the region,
 557           // we use this field to indicate that this region should be promoted in place during the evacuation
 558           // phase.
 559           r->save_top_before_promote();
 560 
 561           size_t remnant_size = r->free() / HeapWordSize;
 562           if (remnant_size > ShenandoahHeap::min_fill_size()) {
 563             ShenandoahHeap::fill_with_object(original_top, remnant_size);
 564             // Fill the remnant memory within this region to assure no allocations prior to promote in place.  Otherwise,
 565             // newly allocated objects will not be parsable when promote in place tries to register them.  Furthermore, any
 566             // new allocations would not necessarily be eligible for promotion.  This addresses both issues.
 567             r->set_top(r->end());
 568             promote_in_place_pad += remnant_size * HeapWordSize;
 569           } else {
 570             // Since the remnant is so small that it cannot be filled, we don't have to worry about any accidental
 571             // allocations occurring within this region before the region is promoted in place.
 572           }
 573         }
 574         // Else, we do not promote this region (either in place or by copy) because it has received new allocations.
 575 
 576         // During evacuation, we exclude from promotion regions for which age > tenure threshold, garbage < garbage-threshold,
 577         //  and get_top_before_promote() != tams
 578       } else {
 579         // Record this promotion-eligible candidate region. After sorting and selecting the best candidates below,
 580         // we may still decide to exclude this promotion-eligible region from the current collection set.  If this
 581         // happens, we will consider this region as part of the anticipated promotion potential for the next GC
 582         // pass; see further below.
 583         sorted_regions[candidates]._region = r;
 584         sorted_regions[candidates++]._live_data = r->get_live_data_bytes();
 585       }
 586     } else {
 587       // We only evacuate & promote objects from regular regions whose garbage() is above old-garbage-threshold.
 588       // Objects in tenure-worthy regions with less garbage are promoted in place. These take a different path to
 589       // old-gen.  Regions excluded from promotion because their garbage content is too low (causing us to anticipate that
 590       // the region would be promoted in place) may be eligible for evacuation promotion by the time promotion takes
 591       // place during a subsequent GC pass because more garbage is found within the region between now and then.  This
 592       // should not happen if we are properly adapting the tenure age.  The theory behind adaptive tenuring threshold
 593       // is to choose the youngest age that demonstrates no "significant" further loss of population since the previous
 594       // age.  If not this, we expect the tenure age to demonstrate linear population decay for at least two population
 595       // samples, whereas we expect to observe exponential population decay for ages younger than the tenure age.
 596       //
 597       // In the case that certain regions which were anticipated to be promoted in place need to be promoted by
 598       // evacuation, it may be the case that there is not sufficient reserve within old-gen to hold evacuation of
 599       // these regions.  The likely outcome is that these regions will not be selected for evacuation or promotion
 600       // in the current cycle and we will anticipate that they will be promoted in the next cycle.  This will cause
 601       // us to reserve more old-gen memory so that these objects can be promoted in the subsequent cycle.
 602       //
 603       // TODO:
 604       //   If we are auto-tuning the tenure age and regions that were anticipated to be promoted in place end up
 605       //   being promoted by evacuation, this event should feed into the tenure-age-selection heuristic so that
 606       //   the tenure age can be increased.
 607       if (heap->is_aging_cycle() && (r->age() + 1 == tenuring_threshold)) {
 608         if (r->garbage() >= old_garbage_threshold) {
 609           promo_potential += r->get_live_data_bytes();
 610         }
 611       }
 612     }
 613     // Note that we keep going even if one region is excluded from selection.
 614     // Subsequent regions may be selected if they have smaller live data.
 615   }
 616   // Sort in increasing order according to live data bytes.  Note that candidates represents the number of regions
 617   // that qualify to be promoted by evacuation.
 618   if (candidates > 0) {
 619     size_t selected_regions = 0;
 620     size_t selected_live = 0;
 621     QuickSort::sort<AgedRegionData>(sorted_regions, candidates, compare_by_aged_live, false);
 622     for (size_t i = 0; i < candidates; i++) {
 623       ShenandoahHeapRegion* const region = sorted_regions[i]._region;
 624       size_t region_live_data = sorted_regions[i]._live_data;
 625       size_t promotion_need = (size_t) (region_live_data * ShenandoahPromoEvacWaste);
 626       if (old_consumed + promotion_need <= old_available) {
 627         old_consumed += promotion_need;
 628         candidate_regions_for_promotion_by_copy[region->index()] = true;
 629         selected_regions++;
 630         selected_live += region_live_data;
 631       } else {
 632         // We rejected this promotable region from the collection set because we had no room to hold its copy.
 633         // Add this region to promo potential for next GC.
 634         promo_potential += region_live_data;
 635         assert(!candidate_regions_for_promotion_by_copy[region->index()], "Shouldn't be selected");
 636       }
 637       // We keep going even if one region is excluded from selection because we need to accumulate all eligible
 638       // regions that are not preselected into promo_potential
 639     }
 640     log_info(gc)("Preselected " SIZE_FORMAT " regions containing " SIZE_FORMAT " live bytes,"
 641                  " consuming: " SIZE_FORMAT " of budgeted: " SIZE_FORMAT,
 642                  selected_regions, selected_live, old_consumed, old_available);
 643   }
 644 
 645   heap->old_generation()->set_pad_for_promote_in_place(promote_in_place_pad);
 646   heap->old_generation()->set_promotion_potential(promo_potential);
 647   return old_consumed;
 648 }
 649 
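     // Final-mark bookkeeping: update region states (including old-region watermarks when collecting young), refresh
     // the age census and adaptive tenuring threshold in generational mode, choose the collection set (seeded with
     // preselected promotion candidates in generational mode), and rebuild the free set.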
 650 void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) {
 651   ShenandoahHeap* heap = ShenandoahHeap::heap();
 652   ShenandoahCollectionSet* collection_set = heap->collection_set();
 653   bool is_generational = heap->mode()->is_generational();
 654 
 655   assert(!heap->is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
 656   assert(!is_old(), "Only YOUNG and GLOBAL GC perform evacuations");
 657   {
 658     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_update_region_states :
 659                             ShenandoahPhaseTimings::degen_gc_final_update_region_states);
 660     ShenandoahFinalMarkUpdateRegionStateClosure cl(complete_marking_context());
 661     parallel_heap_region_iterate(&cl);
 662 
 663     if (is_young()) {
 664       // We always need to update the watermark for old regions. If there
 665       // are mixed collections pending, we also need to synchronize the
 666       // pinned status for old regions. Since we are already visiting every
 667       // old region here, go ahead and sync the pin status too.
 668       ShenandoahFinalMarkUpdateRegionStateClosure old_cl(nullptr);
 669       heap->old_generation()->parallel_heap_region_iterate(&old_cl);
 670     }
 671   }
 672 
 673   // Tally the census counts and compute the adaptive tenuring threshold
 674   if (is_generational && ShenandoahGenerationalAdaptiveTenuring && !ShenandoahGenerationalCensusAtEvac) {
 675     // Objects above TAMS weren't included in the age census. Since they were all
 676     // allocated in this cycle they belong in the age 0 cohort. We walk over all
 677     // young regions and sum the volume of objects between TAMS and top.
 678     ShenandoahUpdateCensusZeroCohortClosure age0_cl(complete_marking_context());
 679     heap->young_generation()->heap_region_iterate(&age0_cl);
 680     size_t age0_pop = age0_cl.get_age0_population();
 681 
 682     // Update the global census, including the missed age 0 cohort above,
 683     // along with the census done during marking, and compute the tenuring threshold.
 684     heap->age_census()->update_census(age0_pop);
 685 #ifndef PRODUCT
 686     size_t total_pop = age0_cl.get_total_population();
 687     size_t total_census = heap->age_census()->get_total();
 688     // Usually total_pop > total_census, but not by too much.
 689     // We use integer division so anything up to just less than 2 is considered
 690     // reasonable, and the "+1" is to avoid divide-by-zero.
 691     assert((total_pop+1)/(total_census+1) ==  1, "Extreme divergence: "
 692            SIZE_FORMAT "/" SIZE_FORMAT, total_pop, total_census);
 693 #endif
 694   }
 695 
 696   {
 697     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::choose_cset :
 698                             ShenandoahPhaseTimings::degen_gc_choose_cset);
 699 
 700     collection_set->clear();
 701     ShenandoahHeapLocker locker(heap->lock());
 702     if (is_generational) {
 703       // Seed the collection set with resource area-allocated
 704       // preselected regions, which are removed when we exit this scope.
 705       ResourceMark rm;
 706       ShenandoahCollectionSetPreselector preselector(collection_set, heap->num_regions());
 707 
 708       // TODO: young_available can include available (between top() and end()) within each young region that is not
 709       // part of the collection set.  Making this memory available to the young_evacuation_reserve allows a larger
 710       // young collection set to be chosen when available memory is under extreme pressure.  Implementing this "improvement"
 711       // is tricky, because the incremental construction of the collection set actually changes the amount of memory
 712       // available to hold evacuated young-gen objects.  As currently implemented, the memory that is available within
 713       // non-empty regions that are not selected as part of the collection set can be allocated by the mutator while
 714       // GC is evacuating and updating references.
 715 
 716       // Find the amount that will be promoted, regions that will be promoted in
 717       // place, and preselect older regions that will be promoted by evacuation.
 718       compute_evacuation_budgets(heap);
 719 
 720       // Choose the collection set, including the regions preselected above for
 721       // promotion into the old generation.
 722       _heuristics->choose_collection_set(collection_set);
 723       if (!collection_set->is_empty()) {
 724         // only make use of evacuation budgets when we are evacuating
 725         adjust_evacuation_budgets(heap, collection_set);
 726       }
 727 
 728       if (is_global()) {
 729         // We have just chosen a collection set for a global cycle. The mark bitmap covering old regions is complete, so
 730         // the remembered set scan can use that to avoid walking into garbage. When the next old mark begins, we will
 731         // use the mark bitmap to make the old regions parsable by coalescing and filling any unmarked objects. Thus,
 732         // we prepare for old collections by remembering which regions are old at this time. Note that any objects
 733         // promoted into old regions will be above TAMS, and so will be considered marked. However, free regions that
 734         // become old after this point will not be covered correctly by the mark bitmap, so we must be careful not to
 735         // coalesce those regions. Only the old regions which are not part of the collection set at this point are
 736         // eligible for coalescing. As implemented now, this has the side effect of possibly initiating mixed-evacuations
 737         // after a global cycle for old regions that were not included in this collection set.
 738         heap->old_generation()->prepare_for_mixed_collections_after_global_gc();
 739       }
 740     } else {
 741       _heuristics->choose_collection_set(collection_set);
 742     }
 743   }
 744 
 745 
 746   {
 747     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset :
 748                             ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
 749     ShenandoahHeapLocker locker(heap->lock());
 750     size_t young_cset_regions, old_cset_regions;
 751 
 752     // We are preparing for evacuation.  At this time, we ignore cset region tallies.
 753     size_t first_old, last_old, num_old;
 754     heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
 755     // Free set construction uses reserve quantities, because they are known to be valid here
 756     heap->free_set()->rebuild(young_cset_regions, old_cset_regions, true);
 757   }
 758 }
 759 
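     // Returns true if no marks are set for this generation's affiliated regions.  Regions whose bitmap slice is not
     // committed, or whose top-at-mark-start is still at bottom, are trivially treated as clear.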
 760 bool ShenandoahGeneration::is_bitmap_clear() {
 761   ShenandoahHeap* heap = ShenandoahHeap::heap();
 762   ShenandoahMarkingContext* context = heap->marking_context();
 763   const size_t num_regions = heap->num_regions();
 764   for (size_t idx = 0; idx < num_regions; idx++) {
 765     ShenandoahHeapRegion* r = heap->get_region(idx);
 766     if (contains(r) && r->is_affiliated()) {
 767       if (heap->is_bitmap_slice_committed(r) && (context->top_at_mark_start(r) > r->bottom()) &&
 768           !context->is_bitmap_clear_range(r->bottom(), r->end())) {
 769         return false;
 770       }
 771     }
 772   }
 773   return true;
 774 }
 775 
 776 bool ShenandoahGeneration::is_mark_complete() {
 777   return _is_marking_complete.is_set();
 778 }
 779 
 780 void ShenandoahGeneration::set_mark_complete() {
 781   _is_marking_complete.set();
 782 }
 783 
 784 void ShenandoahGeneration::set_mark_incomplete() {
 785   _is_marking_complete.unset();
 786 }
 787 
 788 ShenandoahMarkingContext* ShenandoahGeneration::complete_marking_context() {
 789   assert(is_mark_complete(), "Marking must be completed.");
 790   return ShenandoahHeap::heap()->marking_context();
 791 }
 792 
 793 void ShenandoahGeneration::cancel_marking() {
 794   log_info(gc)("Cancel marking: %s", name());
 795   if (is_concurrent_mark_in_progress()) {
 796     set_mark_incomplete();
 797   }
 798   _task_queues->clear();
 799   ref_processor()->abandon_partial_discovery();
 800   set_concurrent_mark_in_progress(false);
 801 }
 802 
 803 ShenandoahGeneration::ShenandoahGeneration(ShenandoahGenerationType type,
 804                                            uint max_workers,
 805                                            size_t max_capacity,
 806                                            size_t soft_max_capacity) :
 807   _type(type),
 808   _task_queues(new ShenandoahObjToScanQueueSet(max_workers)),
 809   _ref_processor(new ShenandoahReferenceProcessor(MAX2(max_workers, 1U))),
 810   _affiliated_region_count(0), _humongous_waste(0), _used(0), _bytes_allocated_since_gc_start(0),
 811   _max_capacity(max_capacity), _soft_max_capacity(soft_max_capacity),
 812   _heuristics(nullptr) {
 813   _is_marking_complete.set();
 814   assert(max_workers > 0, "At least one queue");
 815   for (uint i = 0; i < max_workers; ++i) {
 816     ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
 817     _task_queues->register_queue(i, task_queue);
 818   }
 819 }
 820 
 821 ShenandoahGeneration::~ShenandoahGeneration() {
 822   for (uint i = 0; i < _task_queues->size(); ++i) {
 823     ShenandoahObjToScanQueue* q = _task_queues->queue(i);
 824     delete q;
 825   }
 826   delete _task_queues;
 827 }
 828 
 829 void ShenandoahGeneration::reserve_task_queues(uint workers) {
 830   _task_queues->reserve(workers);
 831 }
 832 
 833 ShenandoahObjToScanQueueSet* ShenandoahGeneration::old_gen_task_queues() const {
 834   return nullptr;
 835 }
 836 
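     // Scan the remembered set for old-to-young pointers, which serve as additional marking roots for a young
     // collection.  The scan is chunked over regions and run by the active GC workers; card statistics are logged
     // when ShenandoahEnableCardStats is enabled.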
 837 void ShenandoahGeneration::scan_remembered_set(bool is_concurrent) {
 838   assert(is_young(), "Should only scan remembered set for young generation.");
 839 
 840   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 841   uint nworkers = heap->workers()->active_workers();
 842   reserve_task_queues(nworkers);
 843 
 844   ShenandoahReferenceProcessor* rp = ref_processor();
 845   ShenandoahRegionChunkIterator work_list(nworkers);
 846   ShenandoahScanRememberedTask task(task_queues(), old_gen_task_queues(), rp, &work_list, is_concurrent);
 847   heap->assert_gc_workers(nworkers);
 848   heap->workers()->run_task(&task);
 849   if (ShenandoahEnableCardStats) {
 850     assert(heap->card_scan() != nullptr, "Not generational");
 851     heap->card_scan()->log_card_stats(nworkers, CARD_STAT_SCAN_RS);
 852   }
 853 }
 854 
 855 size_t ShenandoahGeneration::increment_affiliated_region_count() {
 856   shenandoah_assert_heaplocked_or_fullgc_safepoint();
 857   // During full gc, multiple GC worker threads may change region affiliations without a lock.  No lock is enforced
 858   // on read and write of _affiliated_region_count.  At the end of full gc, a single thread overwrites the count with
 859   // a coherent value.
 860   _affiliated_region_count++;
 861   return _affiliated_region_count;
 862 }
 863 
 864 size_t ShenandoahGeneration::decrement_affiliated_region_count() {
 865   shenandoah_assert_heaplocked_or_fullgc_safepoint();
 866   // During full gc, multiple GC worker threads may change region affiliations without a lock.  No lock is enforced
 867   // on read and write of _affiliated_region_count.  At the end of full gc, a single thread overwrites the count with
 868   // a coherent value.
 869   _affiliated_region_count--;
 870   // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
 871   assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
 872          (_used + _humongous_waste <= _affiliated_region_count * ShenandoahHeapRegion::region_size_bytes()),
 873          "used + humongous cannot exceed regions");
 874   return _affiliated_region_count;
 875 }
 876 
 877 size_t ShenandoahGeneration::increase_affiliated_region_count(size_t delta) {
 878   shenandoah_assert_heaplocked_or_fullgc_safepoint();
 879   _affiliated_region_count += delta;
 880   return _affiliated_region_count;
 881 }
 882 
 883 size_t ShenandoahGeneration::decrease_affiliated_region_count(size_t delta) {
 884   shenandoah_assert_heaplocked_or_fullgc_safepoint();
 885   assert(_affiliated_region_count >= delta, "Affiliated region count cannot be negative");
 886 
 887   _affiliated_region_count -= delta;
 888   // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
 889   assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
 890          (_used + _humongous_waste <= _affiliated_region_count * ShenandoahHeapRegion::region_size_bytes()),
 891          "used + humongous cannot exceed regions");
 892   return _affiliated_region_count;
 893 }
 894 
 895 void ShenandoahGeneration::establish_usage(size_t num_regions, size_t num_bytes, size_t humongous_waste) {
 896   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
 897   _affiliated_region_count = num_regions;
 898   _used = num_bytes;
 899   _humongous_waste = humongous_waste;
 900 }
 901 
 902 void ShenandoahGeneration::increase_used(size_t bytes) {
 903   Atomic::add(&_used, bytes);
 904 }
 905 
 906 void ShenandoahGeneration::increase_humongous_waste(size_t bytes) {
 907   if (bytes > 0) {
 908     Atomic::add(&_humongous_waste, bytes);
 909   }
 910 }
 911 
 912 void ShenandoahGeneration::decrease_humongous_waste(size_t bytes) {
 913   if (bytes > 0) {
 914     assert(ShenandoahHeap::heap()->is_full_gc_in_progress() || (_humongous_waste >= bytes),
 915            "Waste (" SIZE_FORMAT ") cannot be negative (after subtracting " SIZE_FORMAT ")", _humongous_waste, bytes);
 916     Atomic::sub(&_humongous_waste, bytes);
 917   }
 918 }
 919 
 920 void ShenandoahGeneration::decrease_used(size_t bytes) {
 921   // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
 922   assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
 923          (_used >= bytes), "cannot reduce bytes used by generation below zero");
 924   Atomic::sub(&_used, bytes);
 925 }
 926 
 927 size_t ShenandoahGeneration::used_regions() const {
 928   return _affiliated_region_count;
 929 }
 930 
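     // Number of whole regions of this generation's capacity that are not affiliated, floored at zero in case the
     // affiliated count temporarily exceeds the capacity in regions.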
 931 size_t ShenandoahGeneration::free_unaffiliated_regions() const {
 932   size_t result = max_capacity() / ShenandoahHeapRegion::region_size_bytes();
 933   if (_affiliated_region_count > result) {
 934     result = 0;
 935   } else {
 936     result -= _affiliated_region_count;
 937   }
 938   return result;
 939 }
 940 
 941 size_t ShenandoahGeneration::used_regions_size() const {
 942   return _affiliated_region_count * ShenandoahHeapRegion::region_size_bytes();
 943 }
 944 
 945 size_t ShenandoahGeneration::available() const {
 946   return available(max_capacity());
 947 }
 948 
 949 // For ShenandoahYoungGeneration, include the young available that may have been reserved for the Collector.
 950 size_t ShenandoahGeneration::available_with_reserve() const {
 951   return available(max_capacity());
 952 }
 953 
 954 size_t ShenandoahGeneration::soft_available() const {
 955   return available(soft_max_capacity());
 956 }
 957 
 958 size_t ShenandoahGeneration::available(size_t capacity) const {
 959   size_t in_use = used() + get_humongous_waste();
 960   return in_use > capacity ? 0 : capacity - in_use;
 961 }
 962 
 963 void ShenandoahGeneration::increase_capacity(size_t increment) {
 964   shenandoah_assert_heaplocked_or_safepoint();
 965 
 966   // We do not enforce that new capacity >= heap->max_size_for(this).  The maximum generation size is treated as a rule of thumb
 967   // which may be violated during certain transitions, such as when we are forcing transfers for the purpose of promoting regions
 968   // in place.
 969   // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
 970   assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
 971          (_max_capacity + increment <= ShenandoahHeap::heap()->max_capacity()), "Generation cannot be larger than heap size");
 972   assert(increment % ShenandoahHeapRegion::region_size_bytes() == 0, "Generation capacity must be multiple of region size");
 973   _max_capacity += increment;
 974 
 975   // This detects arithmetic wraparound on _used
 976   // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
 977   assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
 978          (_affiliated_region_count * ShenandoahHeapRegion::region_size_bytes() >= _used),
 979          "Affiliated regions must hold more than what is currently used");
 980 }
 981 
 982 void ShenandoahGeneration::decrease_capacity(size_t decrement) {
 983   shenandoah_assert_heaplocked_or_safepoint();
 984 
 985   // We do not enforce that new capacity >= heap->min_size_for(this).  The minimum generation size is treated as a rule of thumb
 986   // which may be violated during certain transitions, such as when we are forcing transfers for the purpose of promoting regions
 987   // in place.
 988   assert(decrement % ShenandoahHeapRegion::region_size_bytes() == 0, "Generation capacity must be multiple of region size");
 989   assert(_max_capacity >= decrement, "Generation capacity cannot be negative");
 990 
 991   _max_capacity -= decrement;
 992 
 993   // This detects arithmetic wraparound on _used
 994   // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
 995   assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
 996          (_affiliated_region_count * ShenandoahHeapRegion::region_size_bytes() >= _used),
 997          "Affiliated regions must hold more than what is currently used");
 998   // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
 999   assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
1000          (_used <= _max_capacity), "Cannot use more than capacity");
1001   // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
1002   assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
1003          (_affiliated_region_count * ShenandoahHeapRegion::region_size_bytes() <= _max_capacity),
1004          "Affiliated region count cannot exceed capacity");
1005 }
1006 
1007 void ShenandoahGeneration::record_success_concurrent(bool abbreviated) {
1008   heuristics()->record_success_concurrent(abbreviated);
1009   ShenandoahHeap::heap()->shenandoah_policy()->record_success_concurrent(is_young(), abbreviated);
1010 }