1 /*
   2  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  27 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  28 #include "gc/shenandoah/shenandoahGeneration.hpp"
  29 #include "gc/shenandoah/shenandoahHeap.hpp"
  30 #include "gc/shenandoah/shenandoahMarkClosures.hpp"
  31 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  32 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  33 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  34 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
  35 #include "gc/shenandoah/shenandoahUtils.hpp"
  36 #include "gc/shenandoah/shenandoahVerifier.hpp"
  37 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  38 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
  39 
  40 #include "utilities/quickSort.hpp"
  41 
  42 class ShenandoahResetUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
  43  private:
  44   ShenandoahHeap* _heap;
  45   ShenandoahMarkingContext* const _ctx;
  46  public:
  47   ShenandoahResetUpdateRegionStateClosure() :
  48     _heap(ShenandoahHeap::heap()),
  49     _ctx(_heap->marking_context()) {}
  50 
  51   void heap_region_do(ShenandoahHeapRegion* r) override {
  52     if (_heap->is_bitmap_slice_committed(r)) {
  53       _ctx->clear_bitmap(r);
  54     }
  55 
  56     if (r->is_active()) {
      // Reset live data and set TAMS optimistically. We recheck these under the pause anyway
      // to capture any updates that happen in the meantime.
  59       _ctx->capture_top_at_mark_start(r);
  60       r->clear_live_data();
  61     }
  62   }
  63 
  64   bool is_thread_safe() override { return true; }
  65 };
  66 
  67 class ShenandoahResetBitmapTask : public ShenandoahHeapRegionClosure {
  68  private:
  69   ShenandoahHeap* _heap;
  70   ShenandoahMarkingContext* const _ctx;
  71  public:
  72   ShenandoahResetBitmapTask() :
  73     _heap(ShenandoahHeap::heap()),
  74     _ctx(_heap->marking_context()) {}
  75 
  76   void heap_region_do(ShenandoahHeapRegion* region) {
  77     if (_heap->is_bitmap_slice_committed(region)) {
  78       _ctx->clear_bitmap(region);
  79     }
  80   }
  81 
  82   bool is_thread_safe() { return true; }
  83 };
  84 
  85 class ShenandoahMergeWriteTable: public ShenandoahHeapRegionClosure {
  86  private:
  87   ShenandoahHeap* _heap;
  88   RememberedScanner* _scanner;
  89  public:
  90   ShenandoahMergeWriteTable() : _heap(ShenandoahHeap::heap()), _scanner(_heap->card_scan()) {}
  91 
  92   virtual void heap_region_do(ShenandoahHeapRegion* r) override {
  93     if (r->is_old()) {
  94       _scanner->merge_write_table(r->bottom(), ShenandoahHeapRegion::region_size_words());
  95     }
  96   }
  97 
  98   virtual bool is_thread_safe() override {
  99     return true;
 100   }
 101 };
 102 
 103 class ShenandoahSquirrelAwayCardTable: public ShenandoahHeapRegionClosure {
 104  private:
 105   ShenandoahHeap* _heap;
 106   RememberedScanner* _scanner;
 107  public:
 108   ShenandoahSquirrelAwayCardTable() :
 109     _heap(ShenandoahHeap::heap()),
 110     _scanner(_heap->card_scan()) {}
 111 
 112   void heap_region_do(ShenandoahHeapRegion* region) {
 113     if (region->is_old()) {
 114       _scanner->reset_remset(region->bottom(), ShenandoahHeapRegion::region_size_words());
 115     }
 116   }
 117 
 118   bool is_thread_safe() { return true; }
 119 };
 120 
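// Heuristics tagged as diagnostic or experimental may only be selected when the corresponding unlock
// flag is supplied on the command line; otherwise initialization fails with an explanatory message.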
 121 void ShenandoahGeneration::confirm_heuristics_mode() {
 122   if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 123     vm_exit_during_initialization(
 124             err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 125                     _heuristics->name()));
 126   }
 127   if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
 128     vm_exit_during_initialization(
 129             err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 130                     _heuristics->name()));
 131   }
 132 }
 133 
 134 ShenandoahHeuristics* ShenandoahGeneration::initialize_heuristics(ShenandoahMode* gc_mode) {
 135   _heuristics = gc_mode->initialize_heuristics(this);
 136   _heuristics->set_guaranteed_gc_interval(ShenandoahGuaranteedGCInterval);
 137   confirm_heuristics_mode();
 138   return _heuristics;
 139 }
 140 
 141 size_t ShenandoahGeneration::bytes_allocated_since_gc_start() const {
 142   return Atomic::load(&_bytes_allocated_since_gc_start);
 143 }
 144 
 145 void ShenandoahGeneration::reset_bytes_allocated_since_gc_start() {
 146   Atomic::store(&_bytes_allocated_since_gc_start, (size_t)0);
 147 }
 148 
 149 void ShenandoahGeneration::increase_allocated(size_t bytes) {
 150   Atomic::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed);
 151 }
 152 
 153 void ShenandoahGeneration::log_status(const char *msg) const {
 154   typedef LogTarget(Info, gc, ergo) LogGcInfo;
 155 
 156   if (!LogGcInfo::is_enabled()) {
 157     return;
 158   }
 159 
  // Not under a lock here, so read each of these values once to make sure the
  // reported size and the unit it is reported in are derived from the same snapshot.
 162   size_t v_used = used();
 163   size_t v_used_regions = used_regions_size();
 164   size_t v_soft_max_capacity = soft_max_capacity();
 165   size_t v_max_capacity = max_capacity();
 166   size_t v_available = available();
 167   size_t v_humongous_waste = get_humongous_waste();
 168   LogGcInfo::print("%s: %s generation used: " SIZE_FORMAT "%s, used regions: " SIZE_FORMAT "%s, "
 169                    "humongous waste: " SIZE_FORMAT "%s, soft capacity: " SIZE_FORMAT "%s, max capacity: " SIZE_FORMAT "%s, "
 170                    "available: " SIZE_FORMAT "%s", msg, name(),
 171                    byte_size_in_proper_unit(v_used),              proper_unit_for_byte_size(v_used),
 172                    byte_size_in_proper_unit(v_used_regions),      proper_unit_for_byte_size(v_used_regions),
 173                    byte_size_in_proper_unit(v_humongous_waste),   proper_unit_for_byte_size(v_humongous_waste),
 174                    byte_size_in_proper_unit(v_soft_max_capacity), proper_unit_for_byte_size(v_soft_max_capacity),
 175                    byte_size_in_proper_unit(v_max_capacity),      proper_unit_for_byte_size(v_max_capacity),
 176                    byte_size_in_proper_unit(v_available),         proper_unit_for_byte_size(v_available));
 177 }
 178 
 179 void ShenandoahGeneration::reset_mark_bitmap() {
 180   ShenandoahHeap* heap = ShenandoahHeap::heap();
 181   heap->assert_gc_workers(heap->workers()->active_workers());
 182 
 183   set_mark_incomplete();
 184 
 185   ShenandoahResetBitmapTask task;
 186   parallel_heap_region_iterate(&task);
 187 }
 188 
 189 // The ideal is to swap the remembered set so the safepoint effort is no more than a few pointer manipulations.
 190 // However, limitations in the implementation of the mutator write-barrier make it difficult to simply change the
 191 // location of the card table.  So the interim implementation of swap_remembered_set will copy the write-table
 192 // onto the read-table and will then clear the write-table.
 193 void ShenandoahGeneration::swap_remembered_set() {
 194   // Must be sure that marking is complete before we swap remembered set.
 195   ShenandoahHeap* heap = ShenandoahHeap::heap();
 196   heap->assert_gc_workers(heap->workers()->active_workers());
 197   shenandoah_assert_safepoint();
 198 
  // TODO: Eventually, we want to replace this with a constant-time exchange of pointers.
 200   ShenandoahSquirrelAwayCardTable task;
 201   heap->old_generation()->parallel_heap_region_iterate(&task);
 202 }
 203 
 204 // If a concurrent cycle fails _after_ the card table has been swapped we need to update the read card
 205 // table with any writes that have occurred during the transition to the degenerated cycle. Without this,
 206 // newly created objects which are only referenced by old objects could be lost when the remembered set
 207 // is scanned during the degenerated mark.
 208 void ShenandoahGeneration::merge_write_table() {
 209   // This should only happen for degenerated cycles
 210   ShenandoahHeap* heap = ShenandoahHeap::heap();
 211   heap->assert_gc_workers(heap->workers()->active_workers());
 212   shenandoah_assert_safepoint();
 213 
 214   ShenandoahMergeWriteTable task;
 215   heap->old_generation()->parallel_heap_region_iterate(&task);
 216 }
 217 
 218 void ShenandoahGeneration::prepare_gc() {
 219   // Invalidate the marking context
 220   set_mark_incomplete();
 221 
 222   // Capture Top At Mark Start for this generation (typically young) and reset mark bitmap.
 223   ShenandoahResetUpdateRegionStateClosure cl;
 224   parallel_heap_region_iterate(&cl);
 225 }
 226 
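// Compute the evacuation reserves for the upcoming collection: the young evacuation reserve, the old
// evacuation reserve, and the advance-promotion reserve.  The results are recorded on the heap
// (set_young_evac_reserve / set_old_evac_reserve / set_promoted_reserve), and regions eligible for
// promotion by copy are preselected (see select_aged_regions) and registered with the collection set.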
 227 void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* heap, bool* preselected_regions,
 228                                                       ShenandoahCollectionSet* collection_set,
 229                                                       size_t &consumed_by_advance_promotion) {
 230   size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 231   size_t regions_available_to_loan = 0;
 232   size_t minimum_evacuation_reserve = ShenandoahOldCompactionReserve * region_size_bytes;
 233   size_t old_regions_loaned_for_young_evac = 0;
 234   consumed_by_advance_promotion = 0;
 235 
 236   ShenandoahGeneration* old_generation = heap->old_generation();
 237   ShenandoahYoungGeneration* young_generation = heap->young_generation();
 238   size_t old_evacuation_reserve = 0;
 239   size_t num_regions = heap->num_regions();
 240 
 241   // During initialization and phase changes, it is more likely that fewer objects die young and old-gen
 242   // memory is not yet full (or is in the process of being replaced).  During these times especially, it
 243   // is beneficial to loan memory from old-gen to young-gen during the evacuation and update-refs phases
 244   // of execution.
 245 
 246   // Calculate EvacuationReserve before PromotionReserve.  Evacuation is more critical than promotion.
 247   // If we cannot evacuate old-gen, we will not be able to reclaim old-gen memory.  Promotions are less
 248   // critical.  If we cannot promote, there may be degradation of young-gen memory because old objects
 249   // accumulate there until they can be promoted.  This increases the young-gen marking and evacuation work.
 250 
 251   // Do not fill up old-gen memory with promotions.  Reserve some amount of memory for compaction purposes.
 252   size_t young_evac_reserve_max = 0;
 253 
 254   // First priority is to reclaim the easy garbage out of young-gen.
 255 
 256   // maximum_young_evacuation_reserve is upper bound on memory to be evacuated out of young
 257   size_t maximum_young_evacuation_reserve = (young_generation->max_capacity() * ShenandoahEvacReserve) / 100;
 258   size_t young_evacuation_reserve = maximum_young_evacuation_reserve;
 259   size_t excess_young;
 260 
 261   size_t total_young_available = young_generation->available_with_reserve();
 262   if (total_young_available > young_evacuation_reserve) {
 263     excess_young = total_young_available - young_evacuation_reserve;
 264   } else {
 265     young_evacuation_reserve = total_young_available;
 266     excess_young = 0;
 267   }
 268   size_t unaffiliated_young = young_generation->free_unaffiliated_regions() * region_size_bytes;
 269   if (excess_young > unaffiliated_young) {
 270     excess_young = unaffiliated_young;
 271   } else {
 272     // round down to multiple of region size
 273     excess_young /= region_size_bytes;
 274     excess_young *= region_size_bytes;
 275   }
 276   // excess_young is available to be transferred to OLD.  Assume that OLD will not request any more than had
 277   // already been set aside for its promotion and evacuation needs at the end of previous GC.  No need to
 278   // hold back memory for allocation runway.
 279 
  // TODO: excess_young is unused.  Did we want to add it to old_promo_reserve and/or old_evacuation_reserve?
 281 
 282   ShenandoahOldHeuristics* old_heuristics = heap->old_heuristics();
 283 
 284   // maximum_old_evacuation_reserve is an upper bound on memory evacuated from old and evacuated to old (promoted).
 285   size_t maximum_old_evacuation_reserve =
 286     maximum_young_evacuation_reserve * ShenandoahOldEvacRatioPercent / (100 - ShenandoahOldEvacRatioPercent);
 287   // Here's the algebra:
 288   //  TotalEvacuation = OldEvacuation + YoungEvacuation
 289   //  OldEvacuation = TotalEvacuation * (ShenandoahOldEvacRatioPercent/100)
 290   //  OldEvacuation = YoungEvacuation * (ShenandoahOldEvacRatioPercent/100)/(1 - ShenandoahOldEvacRatioPercent/100)
 291   //  OldEvacuation = YoungEvacuation * ShenandoahOldEvacRatioPercent/(100 - ShenandoahOldEvacRatioPercent)
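  //
  // For example (hypothetical values): with ShenandoahOldEvacRatioPercent == 25 and a young evacuation
  // reserve of 300 MB, the bound is 300 * 25 / (100 - 25) == 100 MB, i.e. old evacuation accounts for
  // at most 25% of the 400 MB total evacuation.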
 292 
 293   if (maximum_old_evacuation_reserve > old_generation->available()) {
 294     maximum_old_evacuation_reserve = old_generation->available();
 295   }
 296 
 297   // Second priority is to reclaim garbage out of old-gen if there are old-gen collection candidates.  Third priority
 298   // is to promote as much as we have room to promote.  However, if old-gen memory is in short supply, this means young
 299   // GC is operating under "duress" and was unable to transfer the memory that we would normally expect.  In this case,
 300   // old-gen will refrain from compacting itself in order to allow a quicker young-gen cycle (by avoiding the update-refs
 301   // through ALL of old-gen).  If there is some memory available in old-gen, we will use this for promotions as promotions
 302   // do not add to the update-refs burden of GC.
 303 
 304   size_t old_promo_reserve;
 305   if (is_global()) {
 306     // Global GC is typically triggered by user invocation of System.gc(), and typically indicates that there is lots
 307     // of garbage to be reclaimed because we are starting a new phase of execution.  Marking for global GC may take
 308     // significantly longer than typical young marking because we must mark through all old objects.  To expedite
 309     // evacuation and update-refs, we give emphasis to reclaiming garbage first, wherever that garbage is found.
 310     // Global GC will adjust generation sizes to accommodate the collection set it chooses.
 311 
 312     // Set old_promo_reserve to enforce that no regions are preselected for promotion.  Such regions typically
 313     // have relatively high memory utilization.  We still call select_aged_regions() because this will prepare for
 314     // promotions in place, if relevant.
 315     old_promo_reserve = 0;
 316 
 317     // Dedicate all available old memory to old_evacuation reserve.  This may be small, because old-gen is only
 318     // expanded based on an existing mixed evacuation workload at the end of the previous GC cycle.  We'll expand
 319     // the budget for evacuation of old during GLOBAL cset selection.
 320     old_evacuation_reserve = maximum_old_evacuation_reserve;
 321   } else if (old_heuristics->unprocessed_old_collection_candidates() > 0) {
 322     // We reserved all old-gen memory at end of previous GC to hold anticipated evacuations to old-gen.  If this is
 323     // mixed evacuation, reserve all of this memory for compaction of old-gen and do not promote.  Prioritize compaction
 324     // over promotion in order to defragment OLD so that it will be better prepared to efficiently receive promoted memory.
 325     old_evacuation_reserve = maximum_old_evacuation_reserve;
 326     old_promo_reserve = 0;
 327   } else {
    // Dedicate all of the old-evacuation memory to promotion; if we can't use it all for promotion, we'll allow some evacuation.
 329     old_evacuation_reserve = 0;
 330     old_promo_reserve = maximum_old_evacuation_reserve;
 331   }
 332 
 333   // We see too many old-evacuation failures if we force ourselves to evacuate into regions that are not initially empty.
 334   // So we limit the old-evacuation reserve to unfragmented memory.  Even so, old-evacuation is free to fill in nooks and
 335   // crannies within existing partially used regions and it generally tries to do so.
 336   size_t old_free_regions = old_generation->free_unaffiliated_regions();
 337   size_t old_free_unfragmented = old_free_regions * region_size_bytes;
 338   if (old_evacuation_reserve > old_free_unfragmented) {
 339     size_t delta = old_evacuation_reserve - old_free_unfragmented;
 340     old_evacuation_reserve -= delta;
 341 
 342     // Let promo consume fragments of old-gen memory if not global
 343     if (!is_global()) {
 344       old_promo_reserve += delta;
 345     }
 346   }
 347   collection_set->establish_preselected(preselected_regions);
 348   consumed_by_advance_promotion = select_aged_regions(old_promo_reserve, num_regions, preselected_regions);
 349   assert(consumed_by_advance_promotion <= maximum_old_evacuation_reserve, "Cannot promote more than available old-gen memory");
 350 
  // Note that old_promo_reserve might not be entirely consumed by advance promotion.  Do not transfer the unused
  // portion to old_evacuation_reserve, because this memory is likely very fragmented and we do not want to increase
  // the likelihood of old evacuation failure.
 354 
 355   heap->set_young_evac_reserve(young_evacuation_reserve);
 356   heap->set_old_evac_reserve(old_evacuation_reserve);
 357   heap->set_promoted_reserve(consumed_by_advance_promotion);
 358 
 359   // There is no need to expand OLD because all memory used here was set aside at end of previous GC, except in the
 360   // case of a GLOBAL gc.  During choose_collection_set() of GLOBAL, old will be expanded on demand.
 361 }
 362 
 363 // Having chosen the collection set, adjust the budgets for generational mode based on its composition.  Note
 364 // that young_generation->available() now knows about recently discovered immediate garbage.
 365 
 366 void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* heap, ShenandoahCollectionSet* collection_set,
 367                                                      size_t consumed_by_advance_promotion) {
  // The collection set has been chosen, so we now know how much memory old evacuation and advance promotion
  // will actually consume.  The role of adjust_evacuation_budgets() is to trim the old evacuation and promotion
  // reserves down to what the chosen collection set requires, and to return any excess unaffiliated old regions
  // to the young generation so that mutators have more allocation runway during evacuation and update-refs.

  // We cannot compute the number of regions to return by simply dividing old_generation->available() by
  // region_size_bytes, because the available memory may be distributed between many partially occupied regions
  // that are already holding old-gen objects.  Memory in partially occupied regions is not available to be
  // transferred to young-gen; only whole unaffiliated regions can be moved between the generations.
 382 
 383   size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 384   ShenandoahOldGeneration* old_generation = heap->old_generation();
 385   ShenandoahYoungGeneration* young_generation = heap->young_generation();
 386 
 387   // Preselected regions have been inserted into the collection set, so we no longer need the preselected array.
 388   collection_set->abandon_preselected();
 389 
 390   size_t old_evacuated = collection_set->get_old_bytes_reserved_for_evacuation();
 391   size_t old_evacuated_committed = (size_t) (ShenandoahOldEvacWaste * old_evacuated);
 392   size_t old_evacuation_reserve = heap->get_old_evac_reserve();
 393 
 394   if (old_evacuated_committed > old_evacuation_reserve) {
 395     // This should only happen due to round-off errors when enforcing ShenandoahOldEvacWaste
 396     assert(old_evacuated_committed <= (33 * old_evacuation_reserve) / 32,
 397            "Round-off errors should be less than 3.125%%, committed: " SIZE_FORMAT ", reserved: " SIZE_FORMAT,
 398            old_evacuated_committed, old_evacuation_reserve);
 399     old_evacuated_committed = old_evacuation_reserve;
 400     // Leave old_evac_reserve as previously configured
 401   } else if (old_evacuated_committed < old_evacuation_reserve) {
 402     // This happens if the old-gen collection consumes less than full budget.
 403     old_evacuation_reserve = old_evacuated_committed;
 404     heap->set_old_evac_reserve(old_evacuation_reserve);
 405   }
 406 
 407   size_t young_advance_promoted = collection_set->get_young_bytes_to_be_promoted();
 408   size_t young_advance_promoted_reserve_used = (size_t) (ShenandoahPromoEvacWaste * young_advance_promoted);
 409 
 410   size_t young_evacuated = collection_set->get_young_bytes_reserved_for_evacuation();
 411   size_t young_evacuated_reserve_used = (size_t) (ShenandoahEvacWaste * young_evacuated);
 412 
 413   size_t total_young_available = young_generation->available_with_reserve();
 414   assert(young_evacuated_reserve_used <= total_young_available, "Cannot evacuate more than is available in young");
 415   heap->set_young_evac_reserve(young_evacuated_reserve_used);
 416 
 417   size_t old_available = old_generation->available();
 418   // Now that we've established the collection set, we know how much memory is really required by old-gen for evacuation
 419   // and promotion reserves.  Try shrinking OLD now in case that gives us a bit more runway for mutator allocations during
 420   // evac and update phases.
 421   size_t old_consumed = old_evacuated_committed + young_advance_promoted_reserve_used;
 422 
 423   if (old_available < old_consumed) {
 424     // This can happen due to round-off errors when adding the results of truncated integer arithmetic.
 425     // We've already truncated old_evacuated_committed.  Truncate young_advance_promoted_reserve_used here.
 426     assert(young_advance_promoted_reserve_used <= (33 * (old_available - old_evacuated_committed)) / 32,
 427            "Round-off errors should be less than 3.125%%, committed: " SIZE_FORMAT ", reserved: " SIZE_FORMAT,
 428            young_advance_promoted_reserve_used, old_available - old_evacuated_committed);
 429     young_advance_promoted_reserve_used = old_available - old_evacuated_committed;
 430     old_consumed = old_evacuated_committed + young_advance_promoted_reserve_used;
 431   }
 432 
 433   assert(old_available >= old_consumed, "Cannot consume (" SIZE_FORMAT ") more than is available (" SIZE_FORMAT ")",
 434          old_consumed, old_available);
 435   size_t excess_old = old_available - old_consumed;
 436   size_t unaffiliated_old_regions = old_generation->free_unaffiliated_regions();
 437   size_t unaffiliated_old = unaffiliated_old_regions * region_size_bytes;
 438   assert(old_available >= unaffiliated_old, "Unaffiliated old is a subset of old available");
 439 
  // Make sure old_evacuated_committed can be satisfied from unaffiliated regions
 441   if (old_evacuated_committed > 0) {
 442     if (unaffiliated_old > old_evacuated_committed) {
 443       size_t giveaway = unaffiliated_old - old_evacuated_committed;
 444       size_t giveaway_regions = giveaway / region_size_bytes;  // round down
 445       if (giveaway_regions > 0) {
 446         excess_old = MIN2(excess_old, giveaway_regions * region_size_bytes);
 447       } else {
 448         excess_old = 0;
 449       }
 450     } else {
 451       excess_old = 0;
 452     }
 453   }
 454 
 455   // If we find that OLD has excess regions, give them back to YOUNG now to reduce likelihood we run out of allocation
 456   // runway during evacuation and update-refs.
 457   size_t regions_to_xfer = 0;
 458   if (excess_old > unaffiliated_old) {
 459     // we can give back unaffiliated_old (all of unaffiliated is excess)
 460     if (unaffiliated_old_regions > 0) {
 461       regions_to_xfer = unaffiliated_old_regions;
 462     }
 463   } else if (unaffiliated_old_regions > 0) {
    // excess_old <= unaffiliated old: we can give back MIN(excess_old/region_size_bytes, unaffiliated_old_regions)
    size_t excess_regions = excess_old / region_size_bytes;
    regions_to_xfer = MIN2(excess_regions, unaffiliated_old_regions);
 467   }
 468 
 469   if (regions_to_xfer > 0) {
 470     bool result = heap->generation_sizer()->transfer_to_young(regions_to_xfer);
    assert(excess_old >= regions_to_xfer * region_size_bytes, "Cannot xfer more than excess old");
 472     excess_old -= regions_to_xfer * region_size_bytes;
 473     log_info(gc, ergo)("%s transferred " SIZE_FORMAT " excess regions to young before start of evacuation",
 474                        result? "Successfully": "Unsuccessfully", regions_to_xfer);
 475   }
 476 
 477   // Add in the excess_old memory to hold unanticipated promotions, if any.  If there are more unanticipated
 478   // promotions than fit in reserved memory, they will be deferred until a future GC pass.
 479   size_t total_promotion_reserve = young_advance_promoted_reserve_used + excess_old;
 480   heap->set_promoted_reserve(total_promotion_reserve);
 481   heap->reset_promoted_expended();
 482 }
 483 
 484 typedef struct {
 485   ShenandoahHeapRegion* _region;
 486   size_t _live_data;
 487 } AgedRegionData;
 488 
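// Comparator for sorting promotion candidates in ascending order of live data, so that the candidates
// that are cheapest to evacuate are considered first (see select_aged_regions below).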
 489 static int compare_by_aged_live(AgedRegionData a, AgedRegionData b) {
 490   if (a._live_data < b._live_data)
 491     return -1;
 492   else if (a._live_data > b._live_data)
 493     return 1;
 494   else return 0;
 495 }
 496 
 497 inline void assert_no_in_place_promotions() {
 498 #ifdef ASSERT
 499   class ShenandoahNoInPlacePromotions : public ShenandoahHeapRegionClosure {
 500   public:
 501     void heap_region_do(ShenandoahHeapRegion *r) override {
 502       assert(r->get_top_before_promote() == nullptr,
 503              "Region " SIZE_FORMAT " should not be ready for in-place promotion", r->index());
 504     }
 505   } cl;
 506   ShenandoahHeap::heap()->heap_region_iterate(&cl);
 507 #endif
 508 }
 509 
 510 // Preselect for inclusion into the collection set regions whose age is at or above tenure age which contain more than
 511 // ShenandoahOldGarbageThreshold amounts of garbage.  We identify these regions by setting the appropriate entry of
 512 // candidate_regions_for_promotion_by_copy[] to true.  All entries are initialized to false before calling this
 513 // function.
 514 //
 515 // During the subsequent selection of the collection set, we give priority to these promotion set candidates.
 516 // Without this prioritization, we found that the aged regions tend to be ignored because they typically have
 517 // much less garbage and much more live data than the recently allocated "eden" regions.  When aged regions are
 518 // repeatedly excluded from the collection set, the amount of live memory within the young generation tends to
 519 // accumulate and this has the undesirable side effect of causing young-generation collections to require much more
 520 // CPU and wall-clock time.
 521 //
 522 // A second benefit of treating aged regions differently than other regions during collection set selection is
 523 // that this allows us to more accurately budget memory to hold the results of evacuation.  Memory for evacuation
// of aged regions must be reserved in the old generation.  Memory for evacuation of all other regions must be
 525 // reserved in the young generation.
 526 size_t ShenandoahGeneration::select_aged_regions(size_t old_available, size_t num_regions,
 527                                                  bool candidate_regions_for_promotion_by_copy[]) {
 528 
 529   // There should be no regions configured for subsequent in-place-promotions carried over from the previous cycle.
 530   assert_no_in_place_promotions();
 531 
 532   ShenandoahHeap* heap = ShenandoahHeap::heap();
 533   assert(heap->mode()->is_generational(), "Only in generational mode");
 534   ShenandoahMarkingContext* const ctx = heap->marking_context();
 535 
 536   const uint tenuring_threshold = heap->age_census()->tenuring_threshold();
 537 
 538   size_t old_consumed = 0;
 539   size_t promo_potential = 0;
 540 
 541   heap->clear_promotion_potential();
 542   size_t candidates = 0;
 543   size_t candidates_live = 0;
 544   size_t old_garbage_threshold = (ShenandoahHeapRegion::region_size_bytes() * ShenandoahOldGarbageThreshold) / 100;
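  // For example (hypothetical values): with a 4 MB region and ShenandoahOldGarbageThreshold == 25,
  // old_garbage_threshold is 1 MB.  Tenure-aged regions with less garbage than this are candidates for
  // promotion in place; those with at least this much garbage are candidates for promotion by copy.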
 545   size_t promote_in_place_regions = 0;
 546   size_t promote_in_place_live = 0;
 547   size_t promote_in_place_pad = 0;
 548   size_t anticipated_candidates = 0;
 549   size_t anticipated_promote_in_place_regions = 0;
 550 
 551   // Sort the promotion-eligible regions according to live-data-bytes so that we can first reclaim regions that require
 552   // less evacuation effort.  This prioritizes garbage first, expanding the allocation pool before we begin the work of
 553   // reclaiming regions that require more effort.
 554   AgedRegionData* sorted_regions = (AgedRegionData*) alloca(num_regions * sizeof(AgedRegionData));
 555   for (size_t i = 0; i < num_regions; i++) {
 556     ShenandoahHeapRegion* r = heap->get_region(i);
 557     if (r->is_empty() || !r->has_live() || !r->is_young() || !r->is_regular()) {
 558       continue;
 559     }
 560     if (r->age() >= tenuring_threshold) {
      if (r->garbage() < old_garbage_threshold) {
 562         HeapWord* tams = ctx->top_at_mark_start(r);
 563         HeapWord* original_top = r->top();
 564         if (tams == original_top) {
 565           // No allocations from this region have been made during concurrent mark. It meets all the criteria
 566           // for in-place-promotion. Though we only need the value of top when we fill the end of the region,
 567           // we use this field to indicate that this region should be promoted in place during the evacuation
 568           // phase.
 569           r->save_top_before_promote();
 570 
 571           size_t remnant_size = r->free() / HeapWordSize;
 572           if (remnant_size > ShenandoahHeap::min_fill_size()) {
 573             ShenandoahHeap::fill_with_object(original_top, remnant_size);
 574             // Fill the remnant memory within this region to assure no allocations prior to promote in place.  Otherwise,
 575             // newly allocated objects will not be parseable when promote in place tries to register them.  Furthermore, any
 576             // new allocations would not necessarily be eligible for promotion.  This addresses both issues.
 577             r->set_top(r->end());
 578             promote_in_place_pad += remnant_size * HeapWordSize;
 579           } else {
 580             // Since the remnant is so small that it cannot be filled, we don't have to worry about any accidental
 581             // allocations occurring within this region before the region is promoted in place.
 582           }
 583           promote_in_place_regions++;
 584           promote_in_place_live += r->get_live_data_bytes();
 585         }
 586         // Else, we do not promote this region (either in place or by copy) because it has received new allocations.
 587 
        // During evacuation, we exclude from promotion any region for which age >= tenure threshold,
        // garbage < garbage threshold, and get_top_before_promote() != tams.
 590       } else {
 591         // After sorting and selecting best candidates below, we may decide to exclude this promotion-eligible region
 592         // from the current collection sets.  If this happens, we will consider this region as part of the anticipated
 593         // promotion potential for the next GC pass.
 594         size_t live_data = r->get_live_data_bytes();
 595         candidates_live += live_data;
 596         sorted_regions[candidates]._region = r;
 597         sorted_regions[candidates++]._live_data = live_data;
 598       }
 599     } else {
      // We only anticipate promoting regular regions if garbage() is above the threshold.  Tenure-aged regions with less
 601       // garbage are promoted in place.  These take a different path to old-gen.  Note that certain regions that are
 602       // excluded from anticipated promotion because their garbage content is too low (causing us to anticipate that
 603       // the region would be promoted in place) may be eligible for evacuation promotion by the time promotion takes
 604       // place during a subsequent GC pass because more garbage is found within the region between now and then.  This
 605       // should not happen if we are properly adapting the tenure age.  The theory behind adaptive tenuring threshold
      // is to choose the youngest age that demonstrates no "significant" further loss of population since the previous
      // age.  If not this, we expect the tenure age to demonstrate linear population decay for at least two population
      // samples, whereas we expect to observe exponential population decay for ages younger than the tenure age.
 609       //
 610       // In the case that certain regions which were anticipated to be promoted in place need to be promoted by
 611       // evacuation, it may be the case that there is not sufficient reserve within old-gen to hold evacuation of
 612       // these regions.  The likely outcome is that these regions will not be selected for evacuation or promotion
 613       // in the current cycle and we will anticipate that they will be promoted in the next cycle.  This will cause
 614       // us to reserve more old-gen memory so that these objects can be promoted in the subsequent cycle.
 615       //
 616       // TODO:
 617       //   If we are auto-tuning the tenure age and regions that were anticipated to be promoted in place end up
 618       //   being promoted by evacuation, this event should feed into the tenure-age-selection heuristic so that
 619       //   the tenure age can be increased.
 620       if (heap->is_aging_cycle() && (r->age() + 1 == tenuring_threshold)) {
 621         if (r->garbage() >= old_garbage_threshold) {
 622           anticipated_candidates++;
 623           promo_potential += r->get_live_data_bytes();
 624         }
 625         else {
 626           anticipated_promote_in_place_regions++;
 627         }
 628       }
 629     }
 630     // Note that we keep going even if one region is excluded from selection.
 631     // Subsequent regions may be selected if they have smaller live data.
 632   }
 633   // Sort in increasing order according to live data bytes.  Note that candidates represents the number of regions
 634   // that qualify to be promoted by evacuation.
 635   if (candidates > 0) {
 636     size_t selected_regions = 0;
 637     size_t selected_live = 0;
 638     QuickSort::sort<AgedRegionData>(sorted_regions, candidates, compare_by_aged_live, false);
 639     for (size_t i = 0; i < candidates; i++) {
 640       size_t region_live_data = sorted_regions[i]._live_data;
 641       size_t promotion_need = (size_t) (region_live_data * ShenandoahPromoEvacWaste);
 642       if (old_consumed + promotion_need <= old_available) {
 643         ShenandoahHeapRegion* region = sorted_regions[i]._region;
 644         old_consumed += promotion_need;
 645         candidate_regions_for_promotion_by_copy[region->index()] = true;
 646         selected_regions++;
 647         selected_live += region_live_data;
 648       } else {
 649         // We rejected this promotable region from the collection set because we had no room to hold its copy.
 650         // Add this region to promo potential for next GC.
 651         promo_potential += region_live_data;
 652       }
 653       // We keep going even if one region is excluded from selection because we need to accumulate all eligible
 654       // regions that are not preselected into promo_potential
 655     }
 656     log_info(gc)("Preselected " SIZE_FORMAT " regions containing " SIZE_FORMAT " live bytes,"
 657                  " consuming: " SIZE_FORMAT " of budgeted: " SIZE_FORMAT,
 658                  selected_regions, selected_live, old_consumed, old_available);
 659   }
 660   heap->set_pad_for_promote_in_place(promote_in_place_pad);
 661   heap->set_promotion_potential(promo_potential);
 662   return old_consumed;
 663 }
 664 
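// Final-mark bookkeeping for choosing the collection set: update region states (and, for young
// collections, the pinned status of old regions), update the age census when adaptive tenuring is
// enabled, choose the collection set under the heap lock, and rebuild the free set using the freshly
// established evacuation reserves.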
 665 void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) {
 666   ShenandoahHeap* heap = ShenandoahHeap::heap();
 667   ShenandoahCollectionSet* collection_set = heap->collection_set();
 668   bool is_generational = heap->mode()->is_generational();
 669 
 670   assert(!heap->is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
 671   assert(!is_old(), "Only YOUNG and GLOBAL GC perform evacuations");
 672   {
 673     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_update_region_states :
 674                             ShenandoahPhaseTimings::degen_gc_final_update_region_states);
 675     ShenandoahFinalMarkUpdateRegionStateClosure cl(complete_marking_context());
 676     parallel_heap_region_iterate(&cl);
 677 
 678     if (is_young()) {
 679       // We always need to update the watermark for old regions. If there
 680       // are mixed collections pending, we also need to synchronize the
 681       // pinned status for old regions. Since we are already visiting every
 682       // old region here, go ahead and sync the pin status too.
 683       ShenandoahFinalMarkUpdateRegionStateClosure old_cl(nullptr);
 684       heap->old_generation()->parallel_heap_region_iterate(&old_cl);
 685     }
 686   }
 687 
 688   // Tally the census counts and compute the adaptive tenuring threshold
 689   if (is_generational && ShenandoahGenerationalAdaptiveTenuring && !ShenandoahGenerationalCensusAtEvac) {
 690     // Objects above TAMS weren't included in the age census. Since they were all
 691     // allocated in this cycle they belong in the age 0 cohort. We walk over all
 692     // young regions and sum the volume of objects between TAMS and top.
 693     ShenandoahUpdateCensusZeroCohortClosure age0_cl(complete_marking_context());
 694     heap->young_generation()->heap_region_iterate(&age0_cl);
 695     size_t age0_pop = age0_cl.get_population();
 696 
 697     // Age table updates
 698     ShenandoahAgeCensus* census = heap->age_census();
 699     census->prepare_for_census_update();
 700     // Update the global census, including the missed age 0 cohort above,
 701     // along with the census during marking, and compute the tenuring threshold
 702     census->update_census(age0_pop);
 703   }
 704 
 705   {
 706     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::choose_cset :
 707                             ShenandoahPhaseTimings::degen_gc_choose_cset);
 708 
 709     collection_set->clear();
 710     ShenandoahHeapLocker locker(heap->lock());
 711     if (is_generational) {
 712       size_t consumed_by_advance_promotion;
 713       bool* preselected_regions = (bool*) alloca(heap->num_regions() * sizeof(bool));
 714       for (unsigned int i = 0; i < heap->num_regions(); i++) {
 715         preselected_regions[i] = false;
 716       }
 717 
 718       // TODO: young_available can include available (between top() and end()) within each young region that is not
 719       // part of the collection set.  Making this memory available to the young_evacuation_reserve allows a larger
 720       // young collection set to be chosen when available memory is under extreme pressure.  Implementing this "improvement"
 721       // is tricky, because the incremental construction of the collection set actually changes the amount of memory
 722       // available to hold evacuated young-gen objects.  As currently implemented, the memory that is available within
 723       // non-empty regions that are not selected as part of the collection set can be allocated by the mutator while
 724       // GC is evacuating and updating references.
 725 
 726       // Budgeting parameters to compute_evacuation_budgets are passed by reference.
 727       compute_evacuation_budgets(heap, preselected_regions, collection_set, consumed_by_advance_promotion);
 728       _heuristics->choose_collection_set(collection_set);
 729       if (!collection_set->is_empty()) {
 730         // only make use of evacuation budgets when we are evacuating
 731         adjust_evacuation_budgets(heap, collection_set, consumed_by_advance_promotion);
 732       }
 733 
 734       if (is_global()) {
 735         // We have just chosen a collection set for a global cycle. The mark bitmap covering old regions is complete, so
 736         // the remembered set scan can use that to avoid walking into garbage. When the next old mark begins, we will
 737         // use the mark bitmap to make the old regions parseable by coalescing and filling any unmarked objects. Thus,
 738         // we prepare for old collections by remembering which regions are old at this time. Note that any objects
 739         // promoted into old regions will be above TAMS, and so will be considered marked. However, free regions that
 740         // become old after this point will not be covered correctly by the mark bitmap, so we must be careful not to
 741         // coalesce those regions. Only the old regions which are not part of the collection set at this point are
 742         // eligible for coalescing. As implemented now, this has the side effect of possibly initiating mixed-evacuations
 743         // after a global cycle for old regions that were not included in this collection set.
 744         assert(heap->old_generation()->is_mark_complete(), "Expected old generation mark to be complete after global cycle.");
 745         heap->old_heuristics()->prepare_for_old_collections();
 746         log_info(gc)("After choosing global collection set, mixed candidates: " UINT32_FORMAT ", coalescing candidates: " SIZE_FORMAT,
 747                      heap->old_heuristics()->unprocessed_old_collection_candidates(),
 748                      heap->old_heuristics()->coalesce_and_fill_candidates_count());
 749       }
 750     } else {
 751       _heuristics->choose_collection_set(collection_set);
 752     }
 753   }
 754 
 755   // Freeset construction uses reserve quantities if they are valid
 756   heap->set_evacuation_reserve_quantities(true);
 757   {
 758     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset :
 759                             ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
 760     ShenandoahHeapLocker locker(heap->lock());
 761     size_t young_cset_regions, old_cset_regions;
 762 
 763     // We are preparing for evacuation.  At this time, we ignore cset region tallies.
 764     heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions);
 765     heap->free_set()->rebuild(young_cset_regions, old_cset_regions);
 766   }
 767   heap->set_evacuation_reserve_quantities(false);
 768 }
 769 
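// Returns true if no affiliated region of this generation has any mark bits set: for each such region
// whose bitmap slice is committed and whose TAMS lies above bottom(), the bitmap must be clear between
// bottom() and end().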
 770 bool ShenandoahGeneration::is_bitmap_clear() {
 771   ShenandoahHeap* heap = ShenandoahHeap::heap();
 772   ShenandoahMarkingContext* context = heap->marking_context();
 773   size_t num_regions = heap->num_regions();
 774   for (size_t idx = 0; idx < num_regions; idx++) {
 775     ShenandoahHeapRegion* r = heap->get_region(idx);
 776     if (contains(r) && r->is_affiliated()) {
 777       if (heap->is_bitmap_slice_committed(r) && (context->top_at_mark_start(r) > r->bottom()) &&
 778           !context->is_bitmap_clear_range(r->bottom(), r->end())) {
 779         return false;
 780       }
 781     }
 782   }
 783   return true;
 784 }
 785 
 786 bool ShenandoahGeneration::is_mark_complete() {
 787   return _is_marking_complete.is_set();
 788 }
 789 
 790 void ShenandoahGeneration::set_mark_complete() {
 791   _is_marking_complete.set();
 792 }
 793 
 794 void ShenandoahGeneration::set_mark_incomplete() {
 795   _is_marking_complete.unset();
 796 }
 797 
 798 ShenandoahMarkingContext* ShenandoahGeneration::complete_marking_context() {
 799   assert(is_mark_complete(), "Marking must be completed.");
 800   return ShenandoahHeap::heap()->marking_context();
 801 }
 802 
 803 void ShenandoahGeneration::cancel_marking() {
 804   log_info(gc)("Cancel marking: %s", name());
 805   if (is_concurrent_mark_in_progress()) {
 806     set_mark_incomplete();
 807   }
 808   _task_queues->clear();
 809   ref_processor()->abandon_partial_discovery();
 810   set_concurrent_mark_in_progress(false);
 811 }
 812 
 813 ShenandoahGeneration::ShenandoahGeneration(ShenandoahGenerationType type,
 814                                            uint max_workers,
 815                                            size_t max_capacity,
 816                                            size_t soft_max_capacity) :
 817   _type(type),
 818   _task_queues(new ShenandoahObjToScanQueueSet(max_workers)),
 819   _ref_processor(new ShenandoahReferenceProcessor(MAX2(max_workers, 1U))),
 820   _affiliated_region_count(0), _humongous_waste(0), _used(0), _bytes_allocated_since_gc_start(0),
 821   _max_capacity(max_capacity), _soft_max_capacity(soft_max_capacity),
 822   _heuristics(nullptr) {
 823   _is_marking_complete.set();
 824   assert(max_workers > 0, "At least one queue");
 825   for (uint i = 0; i < max_workers; ++i) {
 826     ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
 827     _task_queues->register_queue(i, task_queue);
 828   }
 829 }
 830 
 831 ShenandoahGeneration::~ShenandoahGeneration() {
 832   for (uint i = 0; i < _task_queues->size(); ++i) {
 833     ShenandoahObjToScanQueue* q = _task_queues->queue(i);
 834     delete q;
 835   }
 836   delete _task_queues;
 837 }
 838 
 839 void ShenandoahGeneration::reserve_task_queues(uint workers) {
 840   _task_queues->reserve(workers);
 841 }
 842 
 843 ShenandoahObjToScanQueueSet* ShenandoahGeneration::old_gen_task_queues() const {
 844   return nullptr;
 845 }
 846 
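// Scan the remembered set with all active workers by running a ShenandoahScanRememberedTask over a
// ShenandoahRegionChunkIterator work list.  Only the young generation scans the remembered set; when
// ShenandoahEnableCardStats is enabled, per-worker card statistics are logged afterwards.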
 847 void ShenandoahGeneration::scan_remembered_set(bool is_concurrent) {
 848   assert(is_young(), "Should only scan remembered set for young generation.");
 849 
 850   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 851   uint nworkers = heap->workers()->active_workers();
 852   reserve_task_queues(nworkers);
 853 
 854   ShenandoahReferenceProcessor* rp = ref_processor();
 855   ShenandoahRegionChunkIterator work_list(nworkers);
 856   ShenandoahScanRememberedTask task(task_queues(), old_gen_task_queues(), rp, &work_list, is_concurrent);
 857   heap->assert_gc_workers(nworkers);
 858   heap->workers()->run_task(&task);
 859   if (ShenandoahEnableCardStats) {
 860     assert(heap->card_scan() != nullptr, "Not generational");
 861     heap->card_scan()->log_card_stats(nworkers, CARD_STAT_SCAN_RS);
 862   }
 863 }
 864 
 865 size_t ShenandoahGeneration::increment_affiliated_region_count() {
 866   shenandoah_assert_heaplocked_or_fullgc_safepoint();
 867   // During full gc, multiple GC worker threads may change region affiliations without a lock.  No lock is enforced
 868   // on read and write of _affiliated_region_count.  At the end of full gc, a single thread overwrites the count with
 869   // a coherent value.
 870   _affiliated_region_count++;
 871   return _affiliated_region_count;
 872 }
 873 
 874 size_t ShenandoahGeneration::decrement_affiliated_region_count() {
 875   shenandoah_assert_heaplocked_or_fullgc_safepoint();
 876   // During full gc, multiple GC worker threads may change region affiliations without a lock.  No lock is enforced
 877   // on read and write of _affiliated_region_count.  At the end of full gc, a single thread overwrites the count with
 878   // a coherent value.
 879   _affiliated_region_count--;
 880   // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
 881   assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
 882          (_used + _humongous_waste <= _affiliated_region_count * ShenandoahHeapRegion::region_size_bytes()),
 883          "used + humongous cannot exceed regions");
 884   return _affiliated_region_count;
 885 }
 886 
 887 size_t ShenandoahGeneration::increase_affiliated_region_count(size_t delta) {
 888   shenandoah_assert_heaplocked_or_fullgc_safepoint();
 889   _affiliated_region_count += delta;
 890   return _affiliated_region_count;
 891 }
 892 
 893 size_t ShenandoahGeneration::decrease_affiliated_region_count(size_t delta) {
 894   shenandoah_assert_heaplocked_or_fullgc_safepoint();
 895   assert(_affiliated_region_count >= delta, "Affiliated region count cannot be negative");
 896 
 897   _affiliated_region_count -= delta;
 898   // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
 899   assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
 900          (_used + _humongous_waste <= _affiliated_region_count * ShenandoahHeapRegion::region_size_bytes()),
 901          "used + humongous cannot exceed regions");
 902   return _affiliated_region_count;
 903 }
 904 
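// Install externally computed usage tallies for this generation at a safepoint, e.g. when the accounting
// is recomputed wholesale at the end of a full GC (see the comments in increment_affiliated_region_count
// above).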
 905 void ShenandoahGeneration::establish_usage(size_t num_regions, size_t num_bytes, size_t humongous_waste) {
 906   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
 907   _affiliated_region_count = num_regions;
 908   _used = num_bytes;
 909   _humongous_waste = humongous_waste;
 910 }
 911 
 912 void ShenandoahGeneration::increase_used(size_t bytes) {
 913   Atomic::add(&_used, bytes);
 914 }
 915 
 916 void ShenandoahGeneration::increase_humongous_waste(size_t bytes) {
 917   if (bytes > 0) {
 918     Atomic::add(&_humongous_waste, bytes);
 919   }
 920 }
 921 
 922 void ShenandoahGeneration::decrease_humongous_waste(size_t bytes) {
 923   if (bytes > 0) {
 924     assert(ShenandoahHeap::heap()->is_full_gc_in_progress() || (_humongous_waste >= bytes),
 925            "Waste (" SIZE_FORMAT ") cannot be negative (after subtracting " SIZE_FORMAT ")", _humongous_waste, bytes);
 926     Atomic::sub(&_humongous_waste, bytes);
 927   }
 928 }
 929 
 930 void ShenandoahGeneration::decrease_used(size_t bytes) {
 931   // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
 932   assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
 933          (_used >= bytes), "cannot reduce bytes used by generation below zero");
 934   Atomic::sub(&_used, bytes);
 935 }
 936 
 937 size_t ShenandoahGeneration::used_regions() const {
 938   return _affiliated_region_count;
 939 }
 940 
 941 size_t ShenandoahGeneration::free_unaffiliated_regions() const {
 942   size_t result = max_capacity() / ShenandoahHeapRegion::region_size_bytes();
 943   if (_affiliated_region_count > result) {
 944     result = 0;
 945   } else {
 946     result -= _affiliated_region_count;
 947   }
 948   return result;
 949 }
 950 
 951 size_t ShenandoahGeneration::used_regions_size() const {
 952   return _affiliated_region_count * ShenandoahHeapRegion::region_size_bytes();
 953 }
 954 
 955 size_t ShenandoahGeneration::available() const {
 956   return available(max_capacity());
 957 }
 958 
// For ShenandoahYoungGeneration, this includes the young available memory that may have been reserved for the Collector.
 960 size_t ShenandoahGeneration::available_with_reserve() const {
 961   return available(max_capacity());
 962 }
 963 
 964 size_t ShenandoahGeneration::soft_available() const {
 965   return available(soft_max_capacity());
 966 }
 967 
 968 size_t ShenandoahGeneration::available(size_t capacity) const {
 969   size_t in_use = used() + get_humongous_waste();
 970   return in_use > capacity ? 0 : capacity - in_use;
 971 }
 972 
 973 void ShenandoahGeneration::increase_capacity(size_t increment) {
 974   shenandoah_assert_heaplocked_or_safepoint();
 975 
 976   // We do not enforce that new capacity >= heap->max_size_for(this).  The maximum generation size is treated as a rule of thumb
 977   // which may be violated during certain transitions, such as when we are forcing transfers for the purpose of promoting regions
 978   // in place.
 979   // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
 980   assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
 981          (_max_capacity + increment <= ShenandoahHeap::heap()->max_capacity()), "Generation cannot be larger than heap size");
 982   assert(increment % ShenandoahHeapRegion::region_size_bytes() == 0, "Generation capacity must be multiple of region size");
 983   _max_capacity += increment;
 984 
 985   // This detects arithmetic wraparound on _used
 986   // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
 987   assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
 988          (_affiliated_region_count * ShenandoahHeapRegion::region_size_bytes() >= _used),
         "Affiliated regions must hold at least what is currently used");
 990 }
 991 
 992 void ShenandoahGeneration::decrease_capacity(size_t decrement) {
 993   shenandoah_assert_heaplocked_or_safepoint();
 994 
 995   // We do not enforce that new capacity >= heap->min_size_for(this).  The minimum generation size is treated as a rule of thumb
 996   // which may be violated during certain transitions, such as when we are forcing transfers for the purpose of promoting regions
 997   // in place.
 998   assert(decrement % ShenandoahHeapRegion::region_size_bytes() == 0, "Generation capacity must be multiple of region size");
 999   assert(_max_capacity >= decrement, "Generation capacity cannot be negative");
1000 
1001   _max_capacity -= decrement;
1002 
1003   // This detects arithmetic wraparound on _used
1004   // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
1005   assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
1006          (_affiliated_region_count * ShenandoahHeapRegion::region_size_bytes() >= _used),
         "Affiliated regions must hold at least what is currently used");
1008   // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
1009   assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
1010          (_used <= _max_capacity), "Cannot use more than capacity");
1011   // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
1012   assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
1013          (_affiliated_region_count * ShenandoahHeapRegion::region_size_bytes() <= _max_capacity),
         "Affiliated regions cannot exceed capacity");
1015 }
1016 
1017 void ShenandoahGeneration::record_success_concurrent(bool abbreviated) {
1018   heuristics()->record_success_concurrent(abbreviated);
1019   ShenandoahHeap::heap()->shenandoah_policy()->record_success_concurrent();
1020 }
1021 
1022 void ShenandoahGeneration::record_success_degenerated() {
1023   heuristics()->record_success_degenerated();
1024   ShenandoahHeap::heap()->shenandoah_policy()->record_success_degenerated();
1025 }