1 /*
   2  * Copyright (c) 2020, 2021 Amazon.com, Inc. and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  27 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  28 #include "gc/shenandoah/shenandoahGeneration.hpp"
  29 #include "gc/shenandoah/shenandoahHeap.hpp"
  30 #include "gc/shenandoah/shenandoahMarkClosures.hpp"
  31 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  32 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  33 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  34 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
  35 #include "gc/shenandoah/shenandoahUtils.hpp"
  36 #include "gc/shenandoah/shenandoahVerifier.hpp"
  37 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  38 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
  39 
  40 class ShenandoahResetUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
  41  private:
  42   ShenandoahMarkingContext* const _ctx;
  43  public:
  44   ShenandoahResetUpdateRegionStateClosure() :
  45     _ctx(ShenandoahHeap::heap()->marking_context()) {}
  46 
  47   void heap_region_do(ShenandoahHeapRegion* r) {
  48     if (r->is_active()) {
  49       // Reset live data and set TAMS optimistically. We would recheck these under the pause
  50       // anyway to capture any updates that happened since now.
  51       _ctx->capture_top_at_mark_start(r);
  52       r->clear_live_data();
  53     }
  54   }
  55 
  56   bool is_thread_safe() { return true; }
  57 };
  58 
  59 class ShenandoahResetBitmapTask : public ShenandoahHeapRegionClosure {
  60  private:
  61   ShenandoahHeap* _heap;
  62   ShenandoahMarkingContext* const _ctx;
  63  public:
  64   ShenandoahResetBitmapTask() :
  65     _heap(ShenandoahHeap::heap()),
  66     _ctx(_heap->marking_context()) {}
  67 
  68   void heap_region_do(ShenandoahHeapRegion* region) {
  69     if (_heap->is_bitmap_slice_committed(region)) {
  70       _ctx->clear_bitmap(region);
  71     }
  72   }
  73 
  74   bool is_thread_safe() { return true; }
  75 };
  76 
// Region closure that folds the remembered-set write table into the read table
// for every old region.  Used when a degenerated cycle must pick up card marks
// made after the tables were swapped (see ShenandoahGeneration::merge_write_table).
class ShenandoahMergeWriteTable: public ShenandoahHeapRegionClosure {
 private:
  ShenandoahHeap* _heap;
  RememberedScanner* _scanner;
 public:
  ShenandoahMergeWriteTable() : _heap(ShenandoahHeap::heap()), _scanner(_heap->card_scan()) {}

  virtual void heap_region_do(ShenandoahHeapRegion* r) override {
    // Only old regions carry remembered-set state worth merging.
    if (r->is_old()) {
      _scanner->merge_write_table(r->bottom(), ShenandoahHeapRegion::region_size_words());
    }
  }

  // Each invocation touches only its own region, so parallel iteration is safe.
  virtual bool is_thread_safe() override {
    return true;
  }
};
  94 
  95 class ShenandoahSquirrelAwayCardTable: public ShenandoahHeapRegionClosure {
  96  private:
  97   ShenandoahHeap* _heap;
  98   RememberedScanner* _scanner;
  99  public:
 100   ShenandoahSquirrelAwayCardTable() :
 101     _heap(ShenandoahHeap::heap()),
 102     _scanner(_heap->card_scan()) {}
 103 
 104   void heap_region_do(ShenandoahHeapRegion* region) {
 105     if (region->is_old()) {
 106       _scanner->reset_remset(region->bottom(), ShenandoahHeapRegion::region_size_words());
 107     }
 108   }
 109 
 110   bool is_thread_safe() { return true; }
 111 };
 112 
 113 void ShenandoahGeneration::confirm_heuristics_mode() {
 114   if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 115     vm_exit_during_initialization(
 116             err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 117                     _heuristics->name()));
 118   }
 119   if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
 120     vm_exit_during_initialization(
 121             err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 122                     _heuristics->name()));
 123   }
 124 }
 125 
// Create and configure the heuristics object for this generation.
// The concrete heuristics type is chosen by the GC mode; the guaranteed GC
// interval is applied and the diagnostic/experimental gating is checked
// (which may terminate the VM during initialization).
// Returns the newly installed heuristics (also stored in _heuristics).
ShenandoahHeuristics* ShenandoahGeneration::initialize_heuristics(ShenandoahMode* gc_mode) {
  _heuristics = gc_mode->initialize_heuristics(this);
  _heuristics->set_guaranteed_gc_interval(ShenandoahGuaranteedGCInterval);
  confirm_heuristics_mode();
  return _heuristics;
}
 132 
 133 size_t ShenandoahGeneration::bytes_allocated_since_gc_start() {
 134   return Atomic::load(&_bytes_allocated_since_gc_start);;
 135 }
 136 
// Atomically reset the since-GC-start allocation counter to zero,
// typically at the beginning of a new collection cycle.
void ShenandoahGeneration::reset_bytes_allocated_since_gc_start() {
  Atomic::store(&_bytes_allocated_since_gc_start, (size_t)0);
}
 140 
// Atomically add `bytes` to the since-GC-start allocation counter.
// Relaxed ordering suffices: this is a statistics counter with no
// synchronization dependencies on other memory operations.
void ShenandoahGeneration::increase_allocated(size_t bytes) {
  Atomic::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed);
}
 144 
// Emit a one-line Info(gc, ergo) log message summarizing this generation's
// memory state (used, used regions, soft/max capacity, available, adjusted
// available), prefixed by `msg`.  Cheap no-op when the log target is disabled.
void ShenandoahGeneration::log_status(const char *msg) const {
  typedef LogTarget(Info, gc, ergo) LogGcInfo;

  // Avoid the size queries entirely when the message would be discarded.
  if (!LogGcInfo::is_enabled()) {
    return;
  }

  // Not under a lock here, so read each of these once to make sure
  // byte size in proper unit and proper unit for byte size are consistent.
  size_t v_used = used();
  size_t v_used_regions = used_regions_size();
  size_t v_soft_max_capacity = soft_max_capacity();
  size_t v_max_capacity = max_capacity();
  size_t v_available = available();
  size_t v_adjusted_avail = adjusted_available();
  LogGcInfo::print("%s: %s generation used: " SIZE_FORMAT "%s, used regions: " SIZE_FORMAT "%s, "
                   "soft capacity: " SIZE_FORMAT "%s, max capacity: " SIZE_FORMAT "%s, available: " SIZE_FORMAT "%s, "
                   "adjusted available: " SIZE_FORMAT "%s",
                   msg, name(),
                   byte_size_in_proper_unit(v_used), proper_unit_for_byte_size(v_used),
                   byte_size_in_proper_unit(v_used_regions), proper_unit_for_byte_size(v_used_regions),
                   byte_size_in_proper_unit(v_soft_max_capacity), proper_unit_for_byte_size(v_soft_max_capacity),
                   byte_size_in_proper_unit(v_max_capacity), proper_unit_for_byte_size(v_max_capacity),
                   byte_size_in_proper_unit(v_available), proper_unit_for_byte_size(v_available),
                   byte_size_in_proper_unit(v_adjusted_avail), proper_unit_for_byte_size(v_adjusted_avail));
}
 171 
 172 void ShenandoahGeneration::reset_mark_bitmap() {
 173   ShenandoahHeap* heap = ShenandoahHeap::heap();
 174   heap->assert_gc_workers(heap->workers()->active_workers());
 175 
 176   set_mark_incomplete();
 177 
 178   ShenandoahResetBitmapTask task;
 179   parallel_heap_region_iterate(&task);
 180 }
 181 
// The ideal is to swap the remembered set so the safepoint effort is no more than a few pointer manipulations.
// However, limitations in the implementation of the mutator write-barrier make it difficult to simply change the
// location of the card table.  So the interim implementation of swap_remembered_set will copy the write-table
// onto the read-table and will then clear the write-table.
void ShenandoahGeneration::swap_remembered_set() {
  // Must be sure that marking is complete before we swap remembered set.
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  heap->assert_gc_workers(heap->workers()->active_workers());
  shenandoah_assert_safepoint();

  // TODO: Eventually, we want to replace this with a constant-time exchange of pointers.
  // Only old-gen regions carry remembered-set state, so iterate only old generation.
  ShenandoahSquirrelAwayCardTable task;
  heap->old_generation()->parallel_heap_region_iterate(&task);
}
 196 
// If a concurrent cycle fails _after_ the card table has been swapped we need to update the read card
// table with any writes that have occurred during the transition to the degenerated cycle. Without this,
// newly created objects which are only referenced by old objects could be lost when the remembered set
// is scanned during the degenerated mark.
void ShenandoahGeneration::merge_write_table() {
  // This should only happen for degenerated cycles
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  heap->assert_gc_workers(heap->workers()->active_workers());
  shenandoah_assert_safepoint();

  // Only old-gen regions have remembered-set tables to merge.
  ShenandoahMergeWriteTable task;
  heap->old_generation()->parallel_heap_region_iterate(&task);
}
 210 
// Prepare this generation for concurrent marking: wipe the mark bitmap, then
// capture TAMS and reset per-region live data for all active regions.
void ShenandoahGeneration::prepare_gc() {
  // Reset mark bitmap for this generation (typically young)
  reset_mark_bitmap();
  // Capture Top At Mark Start for this generation (typically young)
  ShenandoahResetUpdateRegionStateClosure cl;
  parallel_heap_region_iterate(&cl);
}
 218 
 219 void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* heap, bool* preselected_regions,
 220                                                       ShenandoahCollectionSet* collection_set,
 221                                                       size_t &consumed_by_advance_promotion) {
 222   assert(heap->mode()->is_generational(), "Only generational mode uses evacuation budgets.");
 223   size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 224   size_t regions_available_to_loan = 0;
 225   size_t minimum_evacuation_reserve = ShenandoahOldCompactionReserve * region_size_bytes;
 226   size_t old_regions_loaned_for_young_evac = 0;
 227   consumed_by_advance_promotion = 0;
 228 
 229   ShenandoahGeneration* old_generation = heap->old_generation();
 230   ShenandoahYoungGeneration* young_generation = heap->young_generation();
 231   size_t old_evacuation_reserve = 0;
 232   size_t num_regions = heap->num_regions();
 233 
 234   // During initialization and phase changes, it is more likely that fewer objects die young and old-gen
 235   // memory is not yet full (or is in the process of being replaced).  During these times especially, it
 236   // is beneficial to loan memory from old-gen to young-gen during the evacuation and update-refs phases
 237   // of execution.
 238 
 239   // Calculate EvacuationReserve before PromotionReserve.  Evacuation is more critical than promotion.
 240   // If we cannot evacuate old-gen, we will not be able to reclaim old-gen memory.  Promotions are less
 241   // critical.  If we cannot promote, there may be degradation of young-gen memory because old objects
 242   // accumulate there until they can be promoted.  This increases the young-gen marking and evacuation work.
 243 
 244   // Do not fill up old-gen memory with promotions.  Reserve some amount of memory for compaction purposes.
 245   ShenandoahOldHeuristics* old_heuristics = heap->old_heuristics();
 246   size_t young_evac_reserve_max = 0;
 247   if (old_heuristics->unprocessed_old_collection_candidates() > 0) {
 248     // Compute old_evacuation_reserve: how much memory are we reserving to hold the results of
 249     // evacuating old-gen heap regions?  In order to sustain a consistent pace of young-gen collections,
 250     // the goal is to maintain a consistent value for this parameter (when the candidate set is not
 251     // empty).  This value is the minimum of:
 252     //   1. old_gen->available()
 253     //   2. old-gen->capacity() * ShenandoahOldEvacReserve) / 100
 254     //       (e.g. old evacuation should be no larger than 5% of old_gen capacity)
 255     //   3. ((young_gen->capacity * ShenandoahEvacReserve / 100) * ShenandoahOldEvacRatioPercent) / 100
 256     //       (e.g. old evacuation should be no larger than 12% of young-gen evacuation)
 257     old_evacuation_reserve = old_generation->available();
 258     assert(old_evacuation_reserve > minimum_evacuation_reserve, "Old-gen available has not been preserved!");
 259     size_t old_evac_reserve_max = old_generation->soft_max_capacity() * ShenandoahOldEvacReserve / 100;
 260     if (old_evac_reserve_max < old_evacuation_reserve) {
 261       old_evacuation_reserve = old_evac_reserve_max;
 262     }
 263     young_evac_reserve_max =
 264       (((young_generation->soft_max_capacity() * ShenandoahEvacReserve) / 100) * ShenandoahOldEvacRatioPercent) / 100;
 265     if (young_evac_reserve_max < old_evacuation_reserve) {
 266       old_evacuation_reserve = young_evac_reserve_max;
 267     }
 268   }
 269 
 270   if (minimum_evacuation_reserve > old_generation->available()) {
 271     // Due to round-off errors during enforcement of minimum_evacuation_reserve during previous GC passes,
 272     // there can be slight discrepancies here.
 273     minimum_evacuation_reserve = old_generation->available();
 274   }
 275 
 276   heap->set_old_evac_reserve(old_evacuation_reserve);
 277   heap->reset_old_evac_expended();
 278 
 279   // Compute the young evacuation reserve: This is how much memory is available for evacuating young-gen objects.
 280   // We ignore the possible effect of promotions, which reduce demand for young-gen evacuation memory.
 281   //
 282   // TODO: We could give special treatment to the regions that have reached promotion age, because we know their
 283   // live data is entirely eligible for promotion.  This knowledge can feed both into calculations of young-gen
 284   // evacuation reserve and promotion reserve.
 285   //
 286   //  young_evacuation_reserve for young generation: how much memory are we reserving to hold the results
 287   //  of evacuating young collection set regions?  This is typically smaller than the total amount
 288   //  of available memory, and is also smaller than the total amount of marked live memory within
 289   //  young-gen.  This value is the smaller of
 290   //
 291   //    1. (young_gen->capacity() * ShenandoahEvacReserve) / 100
 292   //    2. (young_gen->available() + old_gen_memory_available_to_be_loaned
 293   //
 294   //  ShenandoahEvacReserve represents the configured target size of the evacuation region.  We can only honor
 295   //  this target if there is memory available to hold the evacuations.  Memory is available if it is already
 296   //  free within young gen, or if it can be borrowed from old gen.  Since we have not yet chosen the collection
 297   //  sets, we do not yet know the exact accounting of how many regions will be freed by this collection pass.
 298   //  What we do know is that there will be at least one evacuated young-gen region for each old-gen region that
 299   //  is loaned to the evacuation effort (because regions to be collected consume more memory than the compacted
 300   //  regions that will replace them).  In summary, if there are old-gen regions that are available to hold the
 301   //  results of young-gen evacuations, it is safe to loan them for this purpose.  At this point, we have not yet
 302   //  established a promoted_reserve.  We'll do that after we choose the collection set and analyze its impact
 303   //  on available memory.
 304   //
 305   // We do not know the evacuation_supplement until after we have computed the collection set.  It is not always
 306   // the case that young-regions inserted into the collection set will result in net decrease of in-use regions
 307   // because ShenandoahEvacWaste times multiplied by memory within the region may be larger than the region size.
 308   // The problem is especially relevant to regions that have been inserted into the collection set because they have
 309   // reached tenure age.  These regions tend to have much higher utilization (e.g. 95%).  These regions also offer
 310   // a unique opportunity because we know that every live object contained within the region is elgible to be
 311   // promoted.  Thus, the following implementation treats these regions specially:
 312   //
 313   //  1. Before beginning collection set selection, we tally the total amount of live memory held within regions
 314   //     that are known to have reached tenure age.  If this memory times ShenandoahEvacWaste is available within
 315   //     old-gen memory, establish an advance promotion reserve to hold all or some percentage of these objects.
 316   //     This advance promotion reserve is excluded from memory available for holding old-gen evacuations and cannot
 317   //     be "loaned" to young gen.
 318   //
 319   //  2. Tenure-aged regions are included in the collection set iff their evacuation size * ShenandoahEvacWaste fits
 320   //     within the advance promotion reserve.  It is counter productive to evacuate these regions if they cannot be
 321   //     evacuated directly into old-gen memory.  So if there is not sufficient memory to hold copies of their
 322   //     live data right now, we'll just let these regions remain in young for now, to be evacuated by a subsequent
 323   //     evacuation pass.
 324   //
 325   //  3. Next, we calculate a young-gen evacuation budget, which is the smaller of the two quantities mentioned
 326   //     above.  old_gen_memory_available_to_be_loaned is calculated as:
 327   //       old_gen->available - (advance-promotion-reserve + old-gen_evacuation_reserve)
 328   //
 329   //  4. When choosing the collection set, special care is taken to assure that the amount of loaned memory required to
 330   //     hold the results of evacuation is smaller than the total memory occupied by the regions added to the collection
 331   //     set.  We need to take these precautions because we do not know how much memory will be reclaimed by evacuation
 332   //     until after the collection set has been constructed.  The algorithm is as follows:
 333   //
 334   //     a. We feed into the algorithm (i) young available at the start of evacuation and (ii) the amount of memory
 335   //        loaned from old-gen that is available to hold the results of evacuation.
 336   //     b. As candidate regions are added into the young-gen collection set, we maintain accumulations of the amount
 337   //        of memory spanned by the collection set regions and the amount of memory that must be reserved to hold
 338   //        evacuation results (by multiplying live-data size by ShenandoahEvacWaste).  We process candidate regions
 339   //        in order of decreasing amounts of garbage.  We skip over (and do not include into the collection set) any
 340   //        regions that do not satisfy all of the following conditions:
 341   //
 342   //          i. The amount of live data within the region as scaled by ShenandoahEvacWaste must fit within the
 343   //             relevant evacuation reserve (live data of old-gen regions must fit within the old-evac-reserve, live
 344   //             data of young-gen tenure-aged regions must fit within the advance promotion reserve, live data within
 345   //             other young-gen regions must fit within the youn-gen evacuation reserve).
 346   //         ii. The accumulation of memory consumed by evacuation must not exceed the accumulation of memory reclaimed
 347   //             through evacuation by more than young-gen available.
 348   //        iii. Other conditions may be enforced as appropriate for specific heuristics.
 349   //
 350   //       Note that regions are considered for inclusion in the selection set in order of decreasing amounts of garbage.
 351   //       It is possible that a region with a larger amount of garbage will be rejected because it also has a larger
 352   //       amount of live data and some region that follows this region in candidate order is included in the collection
 353   //       set (because it has less live data and thus can fit within the evacuation limits even though it has less
 354   //       garbage).
 355 
 356   size_t young_evacuation_reserve = (young_generation->max_capacity() * ShenandoahEvacReserve) / 100;
 357   // old evacuation can pack into existing partially used regions.  young evacuation and loans for young allocations
 358   // need to target regions that do not already hold any old-gen objects.  Round down.
 359   regions_available_to_loan = old_generation->free_unaffiliated_regions();
 360 
 361   size_t required_evacuation_reserve;
 362   // Memory evacuated from old-gen on this pass will be available to hold old-gen evacuations in next pass.
 363   if (old_evacuation_reserve > minimum_evacuation_reserve) {
 364     required_evacuation_reserve = 0;
 365   } else {
 366     required_evacuation_reserve = minimum_evacuation_reserve - old_evacuation_reserve;
 367   }
 368 
 369   consumed_by_advance_promotion = _heuristics->select_aged_regions(
 370     old_generation->available() - old_evacuation_reserve - required_evacuation_reserve, num_regions, preselected_regions);
 371   size_t net_available_old_regions =
 372     (old_generation->available() - old_evacuation_reserve - consumed_by_advance_promotion) / region_size_bytes;
 373 
 374  if (regions_available_to_loan > net_available_old_regions) {
 375     regions_available_to_loan = net_available_old_regions;
 376   }
 377 
 378   // Otherwise, regions_available_to_loan is less than net_available_old_regions because available memory is
 379   // scattered between multiple partially used regions.
 380 
 381   if (young_evacuation_reserve > young_generation->available()) {
 382     size_t short_fall = young_evacuation_reserve - young_generation->available();
 383     if (regions_available_to_loan * region_size_bytes >= short_fall) {
 384       old_regions_loaned_for_young_evac = (short_fall + region_size_bytes - 1) / region_size_bytes;
 385       regions_available_to_loan -= old_regions_loaned_for_young_evac;
 386     } else {
 387       old_regions_loaned_for_young_evac = regions_available_to_loan;
 388       regions_available_to_loan = 0;
 389       young_evacuation_reserve = young_generation->available() + old_regions_loaned_for_young_evac * region_size_bytes;
 390       // In this case, there's no memory available for new allocations while evacuating and updating, unless we
 391       // find more old-gen memory to borrow below.
 392     }
 393   }
 394   // In generational mode, we may end up choosing a young collection set that contains so many promotable objects
 395   // that there is not sufficient space in old generation to hold the promoted objects.  That is ok because we have
 396   // assured there is sufficient space in young generation to hold the rejected promotion candidates.  These rejected
 397   // promotion candidates will presumably be promoted in a future evacuation cycle.
 398   heap->set_young_evac_reserve(young_evacuation_reserve);
 399   collection_set->establish_preselected(preselected_regions);
 400 }
 401 
// Having chosen the collection set, adjust the budgets for generational mode based on its composition.  Note
// that young_generation->available() now knows about recently discovered immediate garbage.
 404 
 405 void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* heap, ShenandoahCollectionSet* collection_set,
 406                                                      size_t consumed_by_advance_promotion) {
 407   // We may find that old_evacuation_reserve and/or loaned_for_young_evacuation are not fully consumed, in which case we may
 408   //  be able to increase regions_available_to_loan
 409 
 410   // The role of adjust_evacuation_budgets() is to compute the correct value of regions_available_to_loan and to make
 411   // effective use of this memory, including the remnant memory within these regions that may result from rounding loan to
 412   // integral number of regions.  Excess memory that is available to be loaned is applied to an allocation supplement,
 413   // which allows mutators to allocate memory beyond the current capacity of young-gen on the promise that the loan
 414   // will be repaid as soon as we finish updating references for the recently evacuated collection set.
 415 
 416   // We cannot recalculate regions_available_to_loan by simply dividing old_generation->available() by region_size_bytes
 417   // because the available memory may be distributed between many partially occupied regions that are already holding old-gen
 418   // objects.  Memory in partially occupied regions is not "available" to be loaned.  Note that an increase in old-gen
 419   // available that results from a decrease in memory consumed by old evacuation is not necessarily available to be loaned
 420   // to young-gen.
 421 
 422   assert(heap->mode()->is_generational(), "Only generational mode uses evacuation budgets.");
 423   size_t old_regions_loaned_for_young_evac, regions_available_to_loan;
 424   size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 425   ShenandoahOldGeneration* old_generation = heap->old_generation();
 426   ShenandoahYoungGeneration* young_generation = heap->young_generation();
 427   size_t old_evacuated = collection_set->get_old_bytes_reserved_for_evacuation();
 428   size_t old_evacuated_committed = (size_t) (ShenandoahEvacWaste * old_evacuated);
 429   size_t old_evacuation_reserve = heap->get_old_evac_reserve();
 430   // Immediate garbage found during choose_collection_set() is all young
 431   size_t immediate_garbage = collection_set->get_immediate_trash();
 432   size_t old_available = old_generation->available();
 433   size_t young_available = young_generation->available() + immediate_garbage;
 434   size_t loaned_regions = 0;
 435   size_t available_loan_remnant = 0; // loaned memory that is not yet dedicated to any particular budget
 436 
 437   assert(((consumed_by_advance_promotion * 33) / 32) >= collection_set->get_young_bytes_to_be_promoted() * ShenandoahEvacWaste,
 438          "Advance promotion (" SIZE_FORMAT ") should be at least young_bytes_to_be_promoted (" SIZE_FORMAT
 439          ")* ShenandoahEvacWaste, totalling: " SIZE_FORMAT ", within round-off errors of up to 3.125%%",
 440          consumed_by_advance_promotion, collection_set->get_young_bytes_to_be_promoted(),
 441          (size_t) (collection_set->get_young_bytes_to_be_promoted() * ShenandoahEvacWaste));
 442 
 443   assert(consumed_by_advance_promotion <= (collection_set->get_young_bytes_to_be_promoted() * ShenandoahEvacWaste * 33) / 32,
 444          "Round-off errors should be less than 3.125%%, consumed by advance: " SIZE_FORMAT ", promoted: " SIZE_FORMAT,
 445          consumed_by_advance_promotion, (size_t) (collection_set->get_young_bytes_to_be_promoted() * ShenandoahEvacWaste));
 446 
 447   collection_set->abandon_preselected();
 448 
 449   if (old_evacuated_committed > old_evacuation_reserve) {
 450     // This should only happen due to round-off errors when enforcing ShenandoahEvacWaste
 451     assert(old_evacuated_committed <= (33 * old_evacuation_reserve) / 32,
 452            "Round-off errors should be less than 3.125%%, committed: " SIZE_FORMAT ", reserved: " SIZE_FORMAT,
 453            old_evacuated_committed, old_evacuation_reserve);
 454     old_evacuated_committed = old_evacuation_reserve;
 455   } else if (old_evacuated_committed < old_evacuation_reserve) {
 456     // This may happen if the old-gen collection consumes less than full budget.
 457     old_evacuation_reserve = old_evacuated_committed;
 458     heap->set_old_evac_reserve(old_evacuation_reserve);
 459   }
 460 
 461   // Recompute old_regions_loaned_for_young_evac because young-gen collection set may not need all the memory
 462   // originally reserved.
 463   size_t young_promoted = collection_set->get_young_bytes_to_be_promoted();
 464   size_t young_promoted_reserve_used = (size_t) (ShenandoahEvacWaste * young_promoted);
 465 
 466   size_t young_evacuated = collection_set->get_young_bytes_reserved_for_evacuation();
 467   size_t young_evacuated_reserve_used = (size_t) (ShenandoahEvacWaste * young_evacuated);
 468 
 469   // We'll invoke heap->set_young_evac_reserve() further below, after we make additional adjustments to its value
 470 
 471   // Adjust old_regions_loaned_for_young_evac to feed into calculations of promoted_reserve
 472   if (young_evacuated_reserve_used > young_available) {
 473     size_t short_fall = young_evacuated_reserve_used - young_available;
 474 
 475     // region_size_bytes is a power of 2.  loan an integral number of regions.
 476     size_t revised_loan_for_young_evacuation = (short_fall + region_size_bytes - 1) / region_size_bytes;
 477 
 478     // available_loan_remnant represents memory loaned from old-gen but not required for young evacuation.
 479     // This is the excess loaned memory that results from rounding the required loan up to an integral number
 480     // of heap regions.  This will be dedicated to alloc_supplement below.
 481     available_loan_remnant = (revised_loan_for_young_evacuation * region_size_bytes) - short_fall;
 482 
 483     // We previously loaned more than was required by young-gen evacuation.  So claw some of this memory back.
 484     old_regions_loaned_for_young_evac = revised_loan_for_young_evacuation;
 485     loaned_regions = old_regions_loaned_for_young_evac;
 486   } else {
 487     // Undo the prevous loan, if any.
 488     old_regions_loaned_for_young_evac = 0;
 489     loaned_regions = 0;
 490   }
 491 
 492   size_t old_bytes_loaned_for_young_evac = old_regions_loaned_for_young_evac * region_size_bytes - available_loan_remnant;
 493 
 494   // Recompute regions_available_to_loan based on possible changes to old_regions_loaned_for_young_evac and
 495   // old_evacuation_reserve.
 496 
 497   // Any decrease in old_regions_loaned_for_young_evac are immediately available to be loaned
 498   // However, a change to old_evacuation_reserve() is not necessarily available to loan, because this memory may
 499   // reside within many fragments scattered throughout old-gen.
 500 
 501   regions_available_to_loan = old_generation->free_unaffiliated_regions();
 502   size_t working_old_available = old_generation->available();
 503 
 504   assert(regions_available_to_loan * region_size_bytes <= working_old_available,
 505          "Regions available to loan  must be less than available memory");
 506 
 507   // fragmented_old_total is the amount of memory in old-gen beyond regions_available_to_loan that is otherwise not
 508   // yet dedicated to a particular budget.  This memory can be used for promotion_reserve.
 509   size_t fragmented_old_total = working_old_available - regions_available_to_loan * region_size_bytes;
 510 
 511   // fragmented_old_usage is the memory that is dedicated to holding evacuated old-gen objects, which does not need
 512   // to be an integral number of regions.
 513   size_t fragmented_old_usage = old_evacuated_committed + consumed_by_advance_promotion;
 514 
 515 
 516 
 517   if (fragmented_old_total >= fragmented_old_usage) {
 518     // Seems this will be rare.  In this case, all of the memory required for old-gen evacuations and promotions can be
 519     // taken from the existing fragments within old-gen.  Reduce this fragmented total by this amount.
 520     fragmented_old_total -= fragmented_old_usage;
 521     // And reduce regions_available_to_loan by the regions dedicated to young_evac.
 522     regions_available_to_loan -= old_regions_loaned_for_young_evac;
 523   } else {
 524     // In this case, we need to dedicate some of the regions_available_to_loan to hold the results of old-gen evacuations
 525     // and promotions.
 526 
 527     size_t unaffiliated_memory_required_for_old = fragmented_old_usage - fragmented_old_total;
 528     size_t unaffiliated_regions_used_by_old = (unaffiliated_memory_required_for_old + region_size_bytes - 1) / region_size_bytes;
 529     regions_available_to_loan -= (unaffiliated_regions_used_by_old + old_regions_loaned_for_young_evac);
 530 
 531     size_t memory_for_promotions_and_old_evac = fragmented_old_total + unaffiliated_regions_used_by_old;
 532     size_t memory_required_for_promotions_and_old_evac = fragmented_old_usage;
 533     size_t excess_fragmented = memory_for_promotions_and_old_evac - memory_required_for_promotions_and_old_evac;
 534     fragmented_old_total = excess_fragmented;
 535   }
 536 
 537   // Subtract from working_old_available old_evacuated_committed and consumed_by_advance_promotion
 538   working_old_available -= fragmented_old_usage;
 539   // And also subtract out the regions loaned for young evacuation
 540   working_old_available -= old_regions_loaned_for_young_evac * region_size_bytes;
 541 
 542   // Assure that old_evacuated_committed + old_bytes_loaned_for_young_evac >= the minimum evacuation reserve
 543   // in order to prevent promotion reserve from violating minimum evacuation reserve.
 544   size_t old_regions_reserved_for_alloc_supplement = 0;
 545   size_t old_bytes_reserved_for_alloc_supplement = 0;
 546   size_t reserved_bytes_for_future_old_evac = 0;
 547 
 548   old_bytes_reserved_for_alloc_supplement = available_loan_remnant;
 549   available_loan_remnant = 0;
 550 
 551   // Memory that has been loaned for young evacuations and old-gen regions in the current mixed-evacuation collection
 552   // set will be available to hold future old-gen evacuations.  If this memory is less than the desired amount of memory
 553   // set aside for old-gen compaction reserve, try to set aside additional memory so that it will be available during
 554   // the next mixed evacuation cycle.  Note that memory loaned to young-gen for allocation supplement is excluded from
 555   // the old-gen promotion reserve.
 556   size_t future_evac_reserve_regions = old_regions_loaned_for_young_evac + collection_set->get_old_region_count();
 557   size_t collected_regions = collection_set->get_young_region_count();
 558 
 559   if (future_evac_reserve_regions < ShenandoahOldCompactionReserve) {
 560     // Require that we loan more memory for holding young evacuations to assure that we have adequate reserves to receive
 561     // old-gen evacuations during subsequent collections.  Loaning this memory for an allocation supplement does not
 562     // satisfy our needs because newly allocated objects are not necessarily counter-balanced by reclaimed collection
 563     // set regions.
 564 
 565     // Put this memory into reserve by identifying it as old_regions_loaned_for_young_evac
 566     size_t additional_regions_to_loan = ShenandoahOldCompactionReserve - future_evac_reserve_regions;
 567 
 568     // We can loan additional regions to be repaid from the anticipated recycling of young collection set regions
 569     // provided that these regions are currently available within old-gen memory.
 570     size_t collected_regions_to_loan;
 571     if (collected_regions >= additional_regions_to_loan) {
 572       collected_regions_to_loan = additional_regions_to_loan;
 573       additional_regions_to_loan = 0;
 574     } else if (collected_regions > 0) {
 575       collected_regions_to_loan = collected_regions;
 576       additional_regions_to_loan -= collected_regions_to_loan;
 577     } else {
 578       collected_regions_to_loan = 0;
 579     }
 580 
 581     if (collected_regions_to_loan > 0) {
 582       // We're evacuating at least this many regions, it's ok to use these regions for allocation supplement since
 583       // we'll be able to repay the loan at end of this GC pass, assuming the regions are available.
 584       if (collected_regions_to_loan > regions_available_to_loan) {
 585         collected_regions_to_loan = regions_available_to_loan;
 586       }
 587       old_bytes_reserved_for_alloc_supplement += collected_regions_to_loan * region_size_bytes;
 588       regions_available_to_loan -= collected_regions_to_loan;
 589       loaned_regions += collected_regions_to_loan;
 590       working_old_available -= collected_regions_to_loan * region_size_bytes;
 591     }
 592 
 593     // If there's still memory that we want to exclude from the current promotion reserve, but we are unable to loan
 594     // this memory because fully empty old-gen regions are not available, decrement the working_old_available to make
 595     // sure that this memory is not used to hold the results of old-gen evacuation.
 596     if (additional_regions_to_loan > regions_available_to_loan) {
 597       size_t unloaned_regions = additional_regions_to_loan - regions_available_to_loan;
 598       size_t unloaned_bytes = unloaned_regions * region_size_bytes;
 599 
 600       if (working_old_available < unloaned_bytes) {
 601         // We're in dire straits.  We won't be able to reserve all the memory that we want to make available for the
 602         // next old-gen evacuation.  We'll reserve as much of it as possible.  Setting working_old_available to zero
 603         // means there will be no promotion except for the advance promotion.  Note that if some advance promotion fails,
 604         // the object will be evacuated to young-gen so we should still end up reclaiming the entire advance promotion
 605         // collection set.
 606         reserved_bytes_for_future_old_evac = working_old_available;
 607         working_old_available = 0;
 608       } else {
 609         reserved_bytes_for_future_old_evac = unloaned_bytes;
 610         working_old_available -= unloaned_bytes;
 611       }
 612       size_t regions_reserved_for_future_old_evac =
 613         (reserved_bytes_for_future_old_evac + region_size_bytes - 1) / region_size_bytes;
 614 
 615       if (regions_reserved_for_future_old_evac < regions_available_to_loan) {
 616         regions_available_to_loan -= regions_reserved_for_future_old_evac;
 617       } else {
 618         regions_available_to_loan = 0;
 619       }
 620 
 621       // Since we're in dire straits, zero out fragmented_old_total so this won't be used for promotion;
 622       if (working_old_available > fragmented_old_total) {
 623         working_old_available -= fragmented_old_total;
 624       } else {
 625         working_old_available = 0;
 626       }
 627       fragmented_old_total = 0;
 628     }
 629   }
 630 
 631   // Establish young_evac_reserve so that this young-gen memory is not used for new allocations, allowing the memory
 632   // to be returned to old-gen as soon as the current collection set regions are reclaimed.
 633   heap->set_young_evac_reserve(young_evacuated_reserve_used);
 634 
 635   // Limit promoted_reserve so that we can set aside memory to be loaned from old-gen to young-gen.  This
 636   // value is not "critical".  If we underestimate, certain promotions will simply be deferred.  If we put
 637   // "all the rest" of old-gen memory into the promotion reserve, we'll have nothing left to loan to young-gen
 638   // during the evac and update phases of GC.  So we "limit" the sizes of the promotion budget to be the smaller of:
 639   //
 640   //  1. old_available
 641   //     (old_available is old_gen->available() -
 642   //      (old_evacuated_committed + consumed_by_advance_promotion + loaned_for_young_evac + reserved_for_alloc_supplement))
 643   //  2. young bytes reserved for evacuation (we can't promote more than young is evacuating)
 644   size_t promotion_reserve = working_old_available;
 645 
 646   // We experimented with constraining promoted_reserve to be no larger than 4 times the size of previously_promoted,
 647   // but this constraint was too limiting, resulting in failure of legitimate promotions.  This was tried before we
 648   // had special handling in place for advance promotion.  We should retry now that advance promotion is handled
 649   // specially.
 650 
 651   // We had also experimented with constraining promoted_reserve to be no more than young_evacuation_committed
 652   // divided by promotion_divisor, where:
 653   //  size_t promotion_divisor = (0x02 << InitialTenuringThreshold) - 1;
 654   // This also was found to be too limiting, resulting in failure of legitimate promotions.
 655   //
 656   // Both experiments were conducted in the presence of other bugs which could have been the root cause for
 657   // the failures identified above as being "too limiting".  TODO: conduct new experiments with the more limiting
 658   // values of young_evacuation_reserved_used.
 659 
 660   // young_evacuation_reserve_used already excludes bytes known to be promoted, which equals consumed_by_advance_promotion
 661   if (young_evacuated_reserve_used < promotion_reserve) {
 662     // Shrink promotion_reserve if it is larger than the memory to be consumed by evacuating all young objects in
 663     // collection set, including anticipated waste.  There's no benefit in using a larger promotion_reserve.
 664     // young_evacuation_reserve_used does not include live memory within tenure-aged regions.
 665     promotion_reserve = young_evacuated_reserve_used;
 666   }
 667   assert(working_old_available >= promotion_reserve, "Cannot reserve for promotion more than is available");
 668   working_old_available -= promotion_reserve;
 669   // Having reserved this memory for promotion, the regions are no longer available to be loaned.
 670   size_t regions_consumed_by_promotion_reserve = (promotion_reserve + region_size_bytes - 1) / region_size_bytes;
 671   if (regions_consumed_by_promotion_reserve > regions_available_to_loan) {
 672     // This can happen if the promotion reserve makes use of memory that is fragmented between many partially available
 673     // old-gen regions.
 674     regions_available_to_loan = 0;
 675   } else {
 676     regions_available_to_loan -= regions_consumed_by_promotion_reserve;
 677   }
 678 
 679   log_debug(gc)("old_gen->available(): " SIZE_FORMAT " divided between promotion reserve: " SIZE_FORMAT
 680                 ", old evacuation reserve: " SIZE_FORMAT ", advance promotion reserve supplement: " SIZE_FORMAT
 681                 ", old loaned for young evacuation: " SIZE_FORMAT ", old reserved for alloc supplement: " SIZE_FORMAT,
 682                 old_generation->available(), promotion_reserve, old_evacuated_committed, consumed_by_advance_promotion,
 683                 old_regions_loaned_for_young_evac * region_size_bytes, old_bytes_reserved_for_alloc_supplement);
 684 
 685   promotion_reserve += consumed_by_advance_promotion;
 686   heap->set_promoted_reserve(promotion_reserve);
 687 
 688   heap->reset_promoted_expended();
 689   if (collection_set->get_old_bytes_reserved_for_evacuation() == 0) {
 690     // Setting old evacuation reserve to zero denotes that there is no old-gen evacuation in this pass.
 691     heap->set_old_evac_reserve(0);
 692   }
 693 
 694   size_t old_gen_usage_base = old_generation->used() - collection_set->get_old_garbage();
 695   heap->capture_old_usage(old_gen_usage_base);
 696 
 697   // Compute additional evacuation supplement, which is extra memory borrowed from old-gen that can be allocated
 698   // by mutators while GC is working on evacuation and update-refs.  This memory can be temporarily borrowed
 699   // from old-gen allotment, then repaid at the end of update-refs from the recycled collection set.  After
 700   // we have computed the collection set based on the parameters established above, we can make additional
 701   // loans based on our knowledge of the collection set to determine how much allocation we can allow
 702   // during the evacuation and update-refs phases of execution.  The total available supplement is the result
 703   // of adding old_bytes_reserved_for_alloc_supplement to the smaller of:
 704   //
 705   //   1. regions_available_to_loan * region_size_bytes
 706   //   2. The replenishment budget (number of regions in collection set - the number of regions already
 707   //         under lien for the young_evacuation_reserve)
 708   //
 709 
 710   // Regardless of how many regions may be available to be loaned, we can loan no more regions than
 711   // the total number of young regions to be evacuated.  Call this the regions_for_runway.
 712 
 713   if (regions_available_to_loan > 0 && (collected_regions > loaned_regions)) {
 714     assert(regions_available_to_loan * region_size_bytes <= working_old_available,
 715            "regions_available_to_loan should not exceed working_old_available");
 716 
 717     size_t additional_regions_to_loan = collected_regions - loaned_regions;
 718     if (additional_regions_to_loan > regions_available_to_loan) {
 719       additional_regions_to_loan = regions_available_to_loan;
 720     }
 721     loaned_regions += additional_regions_to_loan;
 722     old_bytes_reserved_for_alloc_supplement += additional_regions_to_loan * region_size_bytes;
 723     working_old_available -= additional_regions_to_loan * region_size_bytes;
 724   }
 725   size_t allocation_supplement = old_bytes_reserved_for_alloc_supplement;
 726   heap->set_alloc_supplement_reserve(allocation_supplement);
 727 
 728   // TODO: young_available, which feeds into alloc_budget_evac_and_update is lacking memory available within
 729   // existing young-gen regions that were not selected for the collection set.  Add this in and adjust the
 730   // log message (where it says "empty-region allocation budget").
 731 
 732   log_debug(gc)("Memory reserved for young evacuation: " SIZE_FORMAT "%s for evacuating " SIZE_FORMAT
 733                 "%s out of young available: " SIZE_FORMAT "%s",
 734                 byte_size_in_proper_unit(young_evacuated_reserve_used),
 735                 proper_unit_for_byte_size(young_evacuated_reserve_used),
 736                 byte_size_in_proper_unit(young_evacuated), proper_unit_for_byte_size(young_evacuated),
 737                 byte_size_in_proper_unit(young_available), proper_unit_for_byte_size(young_available));
 738 
 739   log_debug(gc)("Memory reserved for old evacuation: " SIZE_FORMAT "%s for evacuating " SIZE_FORMAT
 740                 "%s out of old available: " SIZE_FORMAT "%s",
 741                 byte_size_in_proper_unit(old_evacuated), proper_unit_for_byte_size(old_evacuated),
 742                 byte_size_in_proper_unit(old_evacuated), proper_unit_for_byte_size(old_evacuated),
 743                 byte_size_in_proper_unit(old_available), proper_unit_for_byte_size(old_available));
 744 
 745   size_t regular_promotion = promotion_reserve - consumed_by_advance_promotion;
 746   size_t excess =
 747     old_available - (old_evacuation_reserve + promotion_reserve + old_bytes_loaned_for_young_evac + allocation_supplement);
 748   log_info(gc, ergo)("Old available: " SIZE_FORMAT "%s is partitioned into old evacuation budget: " SIZE_FORMAT
 749                      "%s, aged region promotion budget: " SIZE_FORMAT
 750                      "%s, regular region promotion budget: " SIZE_FORMAT
 751                      "%s, loaned for young evacuation: " SIZE_FORMAT
 752                      "%s, loaned for young allocations: " SIZE_FORMAT
 753                      "%s, excess: " SIZE_FORMAT "%s",
 754                      byte_size_in_proper_unit(old_available), proper_unit_for_byte_size(old_available),
 755                      byte_size_in_proper_unit(old_evacuation_reserve), proper_unit_for_byte_size(old_evacuation_reserve),
 756                      byte_size_in_proper_unit(consumed_by_advance_promotion),
 757                      proper_unit_for_byte_size(consumed_by_advance_promotion),
 758                      byte_size_in_proper_unit(regular_promotion), proper_unit_for_byte_size(regular_promotion),
 759                      byte_size_in_proper_unit(old_bytes_loaned_for_young_evac),
 760                      proper_unit_for_byte_size(old_bytes_loaned_for_young_evac),
 761                      byte_size_in_proper_unit(allocation_supplement), proper_unit_for_byte_size(allocation_supplement),
 762                      byte_size_in_proper_unit(excess), proper_unit_for_byte_size(excess));
 763 }
 764 
// Finalizes per-region state after marking and assembles the collection set.
// Runs in three phases, each timed under the matching concurrent/degenerated
// phase id:
//   1. Update region states (liveness, watermarks, pin status) from the
//      completed marking context.
//   2. Choose the collection set under the heap lock; in generational mode
//      this also computes evacuation/promotion budgets before choosing and
//      adjusts them afterwards.
//   3. Rebuild the free set under the heap lock.
void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahCollectionSet* collection_set = heap->collection_set();

  assert(!heap->is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
  assert(generation_mode() != OLD, "Only YOUNG and GLOBAL GC perform evacuations");
  {
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_update_region_states :
                            ShenandoahPhaseTimings::degen_gc_final_update_region_states);
    ShenandoahFinalMarkUpdateRegionStateClosure cl(complete_marking_context());
    parallel_heap_region_iterate(&cl);

    if (generation_mode() == YOUNG) {
      // We always need to update the watermark for old regions. If there
      // are mixed collections pending, we also need to synchronize the
      // pinned status for old regions. Since we are already visiting every
      // old region here, go ahead and sync the pin status too.
      // The nullptr marking context makes the closure skip liveness updates
      // for these old regions; only watermark/pin state is synchronized.
      ShenandoahFinalMarkUpdateRegionStateClosure old_cl(nullptr);
      heap->old_generation()->parallel_heap_region_iterate(&old_cl);
    }
  }

  {
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::choose_cset :
                            ShenandoahPhaseTimings::degen_gc_choose_cset);

    collection_set->clear();
    ShenandoahHeapLocker locker(heap->lock());
    if (heap->mode()->is_generational()) {
      size_t consumed_by_advance_promotion;
      // Stack-allocated scratch: one flag per heap region, marking regions that
      // compute_evacuation_budgets preselects (e.g. for advance promotion).
      bool* preselected_regions = (bool*) alloca(heap->num_regions() * sizeof(bool));
      for (unsigned int i = 0; i < heap->num_regions(); i++) {
        preselected_regions[i] = false;
      }

      // TODO: young_available can include available (between top() and end()) within each young region that is not
      // part of the collection set.  Making this memory available to the young_evacuation_reserve allows a larger
      // young collection set to be chosen when available memory is under extreme pressure.  Implementing this "improvement"
      // is tricky, because the incremental construction of the collection set actually changes the amount of memory
      // available to hold evacuated young-gen objects.  As currently implemented, the memory that is available within
      // non-empty regions that are not selected as part of the collection set can be allocated by the mutator while
      // GC is evacuating and updating references.

      // Budgeting parameters to compute_evacuation_budgets are passed by reference.
      compute_evacuation_budgets(heap, preselected_regions, collection_set, consumed_by_advance_promotion);

      _heuristics->choose_collection_set(collection_set, heap->old_heuristics());
      if (!collection_set->is_empty()) {
        // only make use of evacuation budgets when we are evacuating
        adjust_evacuation_budgets(heap, collection_set, consumed_by_advance_promotion);
      }
    } else {
      // Non-generational mode: no budgets to compute; just pick the cset.
      _heuristics->choose_collection_set(collection_set, heap->old_heuristics());
    }
  }

  {
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset :
                            ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
    ShenandoahHeapLocker locker(heap->lock());
    heap->free_set()->rebuild();
  }
}
 828 
 829 bool ShenandoahGeneration::is_bitmap_clear() {
 830   ShenandoahHeap* heap = ShenandoahHeap::heap();
 831   ShenandoahMarkingContext* context = heap->marking_context();
 832   size_t num_regions = heap->num_regions();
 833   for (size_t idx = 0; idx < num_regions; idx++) {
 834     ShenandoahHeapRegion* r = heap->get_region(idx);
 835     if (contains(r) && (r->affiliation() != FREE)) {
 836       if (heap->is_bitmap_slice_committed(r) && (context->top_at_mark_start(r) > r->bottom()) &&
 837           !context->is_bitmap_clear_range(r->bottom(), r->end())) {
 838         return false;
 839       }
 840     }
 841   }
 842   return true;
 843 }
 844 
// Returns true if the most recent marking cycle for this generation ran to
// completion (flag is set in set_mark_complete(), cleared on cancellation).
bool ShenandoahGeneration::is_mark_complete() {
  return _is_marking_complete.is_set();
}
 848 
// Marks this generation's marking cycle as successfully completed.
void ShenandoahGeneration::set_mark_complete() {
  _is_marking_complete.set();
}
 852 
// Invalidates marking results, e.g. when a concurrent mark is cancelled.
void ShenandoahGeneration::set_mark_incomplete() {
  _is_marking_complete.unset();
}
 856 
// Returns the heap's marking context, asserting that marking has completed so
// the liveness information in the context can be trusted by callers.
ShenandoahMarkingContext* ShenandoahGeneration::complete_marking_context() {
  assert(is_mark_complete(), "Marking must be completed.");
  return ShenandoahHeap::heap()->marking_context();
}
 861 
// Aborts an in-flight marking cycle for this generation: invalidates marking
// results, drains the task queues, and abandons partially-discovered
// references so the next cycle starts from a clean slate.
void ShenandoahGeneration::cancel_marking() {
  log_info(gc)("Cancel marking: %s", name());
  if (is_concurrent_mark_in_progress()) {
    // Marking never finished, so the mark bitmap contents cannot be trusted.
    set_mark_incomplete();
  }
  _task_queues->clear();
  ref_processor()->abandon_partial_discovery();
  set_concurrent_mark_in_progress(false);
}
 871 
// Constructs a generation with one marking task queue per worker and a
// reference processor sized for the worker count.  The adjusted capacity
// starts equal to the soft max capacity (no loan adjustment yet).
ShenandoahGeneration::ShenandoahGeneration(GenerationMode generation_mode,
                                           uint max_workers,
                                           size_t max_capacity,
                                           size_t soft_max_capacity) :
  _generation_mode(generation_mode),
  _task_queues(new ShenandoahObjToScanQueueSet(max_workers)),
  _ref_processor(new ShenandoahReferenceProcessor(MAX2(max_workers, 1U))),
  _collection_thread_time_s(0.0),
  _affiliated_region_count(0), _used(0), _bytes_allocated_since_gc_start(0),
  _max_capacity(max_capacity), _soft_max_capacity(soft_max_capacity),
  _adjusted_capacity(soft_max_capacity), _heuristics(nullptr) {
  // Marking is vacuously complete until the first marking cycle begins.
  _is_marking_complete.set();
  assert(max_workers > 0, "At least one queue");
  // Register one object-scan queue per worker thread.
  for (uint i = 0; i < max_workers; ++i) {
    ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
    _task_queues->register_queue(i, task_queue);
  }
}
 890 
 891 ShenandoahGeneration::~ShenandoahGeneration() {
 892   for (uint i = 0; i < _task_queues->size(); ++i) {
 893     ShenandoahObjToScanQueue* q = _task_queues->queue(i);
 894     delete q;
 895   }
 896   delete _task_queues;
 897 }
 898 
// Reserves marking task queues for the given number of workers ahead of a
// parallel phase.
void ShenandoahGeneration::reserve_task_queues(uint workers) {
  _task_queues->reserve(workers);
}
 902 
// Task queues for old-gen marking.  The base implementation has none and
// returns nullptr; subclasses that drive old-gen marking may override this
// (callers such as scan_remembered_set() tolerate a null result).
ShenandoahObjToScanQueueSet* ShenandoahGeneration::old_gen_task_queues() const {
  return nullptr;
}
 906 
 907 void ShenandoahGeneration::scan_remembered_set(bool is_concurrent) {
 908   assert(generation_mode() == YOUNG, "Should only scan remembered set for young generation.");
 909 
 910   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 911   uint nworkers = heap->workers()->active_workers();
 912   reserve_task_queues(nworkers);
 913 
 914   ShenandoahReferenceProcessor* rp = ref_processor();
 915   ShenandoahRegionChunkIterator work_list(nworkers);
 916   ShenandoahScanRememberedTask task(task_queues(), old_gen_task_queues(), rp, &work_list, is_concurrent);
 917   heap->assert_gc_workers(nworkers);
 918   heap->workers()->run_task(&task);
 919   if (ShenandoahEnableCardStats) {
 920     assert(heap->card_scan() != NULL, "Not generational");
 921     heap->card_scan()->log_card_stats(nworkers, CARD_STAT_SCAN_RS);
 922   }
 923 }
 924 
 925 size_t ShenandoahGeneration::increment_affiliated_region_count() {
 926   _affiliated_region_count++;
 927   return _affiliated_region_count;
 928 }
 929 
 930 size_t ShenandoahGeneration::decrement_affiliated_region_count() {
 931   _affiliated_region_count--;
 932   return _affiliated_region_count;
 933 }
 934 
// Resets this generation's used-bytes accounting to zero; only valid at a
// Shenandoah safepoint.
void ShenandoahGeneration::clear_used() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
  // Do this atomically to assure visibility to other threads, even though these other threads may be idle "right now"..
  Atomic::store(&_used, (size_t)0);
}
 940 
// Atomically adds bytes to this generation's used tally (safe for concurrent
// callers).
void ShenandoahGeneration::increase_used(size_t bytes) {
  Atomic::add(&_used, bytes);
}
 944 
// Atomically subtracts bytes from this generation's used tally.
void ShenandoahGeneration::decrease_used(size_t bytes) {
  // NOTE(review): this assert reads _used without synchronization, so it is a
  // best-effort sanity check under concurrent updates, not a precise bound.
  assert(_used >= bytes, "cannot reduce bytes used by generation below zero");
  Atomic::sub(&_used, bytes);
}
 949 
// Number of regions currently affiliated with this generation.
size_t ShenandoahGeneration::used_regions() const {
  return _affiliated_region_count;
}
 953 
 954 size_t ShenandoahGeneration::free_unaffiliated_regions() const {
 955   size_t result = soft_max_capacity() / ShenandoahHeapRegion::region_size_bytes();
 956   if (_affiliated_region_count > result) {
 957     result = 0;                 // If old-gen is loaning regions to young-gen, affiliated regions may exceed capacity temporarily.
 958   } else {
 959     result -= _affiliated_region_count;
 960   }
 961   return result;
 962 }
 963 
// Bytes spanned by affiliated regions, at whole-region granularity (this is
// coarser than the precise used() byte count).
size_t ShenandoahGeneration::used_regions_size() const {
  return _affiliated_region_count * ShenandoahHeapRegion::region_size_bytes();
}
 967 
 968 size_t ShenandoahGeneration::available() const {
 969   size_t in_use = used();
 970   size_t soft_capacity = soft_max_capacity();
 971   return in_use > soft_capacity ? 0 : soft_capacity - in_use;
 972 }
 973 
// Applies a signed, temporary adjustment to this generation's capacity
// (used when memory is loaned between generations) and returns the result.
size_t ShenandoahGeneration::adjust_available(intptr_t adjustment) {
  // NOTE(review): a large negative adjustment would wrap the unsigned sum;
  // presumably callers guarantee soft_max_capacity() + adjustment >= 0.
  _adjusted_capacity = soft_max_capacity() + adjustment;
  return _adjusted_capacity;
}
 978 
// Drops any temporary capacity adjustment, restoring the adjusted capacity to
// the soft max capacity, and returns the restored value.
size_t ShenandoahGeneration::unadjust_available() {
  _adjusted_capacity = soft_max_capacity();
  return _adjusted_capacity;
}
 983 
 984 size_t ShenandoahGeneration::adjusted_available() const {
 985   size_t in_use = used();
 986   size_t capacity = _adjusted_capacity;
 987   return in_use > capacity ? 0 : capacity - in_use;
 988 }
 989 
// Current capacity including any temporary inter-generation loan adjustment.
size_t ShenandoahGeneration::adjusted_capacity() const {
  return _adjusted_capacity;
}
 993 
// Number of whole regions of adjusted capacity not yet consumed by affiliated
// regions.  Asserts (rather than clamping) that the difference is non-negative
// and region-aligned.
size_t ShenandoahGeneration::adjusted_unaffiliated_regions() const {
  assert(adjusted_capacity() >= used_regions_size(), "adjusted_unaffiliated_regions() cannot return negative");
  assert((adjusted_capacity() - used_regions_size()) % ShenandoahHeapRegion::region_size_bytes() == 0,
         "adjusted capacity (" SIZE_FORMAT ") and used regions size (" SIZE_FORMAT ") should be multiples of region_size_bytes",
         adjusted_capacity(), used_regions_size());
  return (adjusted_capacity() - used_regions_size()) / ShenandoahHeapRegion::region_size_bytes();
}
1001 
// Permanently grows this generation's capacity (max, soft max, and adjusted,
// kept in lock-step); must hold the heap lock or be at a safepoint.
void ShenandoahGeneration::increase_capacity(size_t increment) {
  shenandoah_assert_heaplocked_or_safepoint();
  assert(_max_capacity + increment <= ShenandoahHeap::heap()->max_size_for(this), "Cannot increase generation capacity beyond maximum.");
  _max_capacity += increment;
  _soft_max_capacity += increment;
  _adjusted_capacity += increment;
}
1009 
// Permanently shrinks this generation's capacity (max, soft max, and adjusted,
// kept in lock-step); must hold the heap lock or be at a safepoint.
void ShenandoahGeneration::decrease_capacity(size_t decrement) {
  shenandoah_assert_heaplocked_or_safepoint();
  assert(_max_capacity - decrement >= ShenandoahHeap::heap()->min_size_for(this), "Cannot decrease generation capacity beyond minimum.");
  _max_capacity -= decrement;
  _soft_max_capacity -= decrement;
  _adjusted_capacity -= decrement;
}
1017 
// Records a successful concurrent cycle with this generation's heuristics and
// with the global collector policy counters.
void ShenandoahGeneration::record_success_concurrent(bool abbreviated) {
  heuristics()->record_success_concurrent(abbreviated);
  ShenandoahHeap::heap()->shenandoah_policy()->record_success_concurrent();
}
1022 
// Records a successful degenerated cycle with this generation's heuristics and
// with the global collector policy counters.
void ShenandoahGeneration::record_success_degenerated() {
  heuristics()->record_success_degenerated();
  ShenandoahHeap::heap()->shenandoah_policy()->record_success_degenerated();
}
1027 
// Accumulates collection-thread time for this generation; restricted to the
// control or VM thread, so the plain (non-atomic) accumulation is safe.
void ShenandoahGeneration::add_collection_time(double time_seconds) {
  shenandoah_assert_control_or_vm_thread();
  _collection_thread_time_s += time_seconds;
}
1032 
1033 double ShenandoahGeneration::reset_collection_time() {
1034   double t = _collection_thread_time_s;
1035   _collection_thread_time_s = 0.0;
1036   return t;
1037 }
1038