/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahAgeCensus.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahGenerationalControlThread.hpp"
#include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahInitLogger.hpp"
#include "gc/shenandoah/shenandoahMemoryPool.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahRegulatorThread.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "logging/log.hpp"
#include "utilities/events.hpp"


class ShenandoahGenerationalInitLogger : public ShenandoahInitLogger {
public:
  static void print() {
    ShenandoahGenerationalInitLogger logger;
    logger.print_all();
  }

  void print_heap() override {
    ShenandoahInitLogger::print_heap();

    ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();

    ShenandoahYoungGeneration* young = heap->young_generation();
    log_info(gc, init)("Young Generation Soft Size: " EXACTFMT, EXACTFMTARGS(young->soft_max_capacity()));
    log_info(gc, init)("Young Generation Max: " EXACTFMT, EXACTFMTARGS(young->max_capacity()));

    ShenandoahOldGeneration* old = heap->old_generation();
    log_info(gc, init)("Old Generation Soft Size: " EXACTFMT, EXACTFMTARGS(old->soft_max_capacity()));
    log_info(gc, init)("Old Generation Max: " EXACTFMT, EXACTFMTARGS(old->max_capacity()));
  }

protected:
  void print_gc_specific() override {
    ShenandoahInitLogger::print_gc_specific();

    ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
    log_info(gc, init)("Young Heuristics: %s", heap->young_generation()->heuristics()->name());
    log_info(gc, init)("Old Heuristics: %s", heap->old_generation()->heuristics()->name());
  }
};

ShenandoahGenerationalHeap* ShenandoahGenerationalHeap::heap() {
  shenandoah_assert_generational();
  CollectedHeap* heap = Universe::heap();
  return cast(heap);
}

ShenandoahGenerationalHeap* ShenandoahGenerationalHeap::cast(CollectedHeap* heap) {
  shenandoah_assert_generational();
  return checked_cast<ShenandoahGenerationalHeap*>(heap);
}

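// PLAB sizes are kept card-aligned (see allocate_from_plab_slow() below), so the remnant filler object created
// when a PLAB is retired can be registered with the remembered set without synchronizing with other threads.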
size_t ShenandoahGenerationalHeap::calculate_min_plab() {
  return align_up(PLAB::min_size(), CardTable::card_size_in_words());
}

size_t ShenandoahGenerationalHeap::calculate_max_plab() {
  size_t MaxTLABSizeWords = ShenandoahHeapRegion::max_tlab_size_words();
  return align_down(MaxTLABSizeWords, CardTable::card_size_in_words());
}

// Returns size in bytes
size_t ShenandoahGenerationalHeap::unsafe_max_tlab_alloc(Thread *thread) const {
  return MIN2(ShenandoahHeapRegion::max_tlab_size_bytes(), young_generation()->available());
}

ShenandoahGenerationalHeap::ShenandoahGenerationalHeap(ShenandoahCollectorPolicy* policy) :
  ShenandoahHeap(policy),
  _age_census(nullptr),
  _min_plab_size(calculate_min_plab()),
  _max_plab_size(calculate_max_plab()),
  _regulator_thread(nullptr),
  _young_gen_memory_pool(nullptr),
  _old_gen_memory_pool(nullptr) {
  assert(is_aligned(_min_plab_size, CardTable::card_size_in_words()), "min_plab_size must be aligned");
  assert(is_aligned(_max_plab_size, CardTable::card_size_in_words()), "max_plab_size must be aligned");
}

void ShenandoahGenerationalHeap::post_initialize() {
  ShenandoahHeap::post_initialize();
  _age_census = new ShenandoahAgeCensus();
}

void ShenandoahGenerationalHeap::print_init_logger() const {
  ShenandoahGenerationalInitLogger logger;
  logger.print_all();
}

void ShenandoahGenerationalHeap::initialize_heuristics() {
  // Initialize global generation and heuristics even in generational mode.
  ShenandoahHeap::initialize_heuristics();

  // Max capacity is the maximum _allowed_ capacity. That is, the maximum allowed capacity
  // for old would be total heap - minimum capacity of young. This means the sum of the maximum
  // allowed for old and young could exceed the total heap size. It remains the case that the
  // _actual_ capacity of young + old = total.
  _generation_sizer.heap_size_changed(max_capacity());
  size_t initial_capacity_young = _generation_sizer.max_young_size();
  size_t max_capacity_young = _generation_sizer.max_young_size();
  size_t initial_capacity_old = max_capacity() - max_capacity_young;
  size_t max_capacity_old = max_capacity() - initial_capacity_young;

  _young_generation = new ShenandoahYoungGeneration(max_workers(), max_capacity_young, initial_capacity_young);
  _old_generation = new ShenandoahOldGeneration(max_workers(), max_capacity_old, initial_capacity_old);
  _young_generation->initialize_heuristics(mode());
  _old_generation->initialize_heuristics(mode());
}

void ShenandoahGenerationalHeap::initialize_serviceability() {
  assert(mode()->is_generational(), "Only for the generational mode");
  _young_gen_memory_pool = new ShenandoahYoungGenMemoryPool(this);
  _old_gen_memory_pool = new ShenandoahOldGenMemoryPool(this);
  cycle_memory_manager()->add_pool(_young_gen_memory_pool);
  cycle_memory_manager()->add_pool(_old_gen_memory_pool);
  stw_memory_manager()->add_pool(_young_gen_memory_pool);
  stw_memory_manager()->add_pool(_old_gen_memory_pool);
}

GrowableArray<MemoryPool*> ShenandoahGenerationalHeap::memory_pools() {
  assert(mode()->is_generational(), "Only for the generational mode");
  GrowableArray<MemoryPool*> memory_pools(2);
  memory_pools.append(_young_gen_memory_pool);
  memory_pools.append(_old_gen_memory_pool);
  return memory_pools;
}

void ShenandoahGenerationalHeap::initialize_controller() {
  auto control_thread = new ShenandoahGenerationalControlThread();
  _control_thread = control_thread;
  _regulator_thread = new ShenandoahRegulatorThread(control_thread);
}

void ShenandoahGenerationalHeap::gc_threads_do(ThreadClosure* tcl) const {
  if (!shenandoah_policy()->is_at_shutdown()) {
    ShenandoahHeap::gc_threads_do(tcl);
    tcl->do_thread(regulator_thread());
  }
}

void ShenandoahGenerationalHeap::stop() {
  regulator_thread()->stop();
  ShenandoahHeap::stop();
}

oop ShenandoahGenerationalHeap::evacuate_object(oop p, Thread* thread) {
  assert(thread == Thread::current(), "Expected thread parameter to be current thread.");
  if (ShenandoahThreadLocalData::is_oom_during_evac(thread)) {
    // This thread went through the OOM during evac protocol and it is safe to return
    // the forward pointer. It must not attempt to evacuate anymore.
    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");

  ShenandoahHeapRegion* r = heap_region_containing(p);
  assert(!r->is_humongous(), "never evacuate humongous objects");

  ShenandoahAffiliation target_gen = r->affiliation();
  // gc_generation() can change asynchronously and should not be used here.
  assert(active_generation() != nullptr, "Error");
  if (active_generation()->is_young() && target_gen == YOUNG_GENERATION) {
    markWord mark = p->mark();
    if (mark.is_marked()) {
      // Already forwarded.
      return ShenandoahBarrierSet::resolve_forwarded(p);
    }

    if (mark.has_displaced_mark_helper()) {
      // We don't want to deal with MT here just to ensure we read the right mark word.
      // Skip the potential promotion attempt for this one.
    } else if (r->age() + mark.age() >= age_census()->tenuring_threshold()) {
      oop result = try_evacuate_object(p, thread, r, OLD_GENERATION);
      if (result != nullptr) {
        return result;
      }
      // If we failed to promote this aged object, we'll fall through to code below and evacuate to young-gen.
    }
  }
  return try_evacuate_object(p, thread, r, target_gen);
}

// try_evacuate_object registers the object and dirties the associated remembered set information when evacuating
// to OLD_GENERATION.
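// It returns nullptr only when a promotion attempt fails; the caller (evacuate_object, above) then retries the
// evacuation into the young generation.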
oop ShenandoahGenerationalHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapRegion* from_region,
                                        ShenandoahAffiliation target_gen) {
  bool alloc_from_lab = true;
  bool has_plab = false;
  HeapWord* copy = nullptr;
  size_t size = p->size();
  bool is_promotion = (target_gen == OLD_GENERATION) && from_region->is_young();

#ifdef ASSERT
  if (ShenandoahOOMDuringEvacALot &&
      (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
    copy = nullptr;
  } else {
#endif
    if (UseTLAB) {
      switch (target_gen) {
        case YOUNG_GENERATION: {
          copy = allocate_from_gclab(thread, size);
          if ((copy == nullptr) && (size < ShenandoahThreadLocalData::gclab_size(thread))) {
            // GCLAB allocation failed because we are bumping up against the limit on young evacuation reserve.  Try resetting
            // the desired GCLAB size and retry GCLAB allocation to avoid cascading of shared memory allocations.
            ShenandoahThreadLocalData::set_gclab_size(thread, PLAB::min_size());
            copy = allocate_from_gclab(thread, size);
            // If we still get nullptr, we'll try a shared allocation below.
          }
          break;
        }
        case OLD_GENERATION: {
          assert(mode()->is_generational(), "OLD Generation only exists in generational mode");
          PLAB* plab = ShenandoahThreadLocalData::plab(thread);
          if (plab != nullptr) {
            has_plab = true;
          }
          copy = allocate_from_plab(thread, size, is_promotion);
          if ((copy == nullptr) && (size < ShenandoahThreadLocalData::plab_size(thread)) &&
              ShenandoahThreadLocalData::plab_retries_enabled(thread)) {
            // PLAB allocation failed because we are bumping up against the limit on old evacuation reserve or because
            // the requested object does not fit within the current plab but the plab still has an "abundance" of memory,
            // where abundance is defined as >= ShenGenHeap::plab_min_size().  In the former case, we try resetting the desired
            // PLAB size and retry PLAB allocation to avoid cascading of shared memory allocations.

            // In this situation, PLAB memory is precious.  We'll try to preserve our existing PLAB by forcing
            // this particular allocation to be shared.
            if (plab->words_remaining() < plab_min_size()) {
              ShenandoahThreadLocalData::set_plab_size(thread, plab_min_size());
              copy = allocate_from_plab(thread, size, is_promotion);
              // If we still get nullptr, we'll try a shared allocation below.
              if (copy == nullptr) {
                // If retry fails, don't continue to retry until we have success (probably in next GC pass)
                ShenandoahThreadLocalData::disable_plab_retries(thread);
              }
            }
            // else, copy still equals nullptr.  this causes shared allocation below, preserving this plab for future needs.
          }
          break;
        }
        default: {
          ShouldNotReachHere();
          break;
        }
      }
    }

    if (copy == nullptr) {
      // If we failed to allocate in LAB, we'll try a shared allocation.
      if (!is_promotion || !has_plab || (size > PLAB::min_size())) {
        ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, target_gen, is_promotion);
        copy = allocate_memory(req);
        alloc_from_lab = false;
      }
      // else, we leave copy equal to nullptr, signaling a promotion failure below if appropriate.
      // We choose not to promote objects smaller than PLAB::min_size() by way of shared allocations, as this is too
      // costly.  Instead, we'll simply "evacuate" to young-gen memory (using a GCLAB) and will promote in a future
      // evacuation pass.  This condition is denoted by: is_promotion && has_plab && (size <= PLAB::min_size())
    }
#ifdef ASSERT
  }
#endif

  if (copy == nullptr) {
    if (target_gen == OLD_GENERATION) {
      if (from_region->is_young()) {
        // Signal that promotion failed. The caller will evacuate this aged object within young gen instead.
        old_generation()->handle_failed_promotion(thread, size);
        return nullptr;
      } else {
        // Remember that evacuation to old gen failed. We'll want to trigger a full gc to recover from this
        // after the evacuation threads have finished.
        old_generation()->handle_failed_evacuation();
      }
    }

    control_thread()->handle_alloc_failure_evac(size);

    oom_evac_handler()->handle_out_of_memory_during_evacuation();

    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  // Copy the object:
  evac_tracker()->begin_evacuation(thread, size * HeapWordSize);
  Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);

  oop copy_val = cast_to_oop(copy);

  if (target_gen == YOUNG_GENERATION && is_aging_cycle()) {
    ShenandoahHeap::increase_object_age(copy_val, from_region->age() + 1);
  }

  // Try to install the new forwarding pointer.
  ContinuationGCSupport::relativize_stack_chunk(copy_val);

  oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
  if (result == copy_val) {
    // Successfully evacuated. Our copy is now the public one!
    evac_tracker()->end_evacuation(thread, size * HeapWordSize);
    if (target_gen == OLD_GENERATION) {
      old_generation()->handle_evacuation(copy, size, from_region->is_young());
    } else {
      // When copying to the old generation above, we don't care
      // about recording object age in the census stats.
      assert(target_gen == YOUNG_GENERATION, "Error");
      // We record this census only when simulating pre-adaptive tenuring behavior, or
      // when we have been asked to record the census at evacuation rather than at mark
      if (ShenandoahGenerationalCensusAtEvac || !ShenandoahGenerationalAdaptiveTenuring) {
        evac_tracker()->record_age(thread, size * HeapWordSize, ShenandoahHeap::get_object_age(copy_val));
      }
    }
    shenandoah_assert_correct(nullptr, copy_val);
    return copy_val;
  } else {
    // Failed to evacuate. We need to deal with the object that is left behind. Since this
    // new allocation is certainly after TAMS, it will be considered live in the next cycle.
    // But if it happens to contain references to evacuated regions, those references would
    // not get updated for this stale copy during this cycle, and we will crash while scanning
    // it the next cycle.
    if (alloc_from_lab) {
      // For LAB allocations, it is enough to rollback the allocation ptr. Either the next
      // object will overwrite this stale copy, or the filler object on LAB retirement will
      // do this.
      switch (target_gen) {
        case YOUNG_GENERATION: {
          ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
          break;
        }
        case OLD_GENERATION: {
          ShenandoahThreadLocalData::plab(thread)->undo_allocation(copy, size);
          if (is_promotion) {
            ShenandoahThreadLocalData::subtract_from_plab_promoted(thread, size * HeapWordSize);
          }
          break;
        }
        default: {
          ShouldNotReachHere();
          break;
        }
      }
    } else {
      // For non-LAB allocations, we have no way to retract the allocation, and
      // have to explicitly overwrite the copy with the filler object. With that overwrite,
      // we have to keep the fwdptr initialized and pointing to our (stale) copy.
      assert(size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
      fill_with_object(copy, size);
      shenandoah_assert_correct(nullptr, copy_val);
      // For non-LAB allocations, the object has already been registered
    }
    shenandoah_assert_correct(nullptr, result);
    return result;
  }
}

inline HeapWord* ShenandoahGenerationalHeap::allocate_from_plab(Thread* thread, size_t size, bool is_promotion) {
  assert(UseTLAB, "TLABs should be enabled");

  PLAB* plab = ShenandoahThreadLocalData::plab(thread);
  HeapWord* obj;

  if (plab == nullptr) {
    assert(!thread->is_Java_thread() && !thread->is_Worker_thread(), "Performance: thread should have PLAB: %s", thread->name());
    // No PLABs in this thread, fallback to shared allocation
    return nullptr;
  } else if (is_promotion && !ShenandoahThreadLocalData::allow_plab_promotions(thread)) {
    return nullptr;
  }
  // if plab->word_size() <= 0, thread's plab not yet initialized for this pass, so allow_plab_promotions() is not trustworthy
  obj = plab->allocate(size);
  if ((obj == nullptr) && (plab->words_remaining() < plab_min_size())) {
    // allocate_from_plab_slow will establish allow_plab_promotions(thread) for future invocations
    obj = allocate_from_plab_slow(thread, size, is_promotion);
  }
  // if plab->words_remaining() >= ShenGenHeap::heap()->plab_min_size(), just return nullptr so we can use a shared allocation
  if (obj == nullptr) {
    return nullptr;
  }

  if (is_promotion) {
    ShenandoahThreadLocalData::add_to_plab_promoted(thread, size * HeapWordSize);
  }
  return obj;
}

// Establish a new PLAB and allocate size HeapWords within it.
HeapWord* ShenandoahGenerationalHeap::allocate_from_plab_slow(Thread* thread, size_t size, bool is_promotion) {
  // New object should fit the PLAB size

  assert(mode()->is_generational(), "PLABs only relevant to generational GC");
  const size_t plab_min_size = this->plab_min_size();
  // PLABs are aligned to card boundaries to avoid synchronization with concurrent
  // allocations in other PLABs.
  const size_t min_size = (size > plab_min_size)? align_up(size, CardTable::card_size_in_words()): plab_min_size;

  // Figure out size of new PLAB, using value determined at last refill.
  size_t cur_size = ShenandoahThreadLocalData::plab_size(thread);
  if (cur_size == 0) {
    cur_size = plab_min_size;
  }

  // Expand aggressively, doubling at each refill in this epoch, ceiling at plab_max_size()
  size_t future_size = MIN2(cur_size * 2, plab_max_size());
  // Doubling, starting at a card-multiple, should give us a card-multiple. (Ceiling and floor
  // are card multiples.)
  assert(is_aligned(future_size, CardTable::card_size_in_words()), "Card multiple by construction, future_size: " SIZE_FORMAT
          ", card_size: " SIZE_FORMAT ", cur_size: " SIZE_FORMAT ", max: " SIZE_FORMAT,
         future_size, (size_t) CardTable::card_size_in_words(), cur_size, plab_max_size());

  // Record new heuristic value even if we take any shortcut. This captures
  // the case when moderately-sized objects always take a shortcut. At some point,
  // heuristics should catch up with them.  Note that the requested cur_size may
  // not be honored, but we remember that this is the preferred size.
  log_debug(gc, free)("Set new PLAB size: " SIZE_FORMAT, future_size);
  ShenandoahThreadLocalData::set_plab_size(thread, future_size);
  if (cur_size < size) {
    // The PLAB to be allocated is still not large enough to hold the object. Fall back to shared allocation.
    // This avoids retiring perfectly good PLABs in order to represent a single large object allocation.
    log_debug(gc, free)("Current PLAB size (" SIZE_FORMAT ") is too small for " SIZE_FORMAT, cur_size, size);
    return nullptr;
  }

  // Retire current PLAB, and allocate a new one.
  PLAB* plab = ShenandoahThreadLocalData::plab(thread);
  if (plab->words_remaining() < plab_min_size) {
    // Retire current PLAB. This takes care of any PLAB book-keeping.
    // retire_plab() registers the remnant filler object with the remembered set scanner without a lock.
    // Since PLABs are card-aligned, concurrent registrations in other PLABs don't interfere.
    retire_plab(plab, thread);

    size_t actual_size = 0;
    HeapWord* plab_buf = allocate_new_plab(min_size, cur_size, &actual_size);
    if (plab_buf == nullptr) {
      if (min_size == plab_min_size) {
        // Disable PLAB promotions for this thread because we cannot even allocate a minimal PLAB. This allows us
        // to fail faster on subsequent promotion attempts.
        ShenandoahThreadLocalData::disable_plab_promotions(thread);
      }
      return nullptr;
    } else {
      ShenandoahThreadLocalData::enable_plab_retries(thread);
    }
    // Since the allocated PLAB may have been down-sized for alignment, plab->allocate(size) below may still fail.
    if (ZeroTLAB) {
      // ... and clear it.
      Copy::zero_to_words(plab_buf, actual_size);
    } else {
      // ...and zap just allocated object.
#ifdef ASSERT
      // Skip mangling the space corresponding to the object header to
      // ensure that the returned space is not considered parsable by
      // any concurrent GC thread.
      size_t hdr_size = oopDesc::header_size();
      Copy::fill_to_words(plab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
#endif // ASSERT
    }
    assert(is_aligned(actual_size, CardTable::card_size_in_words()), "Align by design");
    plab->set_buf(plab_buf, actual_size);
    if (is_promotion && !ShenandoahThreadLocalData::allow_plab_promotions(thread)) {
      return nullptr;
    }
    return plab->allocate(size);
  } else {
    // If there's still at least plab_min_size words available within the current plab, don't retire it.  Let's nibble
    // away on this plab as long as we can.  Meanwhile, return nullptr to force this particular allocation request
    // to be satisfied with a shared allocation.  By packing more promotions into the previously allocated PLAB, we
    // reduce the likelihood of evacuation failures, and we reduce the need for downsizing our PLABs.
    return nullptr;
  }
}

HeapWord* ShenandoahGenerationalHeap::allocate_new_plab(size_t min_size, size_t word_size, size_t* actual_size) {
  // Align requested sizes to card-sized multiples.  Align down so that we don't violate max size of TLAB.
  assert(is_aligned(min_size, CardTable::card_size_in_words()), "Align by design");
  assert(word_size >= min_size, "Requested PLAB is too small");

  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_plab(min_size, word_size);
  // Note that allocate_memory() sets a thread-local flag to prohibit further promotions by this thread
  // if we are at risk of infringing on the old-gen evacuation budget.
  HeapWord* res = allocate_memory(req);
  if (res != nullptr) {
    *actual_size = req.actual_size();
  } else {
    *actual_size = 0;
  }
  assert(is_aligned(res, CardTable::card_size_in_words()), "Align by design");
  return res;
}

// TODO: It is probably most efficient to register all objects (both promotions and evacuations) that were allocated within
// this plab at the time we retire the plab.  A tight registration loop will run within both code and data caches.  This change
// would allow smaller and faster in-line implementation of alloc_from_plab().  Since plabs are aligned on card-table boundaries,
// this object registration loop can be performed without acquiring a lock.
void ShenandoahGenerationalHeap::retire_plab(PLAB* plab, Thread* thread) {
  // We don't enforce limits on plab evacuations.  We let it consume all available old-gen memory in order to reduce
  // probability of an evacuation failure.  We do enforce limits on promotion, to make sure that excessive promotion
  // does not result in an old-gen evacuation failure.  Note that a failed promotion is relatively harmless.  Any
  // object that fails to promote in the current cycle will be eligible for promotion in a subsequent cycle.

  // When the plab was instantiated, its entirety was treated as if the entire buffer was going to be dedicated to
  // promotions.  Now that we are retiring the buffer, we adjust for the reality that the plab is not entirely promotions.
  //  1. Some of the plab may have been dedicated to evacuations.
  //  2. Some of the plab may have been abandoned due to waste (at the end of the plab).
  size_t not_promoted =
          ShenandoahThreadLocalData::get_plab_actual_size(thread) - ShenandoahThreadLocalData::get_plab_promoted(thread);
  ShenandoahThreadLocalData::reset_plab_promoted(thread);
  ShenandoahThreadLocalData::set_plab_actual_size(thread, 0);
  if (not_promoted > 0) {
    old_generation()->unexpend_promoted(not_promoted);
  }
  const size_t original_waste = plab->waste();
  HeapWord* const top = plab->top();

  // plab->retire() overwrites unused memory between plab->top() and plab->hard_end() with a dummy object to make memory parsable.
  // It adds the size of this unused memory, in words, to plab->waste().
  plab->retire();
  if (top != nullptr && plab->waste() > original_waste && is_in_old(top)) {
    // If retiring the plab created a filler object, then we need to register it with our card scanner so it can
    // safely walk the region backing the plab.
    log_debug(gc)("retire_plab() is registering remnant of size " SIZE_FORMAT " at " PTR_FORMAT,
                  plab->waste() - original_waste, p2i(top));
    old_generation()->card_scan()->register_object_without_lock(top);
  }
}

void ShenandoahGenerationalHeap::retire_plab(PLAB* plab) {
  Thread* thread = Thread::current();
  retire_plab(plab, thread);
}

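// Transfers heap regions between the generations according to the region balance previously recorded by
// compute_old_generation_balance(): a positive balance moves surplus old regions to young, while a negative
// balance moves young regions to old.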
ShenandoahGenerationalHeap::TransferResult ShenandoahGenerationalHeap::balance_generations() {
  shenandoah_assert_heaplocked_or_safepoint();

  ShenandoahOldGeneration* old_gen = old_generation();
  const ssize_t old_region_balance = old_gen->get_region_balance();
  old_gen->set_region_balance(0);

  if (old_region_balance > 0) {
    const auto old_region_surplus = checked_cast<size_t>(old_region_balance);
    const bool success = generation_sizer()->transfer_to_young(old_region_surplus);
    return TransferResult {
      success, old_region_surplus, "young"
    };
  }

  if (old_region_balance < 0) {
    const auto old_region_deficit = checked_cast<size_t>(-old_region_balance);
    const bool success = generation_sizer()->transfer_to_old(old_region_deficit);
    if (!success) {
      old_gen->handle_failed_transfer();
    }
    return TransferResult {
      success, old_region_deficit, "old"
    };
  }

  return TransferResult {true, 0, "none"};
}

// Make sure old-generation is large enough, but no larger than is necessary, to hold mixed evacuations
// and promotions, if we anticipate either. Any deficit is provided by the young generation, subject to
// xfer_limit, and any surplus is transferred to the young generation.
// xfer_limit is the maximum we're able to transfer from young to old.
void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t old_xfer_limit, size_t old_cset_regions) {

  // We can limit the old reserve to the size of anticipated promotions:
  // max_old_reserve is an upper bound on memory evacuated from old and promoted to old,
  // clamped by the old generation space available.
  //
  // Here's the algebra.
  // Let SOEP = ShenandoahOldEvacRatioPercent,
  //     OE = old evac,
  //     YE = young evac, and
  //     TE = total evac = OE + YE
  // By definition:
  //            SOEP/100 = OE/TE
  //                     = OE/(OE+YE)
  //  => SOEP/(100-SOEP) = OE/((OE+YE)-OE)      // componendo-dividendo: If a/b = c/d, then a/(b-a) = c/(d-c)
  //                     = OE/YE
  //  =>              OE = YE*SOEP/(100-SOEP)
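  //
  // For example, with SOEP = 75, OE = YE*75/25 = 3*YE: old evacuation may consume up to three times the
  // memory reserved for young evacuation.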

  // We have to be careful in the event that SOEP is set to 100 by the user.
  assert(ShenandoahOldEvacRatioPercent <= 100, "Error");
  const size_t old_available = old_generation()->available();
  // The free set will reserve this amount of memory to hold young evacuations
  const size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100;

  // In the case that ShenandoahOldEvacRatioPercent equals 100, max_old_reserve is limited only by xfer_limit.

  const size_t bound_on_old_reserve = old_available + old_xfer_limit + young_reserve;
  const size_t max_old_reserve = (ShenandoahOldEvacRatioPercent == 100)?
                                 bound_on_old_reserve: MIN2((young_reserve * ShenandoahOldEvacRatioPercent) / (100 - ShenandoahOldEvacRatioPercent),
                                                            bound_on_old_reserve);

  const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();

  // Decide how much old space we should reserve for a mixed collection
  size_t reserve_for_mixed = 0;
  if (old_generation()->has_unprocessed_collection_candidates()) {
    // We want this much memory to be unfragmented in order to reliably evacuate old.  This is conservative because we
    // may not evacuate the entirety of unprocessed candidates in a single mixed evacuation.
    const size_t max_evac_need = (size_t)
            (old_generation()->unprocessed_collection_candidates_live_memory() * ShenandoahOldEvacWaste);
    assert(old_available >= old_generation()->free_unaffiliated_regions() * region_size_bytes,
           "Unaffiliated available must be less than total available");
    const size_t old_fragmented_available =
            old_available - old_generation()->free_unaffiliated_regions() * region_size_bytes;
    reserve_for_mixed = max_evac_need + old_fragmented_available;
    if (reserve_for_mixed > max_old_reserve) {
      reserve_for_mixed = max_old_reserve;
    }
  }

  // Decide how much space we should reserve for promotions from young
  size_t reserve_for_promo = 0;
  const size_t promo_load = old_generation()->get_promotion_potential();
  const bool doing_promotions = promo_load > 0;
  if (doing_promotions) {
    // We're promoting and have a bound on the maximum amount that can be promoted
    assert(max_old_reserve >= reserve_for_mixed, "Sanity");
    const size_t available_for_promotions = max_old_reserve - reserve_for_mixed;
    reserve_for_promo = MIN2((size_t)(promo_load * ShenandoahPromoEvacWaste), available_for_promotions);
  }

  // This is the total old we want to ideally reserve
  const size_t old_reserve = reserve_for_mixed + reserve_for_promo;
  assert(old_reserve <= max_old_reserve, "cannot reserve more than max for old evacuations");

  // We now check if the old generation is running a surplus or a deficit.
  const size_t max_old_available = old_generation()->available() + old_cset_regions * region_size_bytes;
  if (max_old_available >= old_reserve) {
    // We are running a surplus, so the old region surplus can go to young
    const size_t old_surplus = (max_old_available - old_reserve) / region_size_bytes;
    const size_t unaffiliated_old_regions = old_generation()->free_unaffiliated_regions() + old_cset_regions;
    const size_t old_region_surplus = MIN2(old_surplus, unaffiliated_old_regions);
    old_generation()->set_region_balance(checked_cast<ssize_t>(old_region_surplus));
  } else {
    // We are running a deficit which we'd like to fill from young.
    // Ignore that this will directly impact young_generation()->max_capacity(),
    // indirectly impacting young_reserve and old_reserve.  These computations are conservative.
    // Note that deficit is rounded up by one region.
    const size_t old_need = (old_reserve - max_old_available + region_size_bytes - 1) / region_size_bytes;
    const size_t max_old_region_xfer = old_xfer_limit / region_size_bytes;

    // Round down the regions we can transfer from young to old. If we're running short
    // on young-gen memory, we restrict the xfer. Old-gen collection activities will be
    // curtailed if the budget is restricted.
    const size_t old_region_deficit = MIN2(old_need, max_old_region_xfer);
    old_generation()->set_region_balance(0 - checked_cast<ssize_t>(old_region_deficit));
  }
}

void ShenandoahGenerationalHeap::reset_generation_reserves() {
  young_generation()->set_evacuation_reserve(0);
  old_generation()->set_evacuation_reserve(0);
  old_generation()->set_promoted_reserve(0);
}

void ShenandoahGenerationalHeap::TransferResult::print_on(const char* when, outputStream* ss) const {
  auto heap = ShenandoahGenerationalHeap::heap();
  ShenandoahYoungGeneration* const young_gen = heap->young_generation();
  ShenandoahOldGeneration* const old_gen = heap->old_generation();
  const size_t young_available = young_gen->available();
  const size_t old_available = old_gen->available();
  ss->print_cr("After %s, %s " SIZE_FORMAT " regions to %s to prepare for next gc, old available: "
                     PROPERFMT ", young_available: " PROPERFMT,
                     when,
                     success? "successfully transferred": "failed to transfer", region_count, region_destination,
                     PROPERFMTARGS(old_available), PROPERFMTARGS(young_available));
}

void ShenandoahGenerationalHeap::coalesce_and_fill_old_regions(bool concurrent) {
  class ShenandoahGlobalCoalesceAndFill : public WorkerTask {
  private:
    ShenandoahPhaseTimings::Phase _phase;
    ShenandoahRegionIterator _regions;
  public:
    explicit ShenandoahGlobalCoalesceAndFill(ShenandoahPhaseTimings::Phase phase) :
      WorkerTask("Shenandoah Global Coalesce"),
      _phase(phase) {}

    void work(uint worker_id) override {
      ShenandoahWorkerTimingsTracker timer(_phase,
                                           ShenandoahPhaseTimings::ScanClusters,
                                           worker_id, true);
      ShenandoahHeapRegion* region;
      while ((region = _regions.next()) != nullptr) {
        // old region is not in the collection set and was not immediately trashed
        if (region->is_old() && region->is_active() && !region->is_humongous()) {
          // Reset the coalesce and fill boundary because this is a global collect
          // and cannot be preempted by young collects. We want to be sure the entire
          // region is coalesced here and does not resume from a previously interrupted
          // or completed coalescing.
          region->begin_preemptible_coalesce_and_fill();
          region->oop_coalesce_and_fill(false);
        }
      }
    }
  };

  ShenandoahPhaseTimings::Phase phase = concurrent ?
          ShenandoahPhaseTimings::conc_coalesce_and_fill :
          ShenandoahPhaseTimings::degen_gc_coalesce_and_fill;

  // This is not cancellable
  ShenandoahGlobalCoalesceAndFill coalesce(phase);
  workers()->run_task(&coalesce);
  old_generation()->set_parseable(true);
}

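// Worker task for the update-references phase. The CONCURRENT template parameter selects between the concurrent
// closure (workers join the suspendible thread set so they can yield to safepoints) and the STW closure used by
// degenerated cycles.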
template<bool CONCURRENT>
class ShenandoahGenerationalUpdateHeapRefsTask : public WorkerTask {
private:
  ShenandoahGenerationalHeap* _heap;
  ShenandoahRegionIterator* _regions;
  ShenandoahRegionChunkIterator* _work_chunks;

public:
  explicit ShenandoahGenerationalUpdateHeapRefsTask(ShenandoahRegionIterator* regions,
                                                    ShenandoahRegionChunkIterator* work_chunks) :
          WorkerTask("Shenandoah Update References"),
          _heap(ShenandoahGenerationalHeap::heap()),
          _regions(regions),
          _work_chunks(work_chunks)
  {
    bool old_bitmap_stable = _heap->old_generation()->is_mark_complete();
    log_debug(gc, remset)("Update refs, scan remembered set using bitmap: %s", BOOL_TO_STR(old_bitmap_stable));
  }

  void work(uint worker_id) {
    if (CONCURRENT) {
      ShenandoahConcurrentWorkerSession worker_session(worker_id);
      ShenandoahSuspendibleThreadSetJoiner stsj;
      do_work<ShenandoahConcUpdateRefsClosure>(worker_id);
    } else {
      ShenandoahParallelWorkerSession worker_session(worker_id);
      do_work<ShenandoahSTWUpdateRefsClosure>(worker_id);
    }
  }

private:
  template<class T>
  void do_work(uint worker_id) {
    T cl;

    if (CONCURRENT && (worker_id == 0)) {
      // We ask the first worker to replenish the Mutator free set by moving regions previously reserved to hold the
      // results of evacuation.  These reserves are no longer necessary because evacuation has completed.
      size_t cset_regions = _heap->collection_set()->count();
      // We cannot transfer any more regions than will be reclaimed when the existing collection set is recycled, because
      // we need the reclaimed collection set regions to replenish the collector reserves
      _heap->free_set()->move_collector_sets_to_mutator(cset_regions);
    }
    // If !CONCURRENT, there's no value in expanding Mutator free set

    ShenandoahHeapRegion* r = _regions->next();
    // We update references for global, old, and young collections.
    ShenandoahGeneration* const gc_generation = _heap->gc_generation();
    shenandoah_assert_generations_reconciled();
    assert(gc_generation->is_mark_complete(), "Expected complete marking");
    ShenandoahMarkingContext* const ctx = _heap->marking_context();
    bool is_mixed = _heap->collection_set()->has_old_regions();
    while (r != nullptr) {
      HeapWord* update_watermark = r->get_update_watermark();
      assert (update_watermark >= r->bottom(), "sanity");

      log_debug(gc)("Update refs worker " UINT32_FORMAT ", looking at region " SIZE_FORMAT, worker_id, r->index());
      bool region_progress = false;
      if (r->is_active() && !r->is_cset()) {
        if (r->is_young()) {
          _heap->marked_object_oop_iterate(r, &cl, update_watermark);
          region_progress = true;
        } else if (r->is_old()) {
          if (gc_generation->is_global()) {
            // Note that GLOBAL collection is not as effectively balanced as young and mixed cycles.  This is because
            // work is parceled out to concurrent GC threads one entire heap region at a time, and there
            // is no "catchup phase" of remembered set scanning, during which parcels of work are smaller
            // and can be distributed more fairly across threads.

            // TODO: Consider an improvement to load balance GLOBAL GC.
            _heap->marked_object_oop_iterate(r, &cl, update_watermark);
            region_progress = true;
          }
          // Otherwise, this is an old region in a young or mixed cycle.  Process it during a second phase, below.
          // Don't bother to report pacing progress in this case.
        } else {
          // Because updating of references runs concurrently, it is possible that a FREE inactive region transitions
          // to a non-free active region while this loop is executing.  Whenever this happens, the changing of a region's
          // active status may propagate at a different speed than the changing of the region's affiliation.

          // When we reach this control point, it is because a race has allowed a region's is_active() status to be seen
          // by this thread before the region's affiliation() is seen by this thread.

          // It's ok for this race to occur because the newly transformed region does not have any references to be
          // updated.

          assert(r->get_update_watermark() == r->bottom(),
                 "%s Region " SIZE_FORMAT " is_active but not recognized as YOUNG or OLD so must be newly transitioned from FREE",
                 r->affiliation_name(), r->index());
        }
      }
      if (region_progress && ShenandoahPacing) {
        _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
      }
      if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
        return;
      }
      r = _regions->next();
    }

    if (!gc_generation->is_global()) {
      // Since this is generational and not GLOBAL, we have to process the remembered set.  There's no remembered
      // set processing if not in generational mode or if running a GLOBAL cycle.

      // After this thread has exhausted its traditional update-refs work, it continues with updating refs within remembered set.
      // The remembered set workload is better balanced between threads, so threads that are "behind" can catch up with other
      // threads during this phase, allowing all threads to work more effectively in parallel.
      struct ShenandoahRegionChunk assignment;
      RememberedScanner* scanner = _heap->old_generation()->card_scan();

      while (!_heap->check_cancelled_gc_and_yield(CONCURRENT) && _work_chunks->next(&assignment)) {
        // Keep grabbing next work chunk to process until finished, or asked to yield
        ShenandoahHeapRegion* r = assignment._r;
        if (r->is_active() && !r->is_cset() && r->is_old()) {
          HeapWord* start_of_range = r->bottom() + assignment._chunk_offset;
          HeapWord* end_of_range = r->get_update_watermark();
          if (end_of_range > start_of_range + assignment._chunk_size) {
            end_of_range = start_of_range + assignment._chunk_size;
          }

          // Old region in a young cycle or mixed cycle.
          if (is_mixed) {
            // TODO: For mixed evac, consider building an old-gen remembered set that allows restricted updating
            // within old-gen HeapRegions.  This remembered set can be constructed by old-gen concurrent marking
            // and augmented by card marking.  For example, old-gen concurrent marking can remember for each old-gen
            // card which other old-gen regions it refers to: none, one-other specifically, multiple-other non-specific.
            // Update-references when _mixed_evac processes each old-gen memory range that has a traditional DIRTY
            // card or if the "old-gen remembered set" indicates that this card holds pointers specifically to an
            // old-gen region in the most recent collection set, or if this card holds pointers to other non-specific
            // old-gen heap regions.

            if (r->is_humongous()) {
              if (start_of_range < end_of_range) {
                // Need to examine both dirty and clean cards during mixed evac.
                r->oop_iterate_humongous_slice(&cl, false, start_of_range, assignment._chunk_size, true);
              }
            } else {
              // Since this is mixed evacuation, old regions that are candidates for collection have not been coalesced
              // and filled.  Use mark bits to find objects that need to be updated.
              //
              // Future TODO: establish a second remembered set to identify which old-gen regions point to other old-gen
              // regions which are in the collection set for a particular mixed evacuation.
              if (start_of_range < end_of_range) {
                HeapWord* p = nullptr;
                size_t card_index = scanner->card_index_for_addr(start_of_range);
                // In case last object in my range spans boundary of my chunk, I may need to scan all the way to top()
                ShenandoahObjectToOopBoundedClosure<T> objs(&cl, start_of_range, r->top());

                // Any object that begins in a previous range is part of a different scanning assignment.  Any object that
                // starts after end_of_range is also not my responsibility.  (Either allocated during evacuation, so does
                // not hold pointers to from-space, or is beyond the range of my assigned work chunk.)

                // Find the first object that begins in my range, if there is one.
                p = start_of_range;
                oop obj = cast_to_oop(p);
                HeapWord* tams = ctx->top_at_mark_start(r);
                if (p >= tams) {
                  // We cannot use ctx->is_marked(obj) to test whether an object begins at this address.  Instead,
                  // we need to use the remembered set crossing map to advance p to the first object that starts
                  // within the enclosing card.

                  while (true) {
                    HeapWord* first_object = scanner->first_object_in_card(card_index);
                    if (first_object != nullptr) {
                      p = first_object;
                      break;
                    } else if (scanner->addr_for_card_index(card_index + 1) < end_of_range) {
                      card_index++;
                    } else {
                      // Force the loop that follows to immediately terminate.
                      p = end_of_range;
                      break;
                    }
                  }
                  obj = cast_to_oop(p);
                  // Note: p may be >= end_of_range
                } else if (!ctx->is_marked(obj)) {
                  p = ctx->get_next_marked_addr(p, tams);
                  obj = cast_to_oop(p);
                  // If there are no more marked objects before tams, this returns tams.
                  // Note that tams is either >= end_of_range, or tams is the start of an object that is marked.
                }
                while (p < end_of_range) {
                  // p is known to point to the beginning of marked object obj
                  objs.do_object(obj);
                  HeapWord* prev_p = p;
                  p += obj->size();
                  if (p < tams) {
                    p = ctx->get_next_marked_addr(p, tams);
                    // If there are no more marked objects before tams, this returns tams.  Note that tams is
                    // either >= end_of_range, or tams is the start of an object that is marked.
                  }
                  assert(p != prev_p, "Lack of forward progress");
                  obj = cast_to_oop(p);
                }
              }
            }
          } else {
            // This is a young evacuation.
            if (start_of_range < end_of_range) {
              size_t cluster_size =
                      CardTable::card_size_in_words() * ShenandoahCardCluster<ShenandoahDirectCardMarkRememberedSet>::CardsPerCluster;
              size_t clusters = assignment._chunk_size / cluster_size;
              assert(clusters * cluster_size == assignment._chunk_size, "Chunk assignment must align on cluster boundaries");
              scanner->process_region_slice(r, assignment._chunk_offset, clusters, end_of_range, &cl, true, worker_id);
            }
          }
          if (ShenandoahPacing && (start_of_range < end_of_range)) {
            _heap->pacer()->report_updaterefs(pointer_delta(end_of_range, start_of_range));
          }
        }
      }
    }
  }
};

void ShenandoahGenerationalHeap::update_heap_references(bool concurrent) {
  assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
  const uint nworkers = workers()->active_workers();
  ShenandoahRegionChunkIterator work_list(nworkers);
  if (concurrent) {
    ShenandoahGenerationalUpdateHeapRefsTask<true> task(&_update_refs_iterator, &work_list);
    workers()->run_task(&task);
  } else {
    ShenandoahGenerationalUpdateHeapRefsTask<false> task(&_update_refs_iterator, &work_list);
    workers()->run_task(&task);
  }

  if (ShenandoahEnableCardStats) {
    // Only do this if we are collecting card stats
    RememberedScanner* card_scan = old_generation()->card_scan();
    assert(card_scan != nullptr, "Card table must exist when card stats are enabled");
    card_scan->log_card_stats(nworkers, CARD_STAT_UPDATE_REFS);
  }
}

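// Applies two region closures in a single pass over the heap regions; the composite is considered thread-safe
// only if both constituent closures are.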
namespace ShenandoahCompositeRegionClosure {
  template<typename C1, typename C2>
  class Closure : public ShenandoahHeapRegionClosure {
  private:
    C1 &_c1;
    C2 &_c2;

  public:
    Closure(C1 &c1, C2 &c2) : ShenandoahHeapRegionClosure(), _c1(c1), _c2(c2) {}

    void heap_region_do(ShenandoahHeapRegion* r) override {
      _c1.heap_region_do(r);
      _c2.heap_region_do(r);
    }

    bool is_thread_safe() override {
      return _c1.is_thread_safe() && _c2.is_thread_safe();
    }
  };


  template<typename C1, typename C2>
  Closure<C1, C2> of(C1 &c1, C2 &c2) {
    return Closure<C1, C2>(c1, c2);
  }
}

class ShenandoahUpdateRegionAges : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* _ctx;

public:
  explicit ShenandoahUpdateRegionAges(ShenandoahMarkingContext* ctx) : _ctx(ctx) { }

  void heap_region_do(ShenandoahHeapRegion* r) override {
    // Maintenance of region age must follow evacuation in order to account for
    // evacuation allocations within survivor regions.  We consult region age during
    // the subsequent evacuation to determine whether certain objects need to
    // be promoted.
    if (r->is_young() && r->is_active()) {
      HeapWord *tams = _ctx->top_at_mark_start(r);
      HeapWord *top = r->top();

      // Allocations move the watermark when top moves.  However, compacting
      // objects will sometimes lower top beneath the watermark, after which,
      // attempts to read the watermark will assert out (watermark should not be
      // higher than top).
      if (top > tams) {
        // There have been allocations in this region since the start of the cycle.
        // Any objects new to this region must not assimilate elevated age.
        r->reset_age();
      } else if (ShenandoahGenerationalHeap::heap()->is_aging_cycle()) {
        r->increment_age();
      }
    }
  }

  bool is_thread_safe() override {
    return true;
  }
};

void ShenandoahGenerationalHeap::final_update_refs_update_region_states() {
  ShenandoahSynchronizePinnedRegionStates pins;
  ShenandoahUpdateRegionAges ages(active_generation()->complete_marking_context());
  auto cl = ShenandoahCompositeRegionClosure::of(pins, ages);
  parallel_heap_region_iterate(&cl);
}

void ShenandoahGenerationalHeap::complete_degenerated_cycle() {
  shenandoah_assert_heaplocked_or_safepoint();
  if (is_concurrent_old_mark_in_progress()) {
    // This is still necessary for degenerated cycles because the degeneration point may occur
    // after final mark of the young generation. See ShenandoahConcurrentGC::op_final_updaterefs for
    // a more detailed explanation.
    old_generation()->transfer_pointers_from_satb();
  }

  // We defer generation resizing actions until after cset regions have been recycled.
  TransferResult result = balance_generations();
  LogTarget(Info, gc, ergo) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    result.print_on("Degenerated GC", &ls);
  }

  // In case degeneration interrupted concurrent evacuation or update references, we need to clean up
  // transient state. Otherwise, these actions have no effect.
  reset_generation_reserves();

  if (!old_generation()->is_parseable()) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_coalesce_and_fill);
    coalesce_and_fill_old_regions(false);
  }
}

void ShenandoahGenerationalHeap::complete_concurrent_cycle() {
  if (!old_generation()->is_parseable()) {
    // Class unloading may render the card offsets unusable, so we must rebuild them before
    // the next remembered set scan. We _could_ let the control thread do this sometime after
    // the global cycle has completed and before the next young collection, but under memory
    // pressure the control thread may not have the time (that is, because it's running back
    // to back GCs). In that scenario, we would have to make the old regions parsable before
    // we could start a young collection. This could delay the start of the young cycle and
    // throw off the heuristics.
    entry_global_coalesce_and_fill();
  }

  TransferResult result;
  {
    ShenandoahHeapLocker locker(lock());

    result = balance_generations();
    reset_generation_reserves();
  }

  LogTarget(Info, gc, ergo) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    result.print_on("Concurrent GC", &ls);
  }
}

void ShenandoahGenerationalHeap::entry_global_coalesce_and_fill() {
  const char* msg = "Coalescing and filling old regions";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_coalesce_and_fill);

  TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
  EventMark em("%s", msg);
  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent coalesce and fill");

  coalesce_and_fill_old_regions(true);
}

void ShenandoahGenerationalHeap::update_region_ages(ShenandoahMarkingContext* ctx) {
  ShenandoahUpdateRegionAges cl(ctx);
  parallel_heap_region_iterate(&cl);
}