/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/shenandoah/shenandoahAgeCensus.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahGenerationalControlThread.hpp"
#include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeapRegionClosures.hpp"
#include "gc/shenandoah/shenandoahInitLogger.hpp"
#include "gc/shenandoah/shenandoahMemoryPool.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahRegulatorThread.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "logging/log.hpp"
#include "utilities/events.hpp"


class ShenandoahGenerationalInitLogger : public ShenandoahInitLogger {
public:
  static void print() {
    ShenandoahGenerationalInitLogger logger;
    logger.print_all();
  }

  void print_heap() override {
    ShenandoahInitLogger::print_heap();

    ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();

    ShenandoahYoungGeneration* young = heap->young_generation();
    log_info(gc, init)("Young Generation Soft Size: " EXACTFMT, EXACTFMTARGS(young->soft_max_capacity()));
    log_info(gc, init)("Young Generation Max: " EXACTFMT, EXACTFMTARGS(young->max_capacity()));

    ShenandoahOldGeneration* old = heap->old_generation();
    log_info(gc, init)("Old Generation Soft Size: " EXACTFMT, EXACTFMTARGS(old->soft_max_capacity()));
    log_info(gc, init)("Old Generation Max: " EXACTFMT, EXACTFMTARGS(old->max_capacity()));
  }

protected:
  void print_gc_specific() override {
    ShenandoahInitLogger::print_gc_specific();

    ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
    log_info(gc, init)("Young Heuristics: %s", heap->young_generation()->heuristics()->name());
    log_info(gc, init)("Old Heuristics: %s", heap->old_generation()->heuristics()->name());
  }
};

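// PLAB sizes are kept card-aligned so that the remnant of a retired PLAB can be registered with the
// remembered-set scanner without synchronization (see retire_plab() and allocate_from_plab_slow() below).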
size_t ShenandoahGenerationalHeap::calculate_min_plab() {
  return align_up(PLAB::min_size(), CardTable::card_size_in_words());
}

size_t ShenandoahGenerationalHeap::calculate_max_plab() {
  size_t MaxTLABSizeWords = ShenandoahHeapRegion::max_tlab_size_words();
  return align_down(MaxTLABSizeWords, CardTable::card_size_in_words());
}

// Returns size in bytes
size_t ShenandoahGenerationalHeap::unsafe_max_tlab_alloc(Thread *thread) const {
  return MIN2(ShenandoahHeapRegion::max_tlab_size_bytes(), young_generation()->available());
}

ShenandoahGenerationalHeap::ShenandoahGenerationalHeap(ShenandoahCollectorPolicy* policy) :
  ShenandoahHeap(policy),
  _age_census(nullptr),
  _evac_tracker(new ShenandoahEvacuationTracker()),
  _min_plab_size(calculate_min_plab()),
  _max_plab_size(calculate_max_plab()),
  _regulator_thread(nullptr),
  _young_gen_memory_pool(nullptr),
  _old_gen_memory_pool(nullptr) {
  assert(is_aligned(_min_plab_size, CardTable::card_size_in_words()), "min_plab_size must be aligned");
  assert(is_aligned(_max_plab_size, CardTable::card_size_in_words()), "max_plab_size must be aligned");
}

void ShenandoahGenerationalHeap::post_initialize() {
  ShenandoahHeap::post_initialize();
  _age_census = new ShenandoahAgeCensus();
}

void ShenandoahGenerationalHeap::print_init_logger() const {
  ShenandoahGenerationalInitLogger logger;
  logger.print_all();
}

void ShenandoahGenerationalHeap::print_tracing_info() const {
  ShenandoahHeap::print_tracing_info();

  LogTarget(Info, gc, stats) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.cr();
    ls.cr();
    evac_tracker()->print_global_on(&ls);
  }
}

void ShenandoahGenerationalHeap::initialize_heuristics() {
  // Initialize global generation and heuristics even in generational mode.
  ShenandoahHeap::initialize_heuristics();

  // Max capacity is the maximum _allowed_ capacity. That is, the maximum allowed capacity
  // for old would be total heap - minimum capacity of young. This means the sum of the maximum
  // allowed for old and young could exceed the total heap size. It remains the case that the
  // _actual_ capacity of young + old = total.
  _generation_sizer.heap_size_changed(max_capacity());
  size_t initial_capacity_young = _generation_sizer.max_young_size();
  size_t max_capacity_young = _generation_sizer.max_young_size();
  size_t initial_capacity_old = max_capacity() - max_capacity_young;
  size_t max_capacity_old = max_capacity() - initial_capacity_young;

  _young_generation = new ShenandoahYoungGeneration(max_workers(), max_capacity_young, initial_capacity_young);
  _old_generation = new ShenandoahOldGeneration(max_workers(), max_capacity_old, initial_capacity_old);
  _young_generation->initialize_heuristics(mode());
  _old_generation->initialize_heuristics(mode());
}

void ShenandoahGenerationalHeap::initialize_serviceability() {
  assert(mode()->is_generational(), "Only for the generational mode");
  _young_gen_memory_pool = new ShenandoahYoungGenMemoryPool(this);
  _old_gen_memory_pool = new ShenandoahOldGenMemoryPool(this);
  cycle_memory_manager()->add_pool(_young_gen_memory_pool);
  cycle_memory_manager()->add_pool(_old_gen_memory_pool);
  stw_memory_manager()->add_pool(_young_gen_memory_pool);
  stw_memory_manager()->add_pool(_old_gen_memory_pool);
}

GrowableArray<MemoryPool*> ShenandoahGenerationalHeap::memory_pools() {
  assert(mode()->is_generational(), "Only for the generational mode");
  GrowableArray<MemoryPool*> memory_pools(2);
  memory_pools.append(_young_gen_memory_pool);
  memory_pools.append(_old_gen_memory_pool);
  return memory_pools;
}

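// Create the control threads for generational mode: the generational control thread, and the regulator
// thread, which polls the generation heuristics and asks the control thread to start collection cycles.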
void ShenandoahGenerationalHeap::initialize_controller() {
  auto control_thread = new ShenandoahGenerationalControlThread();
  _control_thread = control_thread;
  _regulator_thread = new ShenandoahRegulatorThread(control_thread);
}

void ShenandoahGenerationalHeap::gc_threads_do(ThreadClosure* tcl) const {
  if (!shenandoah_policy()->is_at_shutdown()) {
    ShenandoahHeap::gc_threads_do(tcl);
    tcl->do_thread(regulator_thread());
  }
}

void ShenandoahGenerationalHeap::stop() {
  ShenandoahHeap::stop();
  regulator_thread()->stop();
}

bool ShenandoahGenerationalHeap::requires_barriers(stackChunkOop obj) const {
  if (is_idle()) {
    return false;
  }

  if (is_concurrent_young_mark_in_progress() && is_in_young(obj) && !marking_context()->allocated_after_mark_start(obj)) {
    // We are marking young, this object is in young, and it is below the TAMS
    return true;
  }

  if (is_in_old(obj)) {
    // Card marking barriers are required for objects in the old generation
    return true;
  }

  if (has_forwarded_objects()) {
    // Object may have pointers that need to be updated
    return true;
  }

  return false;
}

void ShenandoahGenerationalHeap::evacuate_collection_set(bool concurrent) {
  ShenandoahRegionIterator regions;
  ShenandoahGenerationalEvacuationTask task(this, &regions, concurrent, false /* only promote regions */);
  workers()->run_task(&task);
}

void ShenandoahGenerationalHeap::promote_regions_in_place(bool concurrent) {
  ShenandoahRegionIterator regions;
  ShenandoahGenerationalEvacuationTask task(this, &regions, concurrent, true /* only promote regions */);
  workers()->run_task(&task);
}

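// Evacuate object p on behalf of thread. If the containing region is young and the object's combined
// region/mark-word age has reached the tenuring threshold, first attempt to promote it into the old
// generation; otherwise (or if promotion fails) evacuate it within its current generation.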
oop ShenandoahGenerationalHeap::evacuate_object(oop p, Thread* thread) {
  assert(thread == Thread::current(), "Expected thread parameter to be current thread.");
  if (ShenandoahThreadLocalData::is_oom_during_evac(thread)) {
    // This thread went through the OOM during evac protocol and it is safe to return
    // the forward pointer. It must not attempt to evacuate anymore.
    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");

  ShenandoahHeapRegion* r = heap_region_containing(p);
  assert(!r->is_humongous(), "never evacuate humongous objects");

  ShenandoahAffiliation target_gen = r->affiliation();
  // gc_generation() can change asynchronously and should not be used here.
  assert(active_generation() != nullptr, "Error");
  if (active_generation()->is_young() && target_gen == YOUNG_GENERATION) {
    markWord mark = p->mark();
    if (mark.is_marked()) {
      // Already forwarded.
      return ShenandoahBarrierSet::resolve_forwarded(p);
    }

    if (mark.has_displaced_mark_helper()) {
      // We don't want to deal with MT here just to ensure we read the right mark word.
      // Skip the potential promotion attempt for this one.
    } else if (r->age() + mark.age() >= age_census()->tenuring_threshold()) {
      oop result = try_evacuate_object(p, thread, r, OLD_GENERATION);
      if (result != nullptr) {
        return result;
      }
      // If we failed to promote this aged object, we'll fall through to code below and evacuate to young-gen.
    }
  }
  return try_evacuate_object(p, thread, r, target_gen);
}

// try_evacuate_object registers the object and dirties the associated remembered set information when evacuating
// to OLD_GENERATION.
oop ShenandoahGenerationalHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapRegion* from_region,
                                        ShenandoahAffiliation target_gen) {
  bool alloc_from_lab = true;
  bool has_plab = false;
  HeapWord* copy = nullptr;
  size_t size = ShenandoahForwarding::size(p);
  bool is_promotion = (target_gen == OLD_GENERATION) && from_region->is_young();

#ifdef ASSERT
  if (ShenandoahOOMDuringEvacALot &&
      (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
    copy = nullptr;
  } else {
#endif
    if (UseTLAB) {
      switch (target_gen) {
        case YOUNG_GENERATION: {
          copy = allocate_from_gclab(thread, size);
          if ((copy == nullptr) && (size < ShenandoahThreadLocalData::gclab_size(thread))) {
            // GCLAB allocation failed because we are bumping up against the limit on young evacuation reserve.  Try resetting
            // the desired GCLAB size and retry GCLAB allocation to avoid cascading of shared memory allocations.
            ShenandoahThreadLocalData::set_gclab_size(thread, PLAB::min_size());
            copy = allocate_from_gclab(thread, size);
            // If we still get nullptr, we'll try a shared allocation below.
          }
          break;
        }
        case OLD_GENERATION: {
          PLAB* plab = ShenandoahThreadLocalData::plab(thread);
          if (plab != nullptr) {
            has_plab = true;
            copy = allocate_from_plab(thread, size, is_promotion);
            if ((copy == nullptr) && (size < ShenandoahThreadLocalData::plab_size(thread)) &&
                ShenandoahThreadLocalData::plab_retries_enabled(thread)) {
              // PLAB allocation failed because we are bumping up against the limit on old evacuation reserve or because
              // the requested object does not fit within the current plab but the plab still has an "abundance" of memory,
              // where abundance is defined as >= ShenGenHeap::plab_min_size().  In the former case, we try shrinking the
              // desired PLAB size to the minimum and retry PLAB allocation to avoid cascading of shared memory allocations.
              if (plab->words_remaining() < plab_min_size()) {
                ShenandoahThreadLocalData::set_plab_size(thread, plab_min_size());
                copy = allocate_from_plab(thread, size, is_promotion);
                // If we still get nullptr, we'll try a shared allocation below.
                if (copy == nullptr) {
                  // If retry fails, don't continue to retry until we have success (probably in next GC pass)
                  ShenandoahThreadLocalData::disable_plab_retries(thread);
                }
              }
              // else, copy still equals nullptr.  this causes shared allocation below, preserving this plab for future needs.
            }
          }
          break;
        }
        default: {
          ShouldNotReachHere();
          break;
        }
      }
    }

    if (copy == nullptr) {
      // If we failed to allocate in LAB, we'll try a shared allocation.
      if (!is_promotion || !has_plab || (size > PLAB::min_size())) {
        ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, target_gen, is_promotion);
        copy = allocate_memory(req);
        alloc_from_lab = false;
      }
      // else, we leave copy equal to nullptr, signaling a promotion failure below if appropriate.
      // We choose not to promote objects smaller than PLAB::min_size() by way of shared allocations, as this is too
      // costly.  Instead, we'll simply "evacuate" to young-gen memory (using a GCLAB) and will promote in a future
      // evacuation pass.  This condition is denoted by: is_promotion && has_plab && (size <= PLAB::min_size())
    }
#ifdef ASSERT
  }
#endif

  if (copy == nullptr) {
    if (target_gen == OLD_GENERATION) {
      if (from_region->is_young()) {
        // Signal that promotion failed. Will evacuate this old object somewhere in young gen.
        old_generation()->handle_failed_promotion(thread, size);
        return nullptr;
      } else {
        // Remember that evacuation to old gen failed. We'll want to trigger a full gc to recover from this
        // after the evacuation threads have finished.
        old_generation()->handle_failed_evacuation();
      }
    }

    control_thread()->handle_alloc_failure_evac(size);

    oom_evac_handler()->handle_out_of_memory_during_evacuation();

    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  // Copy the object:
  NOT_PRODUCT(evac_tracker()->begin_evacuation(thread, size * HeapWordSize));
  Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
  oop copy_val = cast_to_oop(copy);

  // Update the age of the evacuated object
  if (target_gen == YOUNG_GENERATION && is_aging_cycle()) {
    ShenandoahHeap::increase_object_age(copy_val, from_region->age() + 1);
  }

  // Try to install the new forwarding pointer.
  oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
  if (result == copy_val) {
    // Successfully evacuated. Our copy is now the public one!

    // This is necessary for virtual thread support. This uses the mark word without
    // considering that it may now be a forwarding pointer (and could therefore crash).
    // Secondarily, we do not want to spend cycles relativizing stack chunks for oops
    // that lost the evacuation race (and will therefore not become visible). It is
    // safe to do this on the public copy (this is also done during concurrent mark).
    ContinuationGCSupport::relativize_stack_chunk(copy_val);

    // Record that the evacuation succeeded
    NOT_PRODUCT(evac_tracker()->end_evacuation(thread, size * HeapWordSize));

    if (target_gen == OLD_GENERATION) {
      old_generation()->handle_evacuation(copy, size, from_region->is_young());
    } else {
      // When copying to the old generation above, we don't care
      // about recording object age in the census stats.
      assert(target_gen == YOUNG_GENERATION, "Error");
      // We record this census only when simulating pre-adaptive tenuring behavior, or
      // when we have been asked to record the census at evacuation rather than at mark
      if (ShenandoahGenerationalCensusAtEvac || !ShenandoahGenerationalAdaptiveTenuring) {
        evac_tracker()->record_age(thread, size * HeapWordSize, ShenandoahHeap::get_object_age(copy_val));
      }
    }
    shenandoah_assert_correct(nullptr, copy_val);
    return copy_val;
  } else {
    // Failed to evacuate. We need to deal with the object that is left behind. Since this
    // new allocation is certainly after TAMS, it will be considered live in the next cycle.
    // But if it happens to contain references to evacuated regions, those references would
    // not get updated for this stale copy during this cycle, and we will crash while scanning
    // it the next cycle.
    if (alloc_from_lab) {
      // For LAB allocations, it is enough to rollback the allocation ptr. Either the next
      // object will overwrite this stale copy, or the filler object on LAB retirement will
      // do this.
      switch (target_gen) {
        case YOUNG_GENERATION: {
          ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
          break;
        }
        case OLD_GENERATION: {
          ShenandoahThreadLocalData::plab(thread)->undo_allocation(copy, size);
          if (is_promotion) {
            ShenandoahThreadLocalData::subtract_from_plab_promoted(thread, size * HeapWordSize);
          }
          break;
        }
        default: {
          ShouldNotReachHere();
          break;
        }
      }
    } else {
      // For non-LAB allocations, we have no way to retract the allocation, and
      // have to explicitly overwrite the copy with the filler object. With that overwrite,
      // we have to keep the fwdptr initialized and pointing to our (stale) copy.
      assert(size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
      fill_with_object(copy, size);
      shenandoah_assert_correct(nullptr, copy_val);
      // For non-LAB allocations, the object has already been registered
    }
    shenandoah_assert_correct(nullptr, result);
    return result;
  }
}

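// Attempt to allocate size words from the thread's current PLAB. Returns nullptr when the request
// cannot (or should not) be satisfied from a PLAB, in which case the caller falls back to a shared
// (out-of-LAB) allocation.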
inline HeapWord* ShenandoahGenerationalHeap::allocate_from_plab(Thread* thread, size_t size, bool is_promotion) {
  assert(UseTLAB, "TLABs should be enabled");

  PLAB* plab = ShenandoahThreadLocalData::plab(thread);
  HeapWord* obj;

  if (plab == nullptr) {
    assert(!thread->is_Java_thread() && !thread->is_Worker_thread(), "Performance: thread should have PLAB: %s", thread->name());
    // No PLABs in this thread, fallback to shared allocation
    return nullptr;
  } else if (is_promotion && !ShenandoahThreadLocalData::allow_plab_promotions(thread)) {
    return nullptr;
  }
  // if plab->word_size() <= 0, thread's plab not yet initialized for this pass, so allow_plab_promotions() is not trustworthy
  obj = plab->allocate(size);
  if ((obj == nullptr) && (plab->words_remaining() < plab_min_size())) {
    // allocate_from_plab_slow will establish allow_plab_promotions(thread) for future invocations
    obj = allocate_from_plab_slow(thread, size, is_promotion);
  }
  // if plab->words_remaining() >= ShenGenHeap::heap()->plab_min_size(), just return nullptr so we can use a shared allocation
  if (obj == nullptr) {
    return nullptr;
  }

  if (is_promotion) {
    ShenandoahThreadLocalData::add_to_plab_promoted(thread, size * HeapWordSize);
  }
  return obj;
}

// Establish a new PLAB and allocate size HeapWords within it.
HeapWord* ShenandoahGenerationalHeap::allocate_from_plab_slow(Thread* thread, size_t size, bool is_promotion) {
  // New object should fit the PLAB size

  assert(mode()->is_generational(), "PLABs only relevant to generational GC");
  const size_t plab_min_size = this->plab_min_size();
  // PLABs are aligned to card boundaries to avoid synchronization with concurrent
  // allocations in other PLABs.
  const size_t min_size = (size > plab_min_size)? align_up(size, CardTable::card_size_in_words()): plab_min_size;

  // Figure out size of new PLAB, using value determined at last refill.
  size_t cur_size = ShenandoahThreadLocalData::plab_size(thread);
  if (cur_size == 0) {
    cur_size = plab_min_size;
  }

  // Expand aggressively, doubling at each refill in this epoch, ceiling at plab_max_size()
  size_t future_size = MIN2(cur_size * 2, plab_max_size());
  // Doubling, starting at a card-multiple, should give us a card-multiple. (Ceiling and floor
  // are card multiples.)
  assert(is_aligned(future_size, CardTable::card_size_in_words()), "Card multiple by construction, future_size: %zu"
          ", card_size: %zu, cur_size: %zu, max: %zu",
         future_size, (size_t) CardTable::card_size_in_words(), cur_size, plab_max_size());

  // Record new heuristic value even if we take any shortcut. This captures
  // the case when moderately-sized objects always take a shortcut. At some point,
  // heuristics should catch up with them.  Note that the requested cur_size may
  // not be honored, but we remember that this is the preferred size.
  log_debug(gc, free)("Set new PLAB size: %zu", future_size);
  ShenandoahThreadLocalData::set_plab_size(thread, future_size);
  if (cur_size < size) {
    // The PLAB to be allocated is still not large enough to hold the object. Fall back to shared allocation.
    // This avoids retiring perfectly good PLABs in order to represent a single large object allocation.
    log_debug(gc, free)("Current PLAB size (%zu) is too small for %zu", cur_size, size);
    return nullptr;
  }

  // Retire current PLAB, and allocate a new one.
  PLAB* plab = ShenandoahThreadLocalData::plab(thread);
  if (plab->words_remaining() < plab_min_size) {
    // Retire current PLAB. This takes care of any PLAB book-keeping.
    // retire_plab() registers the remnant filler object with the remembered set scanner without a lock.
    // Since PLABs are card-aligned, concurrent registrations in other PLABs don't interfere.
    retire_plab(plab, thread);

    size_t actual_size = 0;
    HeapWord* plab_buf = allocate_new_plab(min_size, cur_size, &actual_size);
    if (plab_buf == nullptr) {
      if (min_size == plab_min_size) {
        // Disable PLAB promotions for this thread because we cannot even allocate a minimal PLAB. This allows us
        // to fail faster on subsequent promotion attempts.
        ShenandoahThreadLocalData::disable_plab_promotions(thread);
      }
      return nullptr;
    } else {
      ShenandoahThreadLocalData::enable_plab_retries(thread);
    }
    // Since the allocated PLAB may have been down-sized for alignment, plab->allocate(size) below may still fail.
    if (ZeroTLAB) {
      // ... and clear it.
      Copy::zero_to_words(plab_buf, actual_size);
    } else {
      // ...and zap just allocated object.
#ifdef ASSERT
      // Skip mangling the space corresponding to the object header to
      // ensure that the returned space is not considered parsable by
      // any concurrent GC thread.
      size_t hdr_size = oopDesc::header_size();
      Copy::fill_to_words(plab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
#endif // ASSERT
    }
    assert(is_aligned(actual_size, CardTable::card_size_in_words()), "Align by design");
    plab->set_buf(plab_buf, actual_size);
    if (is_promotion && !ShenandoahThreadLocalData::allow_plab_promotions(thread)) {
      return nullptr;
    }
    return plab->allocate(size);
  } else {
    // If there's still at least min_size() words available within the current plab, don't retire it.  Let's nibble
    // away on this plab as long as we can.  Meanwhile, return nullptr to force this particular allocation request
    // to be satisfied with a shared allocation.  By packing more promotions into the previously allocated PLAB, we
    // reduce the likelihood of evacuation failures, and we reduce the need for downsizing our PLABs.
    return nullptr;
  }
}

HeapWord* ShenandoahGenerationalHeap::allocate_new_plab(size_t min_size, size_t word_size, size_t* actual_size) {
  // Align requested sizes to card-sized multiples.  Align down so that we don't violate max size of TLAB.
  assert(is_aligned(min_size, CardTable::card_size_in_words()), "Align by design");
  assert(word_size >= min_size, "Requested PLAB is too small");

  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_plab(min_size, word_size);
  // Note that allocate_memory() sets a thread-local flag to prohibit further promotions by this thread
  // if we are at risk of infringing on the old-gen evacuation budget.
  HeapWord* res = allocate_memory(req);
  if (res != nullptr) {
    *actual_size = req.actual_size();
  } else {
    *actual_size = 0;
  }
  assert(is_aligned(res, CardTable::card_size_in_words()), "Align by design");
  return res;
}

void ShenandoahGenerationalHeap::retire_plab(PLAB* plab, Thread* thread) {
  // We don't enforce limits on plab evacuations.  We let it consume all available old-gen memory in order to reduce
  // probability of an evacuation failure.  We do enforce limits on promotion, to make sure that excessive promotion
  // does not result in an old-gen evacuation failure.  Note that a failed promotion is relatively harmless.  Any
  // object that fails to promote in the current cycle will be eligible for promotion in a subsequent cycle.

  // When the plab was instantiated, its entirety was treated as if the entire buffer was going to be dedicated to
  // promotions.  Now that we are retiring the buffer, we adjust for the reality that the plab is not entirely promotions.
  //  1. Some of the plab may have been dedicated to evacuations.
  //  2. Some of the plab may have been abandoned due to waste (at the end of the plab).
  size_t not_promoted =
          ShenandoahThreadLocalData::get_plab_actual_size(thread) - ShenandoahThreadLocalData::get_plab_promoted(thread);
  ShenandoahThreadLocalData::reset_plab_promoted(thread);
  ShenandoahThreadLocalData::set_plab_actual_size(thread, 0);
  if (not_promoted > 0) {
    old_generation()->unexpend_promoted(not_promoted);
  }
  const size_t original_waste = plab->waste();
  HeapWord* const top = plab->top();

  // plab->retire() overwrites unused memory between plab->top() and plab->hard_end() with a dummy object to make memory parsable.
  // It adds the size of this unused memory, in words, to plab->waste().
  plab->retire();
  if (top != nullptr && plab->waste() > original_waste && is_in_old(top)) {
    // If retiring the plab created a filler object, then we need to register it with our card scanner so it can
    // safely walk the region backing the plab.
    log_debug(gc)("retire_plab() is registering remnant of size %zu at " PTR_FORMAT,
                  plab->waste() - original_waste, p2i(top));
    // No lock is necessary because the PLAB memory is aligned on card boundaries.
    old_generation()->card_scan()->register_object_without_lock(top);
  }
}

void ShenandoahGenerationalHeap::retire_plab(PLAB* plab) {
  Thread* thread = Thread::current();
  retire_plab(plab, thread);
}

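// Transfer heap regions between the generations according to the region balance computed by
// compute_old_generation_balance(): a positive balance moves surplus old regions to young, a negative
// balance moves regions from young to old. Returns a TransferResult describing the outcome.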
ShenandoahGenerationalHeap::TransferResult ShenandoahGenerationalHeap::balance_generations() {
  shenandoah_assert_heaplocked_or_safepoint();

  ShenandoahOldGeneration* old_gen = old_generation();
  const ssize_t old_region_balance = old_gen->get_region_balance();
  old_gen->set_region_balance(0);

  if (old_region_balance > 0) {
    const auto old_region_surplus = checked_cast<size_t>(old_region_balance);
    const bool success = generation_sizer()->transfer_to_young(old_region_surplus);
    return TransferResult {
      success, old_region_surplus, "young"
    };
  }

  if (old_region_balance < 0) {
    const auto old_region_deficit = checked_cast<size_t>(-old_region_balance);
    const bool success = generation_sizer()->transfer_to_old(old_region_deficit);
    if (!success) {
      old_gen->handle_failed_transfer();
    }
    return TransferResult {
      success, old_region_deficit, "old"
    };
  }

  return TransferResult {true, 0, "none"};
}

// Make sure old-generation is large enough, but no larger than is necessary, to hold mixed evacuations
// and promotions, if we anticipate either. Any deficit is provided by the young generation, subject to
// xfer_limit, and any surplus is transferred to the young generation.
// xfer_limit is the maximum we're able to transfer from young to old.
void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t old_xfer_limit, size_t old_cset_regions) {

  // We can limit the old reserve to the size of anticipated promotions:
  // max_old_reserve is an upper bound on memory evacuated from old and promoted to old,
  // clamped by the old generation space available.
  //
  // Here's the algebra.
  // Let SOEP = ShenandoahOldEvacRatioPercent,
  //     OE = old evac,
  //     YE = young evac, and
  //     TE = total evac = OE + YE
  // By definition:
  //            SOEP/100 = OE/TE
  //                     = OE/(OE+YE)
  //  => SOEP/(100-SOEP) = OE/((OE+YE)-OE)      // componendo-dividendo: If a/b = c/d, then a/(b-a) = c/(d-c)
  //                     = OE/YE
  //  =>              OE = YE*SOEP/(100-SOEP)
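  //
  // For example, with ShenandoahOldEvacRatioPercent = 20, OE = YE*20/80 = YE/4: the old-gen evacuation
  // budget is one quarter of the young-gen evacuation budget, i.e. one fifth of the total evacuation budget.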

  // We have to be careful in the event that SOEP is set to 100 by the user.
  assert(ShenandoahOldEvacRatioPercent <= 100, "Error");
  const size_t old_available = old_generation()->available();
  // The free set will reserve this amount of memory to hold young evacuations
  const size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100;

  // In the case that ShenandoahOldEvacRatioPercent equals 100, max_old_reserve is limited only by xfer_limit.

  const double bound_on_old_reserve = old_available + old_xfer_limit + young_reserve;
  const double max_old_reserve = (ShenandoahOldEvacRatioPercent == 100) ?
      bound_on_old_reserve :
      MIN2(double(young_reserve * ShenandoahOldEvacRatioPercent) / double(100 - ShenandoahOldEvacRatioPercent),
           bound_on_old_reserve);

  const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();

  // Decide how much old space we should reserve for a mixed collection
  double reserve_for_mixed = 0;
  if (old_generation()->has_unprocessed_collection_candidates()) {
    // We want this much memory to be unfragmented in order to reliably evacuate old.  This is conservative because we
    // may not evacuate the entirety of unprocessed candidates in a single mixed evacuation.
    const double max_evac_need = (double(old_generation()->unprocessed_collection_candidates_live_memory()) * ShenandoahOldEvacWaste);
    assert(old_available >= old_generation()->free_unaffiliated_regions() * region_size_bytes,
           "Unaffiliated available must not exceed total available");
    const double old_fragmented_available = double(old_available - old_generation()->free_unaffiliated_regions() * region_size_bytes);
    reserve_for_mixed = max_evac_need + old_fragmented_available;
    if (reserve_for_mixed > max_old_reserve) {
      reserve_for_mixed = max_old_reserve;
    }
  }

  // Decide how much space we should reserve for promotions from young
  size_t reserve_for_promo = 0;
  const size_t promo_load = old_generation()->get_promotion_potential();
  const bool doing_promotions = promo_load > 0;
  if (doing_promotions) {
    // We're promoting and have a bound on the maximum amount that can be promoted
    assert(max_old_reserve >= reserve_for_mixed, "Sanity");
    const size_t available_for_promotions = max_old_reserve - reserve_for_mixed;
    reserve_for_promo = MIN2((size_t)(promo_load * ShenandoahPromoEvacWaste), available_for_promotions);
  }

  // This is the total old we want to ideally reserve
  const size_t old_reserve = reserve_for_mixed + reserve_for_promo;
  assert(old_reserve <= max_old_reserve, "cannot reserve more than max for old evacuations");

  // We now check if the old generation is running a surplus or a deficit.
  const size_t max_old_available = old_generation()->available() + old_cset_regions * region_size_bytes;
  if (max_old_available >= old_reserve) {
    // We are running a surplus, so the old region surplus can go to young
    const size_t old_surplus = (max_old_available - old_reserve) / region_size_bytes;
    const size_t unaffiliated_old_regions = old_generation()->free_unaffiliated_regions() + old_cset_regions;
    const size_t old_region_surplus = MIN2(old_surplus, unaffiliated_old_regions);
    old_generation()->set_region_balance(checked_cast<ssize_t>(old_region_surplus));
  } else {
    // We are running a deficit which we'd like to fill from young.
    // Ignore that this will directly impact young_generation()->max_capacity(),
    // indirectly impacting young_reserve and old_reserve.  These computations are conservative.
    // Note that deficit is rounded up by one region.
    const size_t old_need = (old_reserve - max_old_available + region_size_bytes - 1) / region_size_bytes;
    const size_t max_old_region_xfer = old_xfer_limit / region_size_bytes;

    // Round down the regions we can transfer from young to old. If we're running short
    // on young-gen memory, we restrict the xfer. Old-gen collection activities will be
    // curtailed if the budget is restricted.
    const size_t old_region_deficit = MIN2(old_need, max_old_region_xfer);
    old_generation()->set_region_balance(0 - checked_cast<ssize_t>(old_region_deficit));
  }
}

void ShenandoahGenerationalHeap::reset_generation_reserves() {
  young_generation()->set_evacuation_reserve(0);
  old_generation()->set_evacuation_reserve(0);
  old_generation()->set_promoted_reserve(0);
}

void ShenandoahGenerationalHeap::TransferResult::print_on(const char* when, outputStream* ss) const {
  auto heap = ShenandoahGenerationalHeap::heap();
  ShenandoahYoungGeneration* const young_gen = heap->young_generation();
  ShenandoahOldGeneration* const old_gen = heap->old_generation();
  const size_t young_available = young_gen->available();
  const size_t old_available = old_gen->available();
  ss->print_cr("After %s, %s %zu regions to %s to prepare for next gc, old available: "
                     PROPERFMT ", young_available: " PROPERFMT,
                     when,
                     success? "successfully transferred": "failed to transfer", region_count, region_destination,
                     PROPERFMTARGS(old_available), PROPERFMTARGS(young_available));
}

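// Fill dead space in old regions with filler objects (coalesce-and-fill) so that old-gen memory stays
// parsable for the remembered-set scanner. This variant runs over all old regions and is not preemptible
// by young collections; it marks the old generation parsable when it completes.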
void ShenandoahGenerationalHeap::coalesce_and_fill_old_regions(bool concurrent) {
  class ShenandoahGlobalCoalesceAndFill : public WorkerTask {
  private:
    ShenandoahPhaseTimings::Phase _phase;
    ShenandoahRegionIterator _regions;
  public:
    explicit ShenandoahGlobalCoalesceAndFill(ShenandoahPhaseTimings::Phase phase) :
      WorkerTask("Shenandoah Global Coalesce"),
      _phase(phase) {}

    void work(uint worker_id) override {
      ShenandoahWorkerTimingsTracker timer(_phase,
                                           ShenandoahPhaseTimings::ScanClusters,
                                           worker_id, true);
      ShenandoahHeapRegion* region;
      while ((region = _regions.next()) != nullptr) {
        // old region is not in the collection set and was not immediately trashed
        if (region->is_old() && region->is_active() && !region->is_humongous()) {
          // Reset the coalesce and fill boundary because this is a global collect
          // and cannot be preempted by young collects. We want to be sure the entire
          // region is coalesced here and does not resume from a previously interrupted
          // or completed coalescing.
          region->begin_preemptible_coalesce_and_fill();
          region->oop_coalesce_and_fill(false);
        }
      }
    }
  };

  ShenandoahPhaseTimings::Phase phase = concurrent ?
          ShenandoahPhaseTimings::conc_coalesce_and_fill :
          ShenandoahPhaseTimings::degen_gc_coalesce_and_fill;

  // This is not cancellable
  ShenandoahGlobalCoalesceAndFill coalesce(phase);
  workers()->run_task(&coalesce);
  old_generation()->set_parsable(true);
}

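// Worker task that updates heap references after evacuation. Each worker first walks its share of the
// region iterator (young regions always; old regions only during a global cycle), then, for young and
// mixed cycles, updates references in old regions via the remembered set. The CONCURRENT template
// parameter selects the concurrent closure and suspendible-thread-set protocol; otherwise the
// stop-the-world closure used by degenerated cycles is applied.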
template<bool CONCURRENT>
class ShenandoahGenerationalUpdateHeapRefsTask : public WorkerTask {
private:
  ShenandoahGenerationalHeap* _heap;
  ShenandoahRegionIterator* _regions;
  ShenandoahRegionChunkIterator* _work_chunks;

public:
  explicit ShenandoahGenerationalUpdateHeapRefsTask(ShenandoahRegionIterator* regions,
                                                    ShenandoahRegionChunkIterator* work_chunks) :
          WorkerTask("Shenandoah Update References"),
          _heap(ShenandoahGenerationalHeap::heap()),
          _regions(regions),
          _work_chunks(work_chunks)
  {
    bool old_bitmap_stable = _heap->old_generation()->is_mark_complete();
    log_debug(gc, remset)("Update refs, scan remembered set using bitmap: %s", BOOL_TO_STR(old_bitmap_stable));
  }

  void work(uint worker_id) {
    if (CONCURRENT) {
      ShenandoahConcurrentWorkerSession worker_session(worker_id);
      ShenandoahSuspendibleThreadSetJoiner stsj;
      do_work<ShenandoahConcUpdateRefsClosure>(worker_id);
    } else {
      ShenandoahParallelWorkerSession worker_session(worker_id);
      do_work<ShenandoahNonConcUpdateRefsClosure>(worker_id);
    }
  }

private:
  template<class T>
  void do_work(uint worker_id) {
    T cl;

    if (CONCURRENT && (worker_id == 0)) {
      // We ask the first worker to replenish the Mutator free set by moving regions previously reserved to hold the
      // results of evacuation.  These reserves are no longer necessary because evacuation has completed.
      size_t cset_regions = _heap->collection_set()->count();

      // Now that evacuation is done, we can reassign any regions that had been reserved to hold the results of evacuation
      // to the mutator free set.  At the end of GC, we will have cset_regions newly evacuated fully empty regions from
      // which we will be able to replenish the Collector free set and the OldCollector free set in preparation for the
      // next GC cycle.
      _heap->free_set()->move_regions_from_collector_to_mutator(cset_regions);
    }
    // If !CONCURRENT, there's no value in expanding Mutator free set

    ShenandoahHeapRegion* r = _regions->next();
    // We update references for global, old, and young collections.
    ShenandoahGeneration* const gc_generation = _heap->gc_generation();
    shenandoah_assert_generations_reconciled();
    assert(gc_generation->is_mark_complete(), "Expected complete marking");
    ShenandoahMarkingContext* const ctx = _heap->marking_context();
    bool is_mixed = _heap->collection_set()->has_old_regions();
    while (r != nullptr) {
      HeapWord* update_watermark = r->get_update_watermark();
      assert(update_watermark >= r->bottom(), "sanity");

      log_debug(gc)("Update refs worker " UINT32_FORMAT ", looking at region %zu", worker_id, r->index());
      bool region_progress = false;
      if (r->is_active() && !r->is_cset()) {
        if (r->is_young()) {
          _heap->marked_object_oop_iterate(r, &cl, update_watermark);
          region_progress = true;
        } else if (r->is_old()) {
          if (gc_generation->is_global()) {
            _heap->marked_object_oop_iterate(r, &cl, update_watermark);
            region_progress = true;
          }
          // Otherwise, this is an old region in a young or mixed cycle.  Process it during a second phase, below.
          // Don't bother to report pacing progress in this case.
        } else {
          // Because updating of references runs concurrently, it is possible that a FREE inactive region transitions
          // to a non-free active region while this loop is executing.  Whenever this happens, the changing of a region's
          // active status may propagate at a different speed than the changing of the region's affiliation.

          // When we reach this control point, it is because a race has allowed a region's is_active() status to be seen
          // by this thread before the region's affiliation() is seen by this thread.

          // It's ok for this race to occur because the newly transformed region does not have any references to be
          // updated.

          assert(r->get_update_watermark() == r->bottom(),
                 "%s Region %zu is_active but not recognized as YOUNG or OLD so must be newly transitioned from FREE",
                 r->affiliation_name(), r->index());
        }
      }

      if (region_progress && ShenandoahPacing) {
        _heap->pacer()->report_update_refs(pointer_delta(update_watermark, r->bottom()));
      }

      if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
        return;
      }

      r = _regions->next();
    }

    if (!gc_generation->is_global()) {
      // Since this is a generational heap and we are not in a GLOBAL cycle, we have to process the remembered set.
      // There is no remembered set processing in non-generational mode or in a GLOBAL cycle.

      // After this thread has exhausted its traditional update-refs work, it continues with updating refs within
      // remembered set. The remembered set workload is better balanced between threads, so threads that are "behind"
      // can catch up with other threads during this phase, allowing all threads to work more effectively in parallel.
      update_references_in_remembered_set(worker_id, cl, ctx, is_mixed);
    }
  }

  template<class T>
  void update_references_in_remembered_set(uint worker_id, T &cl, const ShenandoahMarkingContext* ctx, bool is_mixed) {

    struct ShenandoahRegionChunk assignment;
    ShenandoahScanRemembered* scanner = _heap->old_generation()->card_scan();

    while (!_heap->check_cancelled_gc_and_yield(CONCURRENT) && _work_chunks->next(&assignment)) {
      // Keep grabbing next work chunk to process until finished, or asked to yield
      ShenandoahHeapRegion* r = assignment._r;
      if (r->is_active() && !r->is_cset() && r->is_old()) {
        HeapWord* start_of_range = r->bottom() + assignment._chunk_offset;
        HeapWord* end_of_range = r->get_update_watermark();
        if (end_of_range > start_of_range + assignment._chunk_size) {
          end_of_range = start_of_range + assignment._chunk_size;
        }

        if (start_of_range >= end_of_range) {
          continue;
        }

        // Old region in a young cycle or mixed cycle.
        if (is_mixed) {
          if (r->is_humongous()) {
            // Need to examine both dirty and clean cards during mixed evac.
            r->oop_iterate_humongous_slice_all(&cl, start_of_range, assignment._chunk_size);
          } else {
            // Since this is mixed evacuation, old regions that are candidates for collection have not been coalesced
            // and filled.  This will use mark bits to find objects that need to be updated.
            update_references_in_old_region(cl, ctx, scanner, r, start_of_range, end_of_range);
          }
        } else {
          // This is a young evacuation
          size_t cluster_size = CardTable::card_size_in_words() * ShenandoahCardCluster::CardsPerCluster;
          size_t clusters = assignment._chunk_size / cluster_size;
          assert(clusters * cluster_size == assignment._chunk_size, "Chunk assignment must align on cluster boundaries");
          scanner->process_region_slice(r, assignment._chunk_offset, clusters, end_of_range, &cl, true, worker_id);
        }

        if (ShenandoahPacing) {
          _heap->pacer()->report_update_refs(pointer_delta(end_of_range, start_of_range));
        }
      }
    }
  }

  template<class T>
  void update_references_in_old_region(T &cl, const ShenandoahMarkingContext* ctx, ShenandoahScanRemembered* scanner,
                                    const ShenandoahHeapRegion* r, HeapWord* start_of_range,
                                    HeapWord* end_of_range) const {
    // In case last object in my range spans boundary of my chunk, I may need to scan all the way to top()
    ShenandoahObjectToOopBoundedClosure<T> objs(&cl, start_of_range, r->top());

    // Any object that begins in a previous range is part of a different scanning assignment.  Any object that
    // starts after end_of_range is also not my responsibility.  (Either allocated during evacuation, so does
    // not hold pointers to from-space, or is beyond the range of my assigned work chunk.)

    // Find the first object that begins in my range, if there is one. Note that `p` will be set to `end_of_range`
    // when no live object is found in the range.
    HeapWord* tams = ctx->top_at_mark_start(r);
    HeapWord* p = get_first_object_start_word(ctx, scanner, tams, start_of_range, end_of_range);

    while (p < end_of_range) {
      // p is known to point to the beginning of marked object obj
      oop obj = cast_to_oop(p);
      objs.do_object(obj);
      HeapWord* prev_p = p;
      p += obj->size();
      if (p < tams) {
        p = ctx->get_next_marked_addr(p, tams);
        // If there are no more marked objects before tams, this returns tams.  Note that tams is
        // either >= end_of_range, or tams is the start of an object that is marked.
      }
      assert(p != prev_p, "Lack of forward progress");
    }
  }

  HeapWord* get_first_object_start_word(const ShenandoahMarkingContext* ctx, ShenandoahScanRemembered* scanner, HeapWord* tams,
                                        HeapWord* start_of_range, HeapWord* end_of_range) const {
    HeapWord* p = start_of_range;

    if (p >= tams) {
      // We cannot use ctx->is_marked(obj) to test whether an object begins at this address.  Instead,
      // we need to use the remembered set crossing map to advance p to the first object that starts
      // within the enclosing card.
      size_t card_index = scanner->card_index_for_addr(start_of_range);
      while (true) {
        HeapWord* first_object = scanner->first_object_in_card(card_index);
        if (first_object != nullptr) {
          p = first_object;
          break;
        } else if (scanner->addr_for_card_index(card_index + 1) < end_of_range) {
          card_index++;
        } else {
          // Signal that no object was found in range
          p = end_of_range;
          break;
        }
      }
    } else if (!ctx->is_marked(cast_to_oop(p))) {
      p = ctx->get_next_marked_addr(p, tams);
      // If there are no more marked objects before tams, this returns tams.
      // Note that tams is either >= end_of_range, or tams is the start of an object that is marked.
    }
    return p;
  }
};

void ShenandoahGenerationalHeap::update_heap_references(bool concurrent) {
  assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
  const uint nworkers = workers()->active_workers();
  ShenandoahRegionChunkIterator work_list(nworkers);
  if (concurrent) {
    ShenandoahGenerationalUpdateHeapRefsTask<true> task(&_update_refs_iterator, &work_list);
    workers()->run_task(&task);
  } else {
    ShenandoahGenerationalUpdateHeapRefsTask<false> task(&_update_refs_iterator, &work_list);
    workers()->run_task(&task);
  }

  if (ShenandoahEnableCardStats) {
    // Only do this if we are collecting card stats
    ShenandoahScanRemembered* card_scan = old_generation()->card_scan();
    assert(card_scan != nullptr, "Card table must exist when card stats are enabled");
    card_scan->log_card_stats(nworkers, CARD_STAT_UPDATE_REFS);
  }
}

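// Helper that composes two heap region closures into one: both closures are applied to each region,
// and the composite is thread-safe only if both components are. See
// final_update_refs_update_region_states() below for a usage.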
struct ShenandoahCompositeRegionClosure {
  template<typename C1, typename C2>
  class Closure : public ShenandoahHeapRegionClosure {
  private:
    C1 &_c1;
    C2 &_c2;

  public:
    Closure(C1 &c1, C2 &c2) : ShenandoahHeapRegionClosure(), _c1(c1), _c2(c2) {}

    void heap_region_do(ShenandoahHeapRegion* r) override {
      _c1.heap_region_do(r);
      _c2.heap_region_do(r);
    }

    bool is_thread_safe() override {
      return _c1.is_thread_safe() && _c2.is_thread_safe();
    }
  };

  template<typename C1, typename C2>
  static Closure<C1, C2> of(C1 &c1, C2 &c2) {
    return Closure<C1, C2>(c1, c2);
  }
};

class ShenandoahUpdateRegionAges : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* _ctx;

public:
  explicit ShenandoahUpdateRegionAges(ShenandoahMarkingContext* ctx) : _ctx(ctx) { }

  void heap_region_do(ShenandoahHeapRegion* r) override {
    // Maintenance of region age must follow evacuation in order to account for
    // evacuation allocations within survivor regions.  We consult region age during
    // the subsequent evacuation to determine whether certain objects need to
    // be promoted.
    if (r->is_young() && r->is_active()) {
      HeapWord *tams = _ctx->top_at_mark_start(r);
      HeapWord *top = r->top();

      // Allocations move the watermark when top moves.  However, compacting
      // objects will sometimes lower top beneath the watermark, after which,
      // attempts to read the watermark will assert out (watermark should not be
      // higher than top).
      if (top > tams) {
        // There have been allocations in this region since the start of the cycle.
        // Any objects new to this region must not assimilate elevated age.
        r->reset_age();
      } else if (ShenandoahGenerationalHeap::heap()->is_aging_cycle()) {
        r->increment_age();
      }
    }
  }

  bool is_thread_safe() override {
    return true;
  }
};

void ShenandoahGenerationalHeap::final_update_refs_update_region_states() {
  ShenandoahSynchronizePinnedRegionStates pins;
  ShenandoahUpdateRegionAges ages(active_generation()->complete_marking_context());
  auto cl = ShenandoahCompositeRegionClosure::of(pins, ages);
  parallel_heap_region_iterate(&cl);
}

void ShenandoahGenerationalHeap::complete_degenerated_cycle() {
  shenandoah_assert_heaplocked_or_safepoint();
  if (is_concurrent_old_mark_in_progress()) {
    // This is still necessary for degenerated cycles because the degeneration point may occur
    // after final mark of the young generation. See ShenandoahConcurrentGC::op_final_update_refs for
    // a more detailed explanation.
    old_generation()->transfer_pointers_from_satb();
  }

  // We defer generation resizing actions until after cset regions have been recycled.
  TransferResult result = balance_generations();
  LogTarget(Info, gc, ergo) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    result.print_on("Degenerated GC", &ls);
  }

  // In case degeneration interrupted concurrent evacuation or update references, we need to clean up
  // transient state. Otherwise, these actions have no effect.
  reset_generation_reserves();

  if (!old_generation()->is_parsable()) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_coalesce_and_fill);
    coalesce_and_fill_old_regions(false);
  }
}

void ShenandoahGenerationalHeap::complete_concurrent_cycle() {
  if (!old_generation()->is_parsable()) {
    // Class unloading may render the card offsets unusable, so we must rebuild them before
    // the next remembered set scan. We _could_ let the control thread do this sometime after
    // the global cycle has completed and before the next young collection, but under memory
    // pressure the control thread may not have the time (that is, because it's running back
    // to back GCs). In that scenario, we would have to make the old regions parsable before
    // we could start a young collection. This could delay the start of the young cycle and
    // throw off the heuristics.
    entry_global_coalesce_and_fill();
  }

  TransferResult result;
  {
    ShenandoahHeapLocker locker(lock());

    result = balance_generations();
    reset_generation_reserves();
  }

  LogTarget(Info, gc, ergo) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    result.print_on("Concurrent GC", &ls);
  }
}

void ShenandoahGenerationalHeap::entry_global_coalesce_and_fill() {
  const char* msg = "Coalescing and filling old regions";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_coalesce_and_fill);

  TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
  EventMark em("%s", msg);
  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent coalesce and fill");

  coalesce_and_fill_old_regions(true);
}

void ShenandoahGenerationalHeap::update_region_ages(ShenandoahMarkingContext* ctx) {
  ShenandoahUpdateRegionAges cl(ctx);
  parallel_heap_region_iterate(&cl);
}