1 /*
   2  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
   3  * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "gc/shenandoah/shenandoahAgeCensus.hpp"
  27 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  28 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  29 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  30 #include "gc/shenandoah/shenandoahGenerationalControlThread.hpp"
  31 #include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
  32 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
  33 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  34 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  35 #include "gc/shenandoah/shenandoahHeapRegionClosures.hpp"
  36 #include "gc/shenandoah/shenandoahInitLogger.hpp"
  37 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
  38 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  39 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  40 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  41 #include "gc/shenandoah/shenandoahRegulatorThread.hpp"
  42 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
  43 #include "gc/shenandoah/shenandoahUtils.hpp"
  44 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  45 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  46 #include "logging/log.hpp"
  47 #include "utilities/events.hpp"
  48 
  49 
  50 class ShenandoahGenerationalInitLogger : public ShenandoahInitLogger {
  51 public:
  52   static void print() {
  53     ShenandoahGenerationalInitLogger logger;
  54     logger.print_all();
  55   }
  56 protected:
  57   void print_gc_specific() override {
  58     ShenandoahInitLogger::print_gc_specific();
  59 
  60     ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
  61     log_info(gc, init)("Young Heuristics: %s", heap->young_generation()->heuristics()->name());
  62     log_info(gc, init)("Old Heuristics: %s", heap->old_generation()->heuristics()->name());
  63   }
  64 };
  65 
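// PLAB sizes are aligned to card-table boundaries so that registration of PLAB remnants with the
// remembered set can proceed without synchronizing against allocations in other PLABs (see retire_plab()).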
  66 size_t ShenandoahGenerationalHeap::calculate_min_plab() {
  67   return align_up(PLAB::min_size(), CardTable::card_size_in_words());
  68 }
  69 
  70 size_t ShenandoahGenerationalHeap::calculate_max_plab() {
  71   size_t MaxTLABSizeWords = ShenandoahHeapRegion::max_tlab_size_words();
  72   return align_down(MaxTLABSizeWords, CardTable::card_size_in_words());
  73 }
  74 
  75 // Returns size in bytes
  76 size_t ShenandoahGenerationalHeap::unsafe_max_tlab_alloc(Thread *thread) const {
  77   return MIN2(ShenandoahHeapRegion::max_tlab_size_bytes(), young_generation()->available());
  78 }
  79 
  80 ShenandoahGenerationalHeap::ShenandoahGenerationalHeap(ShenandoahCollectorPolicy* policy) :
  81   ShenandoahHeap(policy),
  82   _age_census(nullptr),
  83   _evac_tracker(new ShenandoahEvacuationTracker()),
  84   _min_plab_size(calculate_min_plab()),
  85   _max_plab_size(calculate_max_plab()),
  86   _regulator_thread(nullptr),
  87   _young_gen_memory_pool(nullptr),
  88   _old_gen_memory_pool(nullptr) {
  89   assert(is_aligned(_min_plab_size, CardTable::card_size_in_words()), "min_plab_size must be aligned");
  90   assert(is_aligned(_max_plab_size, CardTable::card_size_in_words()), "max_plab_size must be aligned");
  91 }
  92 
  93 void ShenandoahGenerationalHeap::post_initialize() {
  94   ShenandoahHeap::post_initialize();
  95   _age_census = new ShenandoahAgeCensus();
  96 }
  97 
  98 void ShenandoahGenerationalHeap::print_init_logger() const {
  99   ShenandoahGenerationalInitLogger logger;
 100   logger.print_all();
 101 }
 102 
 103 void ShenandoahGenerationalHeap::print_tracing_info() const {
 104   ShenandoahHeap::print_tracing_info();
 105 
 106   LogTarget(Info, gc, stats) lt;
 107   if (lt.is_enabled()) {
 108     LogStream ls(lt);
 109     ls.cr();
 110     ls.cr();
 111     evac_tracker()->print_global_on(&ls);
 112   }
 113 }
 114 
 115 void ShenandoahGenerationalHeap::initialize_heuristics() {
 116   // Initialize global generation and heuristics even in generational mode.
 117   ShenandoahHeap::initialize_heuristics();
 118 
 119   // Max capacity is the maximum _allowed_ capacity. That is, the maximum allowed capacity
 120   // for old would be total heap - minimum capacity of young. This means the sum of the maximum
 121   // allowed for old and young could exceed the total heap size. It remains the case that the
 122   // _actual_ capacity of young + old = total.
 123   _generation_sizer.heap_size_changed(max_capacity());
 124   size_t initial_capacity_young = _generation_sizer.max_young_size();
 125   size_t max_capacity_young = _generation_sizer.max_young_size();
 126   size_t initial_capacity_old = max_capacity() - max_capacity_young;
 127   size_t max_capacity_old = max_capacity() - initial_capacity_young;
 128 
 129   _young_generation = new ShenandoahYoungGeneration(max_workers(), max_capacity_young);
 130   _old_generation = new ShenandoahOldGeneration(max_workers(), max_capacity_old);
 131   _young_generation->initialize_heuristics(mode());
 132   _old_generation->initialize_heuristics(mode());
 133 }
 134 
 135 void ShenandoahGenerationalHeap::initialize_serviceability() {
 136   assert(mode()->is_generational(), "Only for the generational mode");
 137   _young_gen_memory_pool = new ShenandoahYoungGenMemoryPool(this);
 138   _old_gen_memory_pool = new ShenandoahOldGenMemoryPool(this);
 139   cycle_memory_manager()->add_pool(_young_gen_memory_pool);
 140   cycle_memory_manager()->add_pool(_old_gen_memory_pool);
 141   stw_memory_manager()->add_pool(_young_gen_memory_pool);
 142   stw_memory_manager()->add_pool(_old_gen_memory_pool);
 143 }
 144 
 145 GrowableArray<MemoryPool*> ShenandoahGenerationalHeap::memory_pools() {
 146   assert(mode()->is_generational(), "Only for the generational mode");
 147   GrowableArray<MemoryPool*> memory_pools(2);
 148   memory_pools.append(_young_gen_memory_pool);
 149   memory_pools.append(_old_gen_memory_pool);
 150   return memory_pools;
 151 }
 152 
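// In generational mode the control thread is accompanied by a regulator thread, which polls the
// young and old heuristics and requests young, old, or global cycles from the control thread.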
 153 void ShenandoahGenerationalHeap::initialize_controller() {
 154   auto control_thread = new ShenandoahGenerationalControlThread();
 155   _control_thread = control_thread;
 156   _regulator_thread = new ShenandoahRegulatorThread(control_thread);
 157 }
 158 
 159 void ShenandoahGenerationalHeap::gc_threads_do(ThreadClosure* tcl) const {
 160   if (!shenandoah_policy()->is_at_shutdown()) {
 161     ShenandoahHeap::gc_threads_do(tcl);
 162     tcl->do_thread(regulator_thread());
 163   }
 164 }
 165 
 166 void ShenandoahGenerationalHeap::stop() {
 167   ShenandoahHeap::stop();
 168   regulator_thread()->stop();
 169 }
 170 
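// Decide whether the given stack chunk still needs GC barriers: it does if young marking is in
// progress and the chunk was allocated before mark start, if the chunk resides in the old
// generation (card marking), or while forwarded objects may still exist.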
 171 bool ShenandoahGenerationalHeap::requires_barriers(stackChunkOop obj) const {
 172   if (is_idle()) {
 173     return false;
 174   }
 175 
 176   if (is_concurrent_young_mark_in_progress() && is_in_young(obj) && !marking_context()->allocated_after_mark_start(obj)) {
 177     // We are marking young, this object is in young, and it is below the TAMS
 178     return true;
 179   }
 180 
 181   if (is_in_old(obj)) {
 182     // Card marking barriers are required for objects in the old generation
 183     return true;
 184   }
 185 
 186   if (has_forwarded_objects()) {
 187     // Object may have pointers that need to be updated
 188     return true;
 189   }
 190 
 191   return false;
 192 }
 193 
 194 void ShenandoahGenerationalHeap::evacuate_collection_set(bool concurrent) {
 195   ShenandoahRegionIterator regions;
 196   ShenandoahGenerationalEvacuationTask task(this, &regions, concurrent, false /* only promote regions */);
 197   workers()->run_task(&task);
 198 }
 199 
 200 void ShenandoahGenerationalHeap::promote_regions_in_place(bool concurrent) {
 201   ShenandoahRegionIterator regions;
 202   ShenandoahGenerationalEvacuationTask task(this, &regions, concurrent, true /* only promote regions */);
 203   workers()->run_task(&task);
 204 }
 205 
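// Evacuate the object referenced by p on behalf of the given thread. A young object whose
// effective age (region age + object age) has reached the tenuring threshold is first offered to
// the old generation; otherwise (or if promotion fails) it is evacuated within its current generation.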
 206 oop ShenandoahGenerationalHeap::evacuate_object(oop p, Thread* thread) {
 207   assert(thread == Thread::current(), "Expected thread parameter to be current thread.");
 208   if (ShenandoahThreadLocalData::is_oom_during_evac(thread)) {
 209     // This thread went through the OOM during evac protocol and it is safe to return
 210     // the forward pointer. It must not attempt to evacuate anymore.
 211     return ShenandoahBarrierSet::resolve_forwarded(p);
 212   }
 213 
 214   assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
 215 
 216   ShenandoahHeapRegion* r = heap_region_containing(p);
 217   assert(!r->is_humongous(), "never evacuate humongous objects");
 218 
 219   ShenandoahAffiliation target_gen = r->affiliation();
 220   // gc_generation() can change asynchronously and should not be used here.
 221   assert(active_generation() != nullptr, "Error");
 222   if (active_generation()->is_young() && target_gen == YOUNG_GENERATION) {
 223     markWord mark = p->mark();
 224     if (mark.is_marked()) {
 225       // Already forwarded.
 226       return ShenandoahBarrierSet::resolve_forwarded(p);
 227     }
 228 
 229     if (mark.has_displaced_mark_helper()) {
 230       // We don't want to deal with MT here just to ensure we read the right mark word.
 231       // Skip the potential promotion attempt for this one.
 232     } else if (r->age() + mark.age() >= age_census()->tenuring_threshold()) {
 233       oop result = try_evacuate_object(p, thread, r, OLD_GENERATION);
 234       if (result != nullptr) {
 235         return result;
 236       }
 237       // If we failed to promote this aged object, we'll fall through to code below and evacuate to young-gen.
 238     }
 239   }
 240   return try_evacuate_object(p, thread, r, target_gen);
 241 }
 242 
 243 // try_evacuate_object registers the object and dirties the associated remembered set information when evacuating
 244 // to OLD_GENERATION.
 245 oop ShenandoahGenerationalHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapRegion* from_region,
 246                                         ShenandoahAffiliation target_gen) {
 247   bool alloc_from_lab = true;
 248   bool has_plab = false;
 249   HeapWord* copy = nullptr;
 250 
 251   markWord mark = p->mark();
 252   if (ShenandoahForwarding::is_forwarded(mark)) {
 253     return ShenandoahForwarding::get_forwardee(p);
 254   }
 255   size_t old_size = ShenandoahForwarding::size(p);
 256   size_t size = p->copy_size(old_size, mark);
 257 
 258   bool is_promotion = (target_gen == OLD_GENERATION) && from_region->is_young();
 259 
 260 #ifdef ASSERT
 261   if (ShenandoahOOMDuringEvacALot &&
 262       (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
 263     copy = nullptr;
 264   } else {
 265 #endif
 266     if (UseTLAB) {
 267       switch (target_gen) {
 268         case YOUNG_GENERATION: {
 269           copy = allocate_from_gclab(thread, size);
 270           if ((copy == nullptr) && (size < ShenandoahThreadLocalData::gclab_size(thread))) {
 271             // GCLAB allocation failed because we are bumping up against the limit on young evacuation reserve.  Try resetting
 272             // the desired GCLAB size and retry GCLAB allocation to avoid cascading of shared memory allocations.
 273             ShenandoahThreadLocalData::set_gclab_size(thread, PLAB::min_size());
 274             copy = allocate_from_gclab(thread, size);
 275             // If we still get nullptr, we'll try a shared allocation below.
 276           }
 277           break;
 278         }
 279         case OLD_GENERATION: {
 280           PLAB* plab = ShenandoahThreadLocalData::plab(thread);
 281           if (plab != nullptr) {
 282             has_plab = true;
 283             copy = allocate_from_plab(thread, size, is_promotion);
 284             if ((copy == nullptr) && (size < ShenandoahThreadLocalData::plab_size(thread)) &&
 285                 ShenandoahThreadLocalData::plab_retries_enabled(thread)) {
 286               // PLAB allocation failed because we are bumping up against the limit on old evacuation reserve or because
 287               // the requested object does not fit within the current plab but the plab still has an "abundance" of memory,
 288               // where abundance is defined as >= ShenGenHeap::plab_min_size().  In the former case, we try shrinking the
 289               // desired PLAB size to the minimum and retry PLAB allocation to avoid cascading of shared memory allocations.
 290               if (plab->words_remaining() < plab_min_size()) {
 291                 ShenandoahThreadLocalData::set_plab_size(thread, plab_min_size());
 292                 copy = allocate_from_plab(thread, size, is_promotion);
 293                 // If we still get nullptr, we'll try a shared allocation below.
 294                 if (copy == nullptr) {
 295                   // If retry fails, don't continue to retry until we have success (probably in next GC pass)
 296                   ShenandoahThreadLocalData::disable_plab_retries(thread);
 297                 }
 298               }
              // else, copy still equals nullptr. This causes a shared allocation below, preserving this plab for future needs.
 300             }
 301           }
 302           break;
 303         }
 304         default: {
 305           ShouldNotReachHere();
 306           break;
 307         }
 308       }
 309     }
 310 
 311     if (copy == nullptr) {
 312       // If we failed to allocate in LAB, we'll try a shared allocation.
 313       if (!is_promotion || !has_plab || (size > PLAB::min_size())) {
 314         ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, target_gen, is_promotion);
 315         copy = allocate_memory(req);
 316         alloc_from_lab = false;
 317       }
 318       // else, we leave copy equal to nullptr, signaling a promotion failure below if appropriate.
 319       // We choose not to promote objects smaller than PLAB::min_size() by way of shared allocations, as this is too
 320       // costly.  Instead, we'll simply "evacuate" to young-gen memory (using a GCLAB) and will promote in a future
 321       // evacuation pass.  This condition is denoted by: is_promotion && has_plab && (size <= PLAB::min_size())
 322     }
 323 #ifdef ASSERT
 324   }
 325 #endif
 326 
 327   if (copy == nullptr) {
 328     if (target_gen == OLD_GENERATION) {
 329       if (from_region->is_young()) {
        // Signal that promotion failed. The aged object will instead be evacuated within young gen.
 331         old_generation()->handle_failed_promotion(thread, size);
 332         return nullptr;
 333       } else {
 334         // Remember that evacuation to old gen failed. We'll want to trigger a full gc to recover from this
 335         // after the evacuation threads have finished.
 336         old_generation()->handle_failed_evacuation();
 337       }
 338     }
 339 
 340     control_thread()->handle_alloc_failure_evac(size);
 341 
 342     oom_evac_handler()->handle_out_of_memory_during_evacuation();
 343 
 344     return ShenandoahBarrierSet::resolve_forwarded(p);
 345   }
 346 
 347   // Copy the object:
 348   NOT_PRODUCT(evac_tracker()->begin_evacuation(thread, size * HeapWordSize));
 349   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, old_size);
 350   oop copy_val = cast_to_oop(copy);
 351 
 352   // Update the age of the evacuated object
 353   if (target_gen == YOUNG_GENERATION && is_aging_cycle()) {
 354     ShenandoahHeap::increase_object_age(copy_val, from_region->age() + 1);
 355   }
 356 
 357   // Try to install the new forwarding pointer.
 358   oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
 359   if (result == copy_val) {
 360     // Successfully evacuated. Our copy is now the public one!
 361 
 362     // This is necessary for virtual thread support. This uses the mark word without
 363     // considering that it may now be a forwarding pointer (and could therefore crash).
 364     // Secondarily, we do not want to spend cycles relativizing stack chunks for oops
 365     // that lost the evacuation race (and will therefore not become visible). It is
 366     // safe to do this on the public copy (this is also done during concurrent mark).
 367     copy_val->initialize_hash_if_necessary(p);
 368     ContinuationGCSupport::relativize_stack_chunk(copy_val);
 369 
 370     // Record that the evacuation succeeded
 371     NOT_PRODUCT(evac_tracker()->end_evacuation(thread, size * HeapWordSize));
 372 
 373     if (target_gen == OLD_GENERATION) {
 374       old_generation()->handle_evacuation(copy, size, from_region->is_young());
 375     } else {
 376       // When copying to the old generation above, we don't care
 377       // about recording object age in the census stats.
 378       assert(target_gen == YOUNG_GENERATION, "Error");
 379       // We record this census only when simulating pre-adaptive tenuring behavior, or
 380       // when we have been asked to record the census at evacuation rather than at mark
 381       if (ShenandoahGenerationalCensusAtEvac || !ShenandoahGenerationalAdaptiveTenuring) {
 382         evac_tracker()->record_age(thread, size * HeapWordSize, ShenandoahHeap::get_object_age(copy_val));
 383       }
 384     }
 385     shenandoah_assert_correct(nullptr, copy_val);
 386     return copy_val;
  } else {
 388     // Failed to evacuate. We need to deal with the object that is left behind. Since this
 389     // new allocation is certainly after TAMS, it will be considered live in the next cycle.
 390     // But if it happens to contain references to evacuated regions, those references would
 391     // not get updated for this stale copy during this cycle, and we will crash while scanning
 392     // it the next cycle.
 393     if (alloc_from_lab) {
 394       // For LAB allocations, it is enough to rollback the allocation ptr. Either the next
 395       // object will overwrite this stale copy, or the filler object on LAB retirement will
 396       // do this.
 397       switch (target_gen) {
 398         case YOUNG_GENERATION: {
 399           ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
 400           break;
 401         }
 402         case OLD_GENERATION: {
 403           ShenandoahThreadLocalData::plab(thread)->undo_allocation(copy, size);
 404           if (is_promotion) {
 405             ShenandoahThreadLocalData::subtract_from_plab_promoted(thread, size * HeapWordSize);
 406           }
 407           break;
 408         }
 409         default: {
 410           ShouldNotReachHere();
 411           break;
 412         }
 413       }
 414     } else {
 415       // For non-LAB allocations, we have no way to retract the allocation, and
 416       // have to explicitly overwrite the copy with the filler object. With that overwrite,
 417       // we have to keep the fwdptr initialized and pointing to our (stale) copy.
 418       assert(size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
 419       fill_with_object(copy, size);
 420       shenandoah_assert_correct(nullptr, copy_val);
 421       // For non-LAB allocations, the object has already been registered
 422     }
 423     shenandoah_assert_correct(nullptr, result);
 424     return result;
 425   }
 426 }
 427 
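// Try to allocate size words from the thread's PLAB. Returns nullptr when the thread has no PLAB,
// when PLAB promotions are disallowed for a promotion request, or when neither the current PLAB
// nor the slow path can satisfy the request; the caller then falls back to a shared allocation.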
 428 inline HeapWord* ShenandoahGenerationalHeap::allocate_from_plab(Thread* thread, size_t size, bool is_promotion) {
 429   assert(UseTLAB, "TLABs should be enabled");
 430 
 431   PLAB* plab = ShenandoahThreadLocalData::plab(thread);
 432   HeapWord* obj;
 433 
 434   if (plab == nullptr) {
 435     assert(!thread->is_Java_thread() && !thread->is_Worker_thread(), "Performance: thread should have PLAB: %s", thread->name());
 436     // No PLABs in this thread, fallback to shared allocation
 437     return nullptr;
 438   } else if (is_promotion && !ShenandoahThreadLocalData::allow_plab_promotions(thread)) {
 439     return nullptr;
 440   }
 441   // if plab->word_size() <= 0, thread's plab not yet initialized for this pass, so allow_plab_promotions() is not trustworthy
 442   obj = plab->allocate(size);
 443   if ((obj == nullptr) && (plab->words_remaining() < plab_min_size())) {
 444     // allocate_from_plab_slow will establish allow_plab_promotions(thread) for future invocations
 445     obj = allocate_from_plab_slow(thread, size, is_promotion);
 446   }
 447   // if plab->words_remaining() >= ShenGenHeap::heap()->plab_min_size(), just return nullptr so we can use a shared allocation
 448   if (obj == nullptr) {
 449     return nullptr;
 450   }
 451 
 452   if (is_promotion) {
 453     ShenandoahThreadLocalData::add_to_plab_promoted(thread, size * HeapWordSize);
 454   }
 455   return obj;
 456 }
 457 
 458 // Establish a new PLAB and allocate size HeapWords within it.
 459 HeapWord* ShenandoahGenerationalHeap::allocate_from_plab_slow(Thread* thread, size_t size, bool is_promotion) {
 460   // New object should fit the PLAB size
 461 
 462   assert(mode()->is_generational(), "PLABs only relevant to generational GC");
 463   const size_t plab_min_size = this->plab_min_size();
 464   // PLABs are aligned to card boundaries to avoid synchronization with concurrent
 465   // allocations in other PLABs.
  const size_t min_size = (size > plab_min_size) ? align_up(size, CardTable::card_size_in_words()) : plab_min_size;
 467 
 468   // Figure out size of new PLAB, using value determined at last refill.
 469   size_t cur_size = ShenandoahThreadLocalData::plab_size(thread);
 470   if (cur_size == 0) {
 471     cur_size = plab_min_size;
 472   }
 473 
 474   // Expand aggressively, doubling at each refill in this epoch, ceiling at plab_max_size()
 475   size_t future_size = MIN2(cur_size * 2, plab_max_size());
 476   // Doubling, starting at a card-multiple, should give us a card-multiple. (Ceiling and floor
 477   // are card multiples.)
 478   assert(is_aligned(future_size, CardTable::card_size_in_words()), "Card multiple by construction, future_size: %zu"
 479           ", card_size: %zu, cur_size: %zu, max: %zu",
 480          future_size, (size_t) CardTable::card_size_in_words(), cur_size, plab_max_size());
 481 
 482   // Record new heuristic value even if we take any shortcut. This captures
 483   // the case when moderately-sized objects always take a shortcut. At some point,
 484   // heuristics should catch up with them.  Note that the requested cur_size may
 485   // not be honored, but we remember that this is the preferred size.
 486   log_debug(gc, free)("Set new PLAB size: %zu", future_size);
 487   ShenandoahThreadLocalData::set_plab_size(thread, future_size);
 488   if (cur_size < size) {
 489     // The PLAB to be allocated is still not large enough to hold the object. Fall back to shared allocation.
 490     // This avoids retiring perfectly good PLABs in order to represent a single large object allocation.
 491     log_debug(gc, free)("Current PLAB size (%zu) is too small for %zu", cur_size, size);
 492     return nullptr;
 493   }
 494 
 495   // Retire current PLAB, and allocate a new one.
 496   PLAB* plab = ShenandoahThreadLocalData::plab(thread);
 497   if (plab->words_remaining() < plab_min_size) {
 498     // Retire current PLAB. This takes care of any PLAB book-keeping.
 499     // retire_plab() registers the remnant filler object with the remembered set scanner without a lock.
 500     // Since PLABs are card-aligned, concurrent registrations in other PLABs don't interfere.
 501     retire_plab(plab, thread);
 502 
 503     size_t actual_size = 0;
 504     HeapWord* plab_buf = allocate_new_plab(min_size, cur_size, &actual_size);
 505     if (plab_buf == nullptr) {
 506       if (min_size == plab_min_size) {
 507         // Disable PLAB promotions for this thread because we cannot even allocate a minimal PLAB. This allows us
 508         // to fail faster on subsequent promotion attempts.
 509         ShenandoahThreadLocalData::disable_plab_promotions(thread);
 510       }
 511       return nullptr;
 512     } else {
 513       ShenandoahThreadLocalData::enable_plab_retries(thread);
 514     }
 515     // Since the allocated PLAB may have been down-sized for alignment, plab->allocate(size) below may still fail.
 516     if (ZeroTLAB) {
 517       // ... and clear it.
 518       Copy::zero_to_words(plab_buf, actual_size);
 519     } else {
 520       // ...and zap just allocated object.
 521 #ifdef ASSERT
 522       // Skip mangling the space corresponding to the object header to
 523       // ensure that the returned space is not considered parsable by
 524       // any concurrent GC thread.
 525       size_t hdr_size = oopDesc::header_size();
 526       Copy::fill_to_words(plab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
 527 #endif // ASSERT
 528     }
 529     assert(is_aligned(actual_size, CardTable::card_size_in_words()), "Align by design");
 530     plab->set_buf(plab_buf, actual_size);
 531     if (is_promotion && !ShenandoahThreadLocalData::allow_plab_promotions(thread)) {
 532       return nullptr;
 533     }
 534     return plab->allocate(size);
 535   } else {
 536     // If there's still at least min_size() words available within the current plab, don't retire it.  Let's nibble
 537     // away on this plab as long as we can.  Meanwhile, return nullptr to force this particular allocation request
 538     // to be satisfied with a shared allocation.  By packing more promotions into the previously allocated PLAB, we
 539     // reduce the likelihood of evacuation failures, and we reduce the need for downsizing our PLABs.
 540     return nullptr;
 541   }
 542 }
 543 
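// Allocate the backing memory for a new PLAB: at least min_size words, preferably word_size words.
// The size actually obtained is reported through actual_size (0 on failure).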
 544 HeapWord* ShenandoahGenerationalHeap::allocate_new_plab(size_t min_size, size_t word_size, size_t* actual_size) {
 545   // Align requested sizes to card-sized multiples.  Align down so that we don't violate max size of TLAB.
 546   assert(is_aligned(min_size, CardTable::card_size_in_words()), "Align by design");
 547   assert(word_size >= min_size, "Requested PLAB is too small");
 548 
 549   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_plab(min_size, word_size);
 550   // Note that allocate_memory() sets a thread-local flag to prohibit further promotions by this thread
 551   // if we are at risk of infringing on the old-gen evacuation budget.
 552   HeapWord* res = allocate_memory(req);
 553   if (res != nullptr) {
 554     *actual_size = req.actual_size();
 555   } else {
 556     *actual_size = 0;
 557   }
 558   assert(is_aligned(res, CardTable::card_size_in_words()), "Align by design");
 559   return res;
 560 }
 561 
 562 void ShenandoahGenerationalHeap::retire_plab(PLAB* plab, Thread* thread) {
 563   // We don't enforce limits on plab evacuations.  We let it consume all available old-gen memory in order to reduce
 564   // probability of an evacuation failure.  We do enforce limits on promotion, to make sure that excessive promotion
 565   // does not result in an old-gen evacuation failure.  Note that a failed promotion is relatively harmless.  Any
 566   // object that fails to promote in the current cycle will be eligible for promotion in a subsequent cycle.
 567 
 568   // When the plab was instantiated, its entirety was treated as if the entire buffer was going to be dedicated to
 569   // promotions.  Now that we are retiring the buffer, we adjust for the reality that the plab is not entirely promotions.
 570   //  1. Some of the plab may have been dedicated to evacuations.
 571   //  2. Some of the plab may have been abandoned due to waste (at the end of the plab).
 572   size_t not_promoted =
 573           ShenandoahThreadLocalData::get_plab_actual_size(thread) - ShenandoahThreadLocalData::get_plab_promoted(thread);
 574   ShenandoahThreadLocalData::reset_plab_promoted(thread);
 575   ShenandoahThreadLocalData::set_plab_actual_size(thread, 0);
 576   if (not_promoted > 0) {
 577     old_generation()->unexpend_promoted(not_promoted);
 578   }
 579   const size_t original_waste = plab->waste();
 580   HeapWord* const top = plab->top();
 581 
 582   // plab->retire() overwrites unused memory between plab->top() and plab->hard_end() with a dummy object to make memory parsable.
 583   // It adds the size of this unused memory, in words, to plab->waste().
 584   plab->retire();
 585   if (top != nullptr && plab->waste() > original_waste && is_in_old(top)) {
 586     // If retiring the plab created a filler object, then we need to register it with our card scanner so it can
 587     // safely walk the region backing the plab.
 588     log_debug(gc)("retire_plab() is registering remnant of size %zu at " PTR_FORMAT,
 589                   plab->waste() - original_waste, p2i(top));
 590     // No lock is necessary because the PLAB memory is aligned on card boundaries.
 591     old_generation()->card_scan()->register_object_without_lock(top);
 592   }
 593 }
 594 
 595 void ShenandoahGenerationalHeap::retire_plab(PLAB* plab) {
 596   Thread* thread = Thread::current();
 597   retire_plab(plab, thread);
 598 }
 599 
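// Apply the region balance previously recorded on the old generation: a positive balance moves
// that many surplus regions from old to young, a negative balance moves regions from young to old.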
 600 ShenandoahGenerationalHeap::TransferResult ShenandoahGenerationalHeap::balance_generations() {
 601   shenandoah_assert_heaplocked_or_safepoint();
 602 
 603   ShenandoahOldGeneration* old_gen = old_generation();
 604   const ssize_t old_region_balance = old_gen->get_region_balance();
 605   old_gen->set_region_balance(0);
 606 
 607   if (old_region_balance > 0) {
 608     const auto old_region_surplus = checked_cast<size_t>(old_region_balance);
 609     const bool success = generation_sizer()->transfer_to_young(old_region_surplus);
 610     return TransferResult {
 611       success, old_region_surplus, "young"
 612     };
 613   }
 614 
 615   if (old_region_balance < 0) {
 616     const auto old_region_deficit = checked_cast<size_t>(-old_region_balance);
 617     const bool success = generation_sizer()->transfer_to_old(old_region_deficit);
 618     if (!success) {
 619       old_gen->handle_failed_transfer();
 620     }
 621     return TransferResult {
 622       success, old_region_deficit, "old"
 623     };
 624   }
 625 
 626   return TransferResult {true, 0, "none"};
 627 }
 628 
 629 // Make sure old-generation is large enough, but no larger than is necessary, to hold mixed evacuations
 630 // and promotions, if we anticipate either. Any deficit is provided by the young generation, subject to
// old_xfer_limit, and any surplus is transferred to the young generation.
// old_xfer_limit is the maximum number of bytes we are able to transfer from young to old.
 633 void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t old_xfer_limit, size_t old_cset_regions) {
 634 
 635   // We can limit the old reserve to the size of anticipated promotions:
 636   // max_old_reserve is an upper bound on memory evacuated from old and promoted to old,
 637   // clamped by the old generation space available.
 638   //
 639   // Here's the algebra.
 640   // Let SOEP = ShenandoahOldEvacRatioPercent,
 641   //     OE = old evac,
 642   //     YE = young evac, and
 643   //     TE = total evac = OE + YE
 644   // By definition:
 645   //            SOEP/100 = OE/TE
 646   //                     = OE/(OE+YE)
 647   //  => SOEP/(100-SOEP) = OE/((OE+YE)-OE)      // componendo-dividendo: If a/b = c/d, then a/(b-a) = c/(d-c)
 648   //                     = OE/YE
 649   //  =>              OE = YE*SOEP/(100-SOEP)
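  //
  // For example, with SOEP = 20 and a young evacuation reserve (YE) of 800 MB, the old reserve
  // would be bounded by OE = 800 * 20 / (100 - 20) = 200 MB (before clamping below).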
 650 
 651   // We have to be careful in the event that SOEP is set to 100 by the user.
 652   assert(ShenandoahOldEvacRatioPercent <= 100, "Error");
 653   const size_t old_available = old_generation()->available();
 654   // The free set will reserve this amount of memory to hold young evacuations
 655   const size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100;
 656 
 657   // In the case that ShenandoahOldEvacRatioPercent equals 100, max_old_reserve is limited only by xfer_limit.
 658 
 659   const double bound_on_old_reserve = old_available + old_xfer_limit + young_reserve;
  const double max_old_reserve = (ShenandoahOldEvacRatioPercent == 100) ?
          bound_on_old_reserve :
          MIN2(double(young_reserve * ShenandoahOldEvacRatioPercent) / double(100 - ShenandoahOldEvacRatioPercent),
               bound_on_old_reserve);
 663 
 664   const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 665 
 666   // Decide how much old space we should reserve for a mixed collection
 667   double reserve_for_mixed = 0;
 668   if (old_generation()->has_unprocessed_collection_candidates()) {
 669     // We want this much memory to be unfragmented in order to reliably evacuate old.  This is conservative because we
 670     // may not evacuate the entirety of unprocessed candidates in a single mixed evacuation.
 671     const double max_evac_need = (double(old_generation()->unprocessed_collection_candidates_live_memory()) * ShenandoahOldEvacWaste);
 672     assert(old_available >= old_generation()->free_unaffiliated_regions() * region_size_bytes,
 673            "Unaffiliated available must be less than total available");
 674     const double old_fragmented_available = double(old_available - old_generation()->free_unaffiliated_regions() * region_size_bytes);
 675     reserve_for_mixed = max_evac_need + old_fragmented_available;
 676     if (reserve_for_mixed > max_old_reserve) {
 677       reserve_for_mixed = max_old_reserve;
 678     }
 679   }
 680 
 681   // Decide how much space we should reserve for promotions from young
 682   size_t reserve_for_promo = 0;
 683   const size_t promo_load = old_generation()->get_promotion_potential();
 684   const bool doing_promotions = promo_load > 0;
 685   if (doing_promotions) {
 686     // We're promoting and have a bound on the maximum amount that can be promoted
 687     assert(max_old_reserve >= reserve_for_mixed, "Sanity");
 688     const size_t available_for_promotions = max_old_reserve - reserve_for_mixed;
 689     reserve_for_promo = MIN2((size_t)(promo_load * ShenandoahPromoEvacWaste), available_for_promotions);
 690   }
 691 
 692   // This is the total old we want to ideally reserve
 693   const size_t old_reserve = reserve_for_mixed + reserve_for_promo;
 694   assert(old_reserve <= max_old_reserve, "cannot reserve more than max for old evacuations");
 695 
 696   // We now check if the old generation is running a surplus or a deficit.
 697   const size_t max_old_available = old_generation()->available() + old_cset_regions * region_size_bytes;
 698   if (max_old_available >= old_reserve) {
 699     // We are running a surplus, so the old region surplus can go to young
 700     const size_t old_surplus = (max_old_available - old_reserve) / region_size_bytes;
 701     const size_t unaffiliated_old_regions = old_generation()->free_unaffiliated_regions() + old_cset_regions;
 702     const size_t old_region_surplus = MIN2(old_surplus, unaffiliated_old_regions);
 703     old_generation()->set_region_balance(checked_cast<ssize_t>(old_region_surplus));
 704   } else {
 705     // We are running a deficit which we'd like to fill from young.
 706     // Ignore that this will directly impact young_generation()->max_capacity(),
 707     // indirectly impacting young_reserve and old_reserve.  These computations are conservative.
 708     // Note that deficit is rounded up by one region.
 709     const size_t old_need = (old_reserve - max_old_available + region_size_bytes - 1) / region_size_bytes;
 710     const size_t max_old_region_xfer = old_xfer_limit / region_size_bytes;
 711 
 712     // Round down the regions we can transfer from young to old. If we're running short
 713     // on young-gen memory, we restrict the xfer. Old-gen collection activities will be
 714     // curtailed if the budget is restricted.
 715     const size_t old_region_deficit = MIN2(old_need, max_old_region_xfer);
 716     old_generation()->set_region_balance(0 - checked_cast<ssize_t>(old_region_deficit));
 717   }
 718 }
 719 
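// Clear the evacuation and promotion reserves established for the cycle that just completed.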
 720 void ShenandoahGenerationalHeap::reset_generation_reserves() {
 721   young_generation()->set_evacuation_reserve(0);
 722   old_generation()->set_evacuation_reserve(0);
 723   old_generation()->set_promoted_reserve(0);
 724 }
 725 
 726 void ShenandoahGenerationalHeap::TransferResult::print_on(const char* when, outputStream* ss) const {
 727   auto heap = ShenandoahGenerationalHeap::heap();
 728   ShenandoahYoungGeneration* const young_gen = heap->young_generation();
 729   ShenandoahOldGeneration* const old_gen = heap->old_generation();
 730   const size_t young_available = young_gen->available();
 731   const size_t old_available = old_gen->available();
 732   ss->print_cr("After %s, %s %zu regions to %s to prepare for next gc, old available: "
 733                      PROPERFMT ", young_available: " PROPERFMT,
 734                      when,
                     success ? "successfully transferred" : "failed to transfer", region_count, region_destination,
 736                      PROPERFMTARGS(old_available), PROPERFMTARGS(young_available));
 737 }
 738 
 739 void ShenandoahGenerationalHeap::coalesce_and_fill_old_regions(bool concurrent) {
 740   class ShenandoahGlobalCoalesceAndFill : public WorkerTask {
 741   private:
    ShenandoahPhaseTimings::Phase _phase;
    ShenandoahRegionIterator _regions;
 744   public:
 745     explicit ShenandoahGlobalCoalesceAndFill(ShenandoahPhaseTimings::Phase phase) :
 746       WorkerTask("Shenandoah Global Coalesce"),
 747       _phase(phase) {}
 748 
 749     void work(uint worker_id) override {
 750       ShenandoahWorkerTimingsTracker timer(_phase,
 751                                            ShenandoahPhaseTimings::ScanClusters,
 752                                            worker_id, true);
 753       ShenandoahHeapRegion* region;
 754       while ((region = _regions.next()) != nullptr) {
 755         // old region is not in the collection set and was not immediately trashed
 756         if (region->is_old() && region->is_active() && !region->is_humongous()) {
 757           // Reset the coalesce and fill boundary because this is a global collect
 758           // and cannot be preempted by young collects. We want to be sure the entire
 759           // region is coalesced here and does not resume from a previously interrupted
 760           // or completed coalescing.
 761           region->begin_preemptible_coalesce_and_fill();
 762           region->oop_coalesce_and_fill(false);
 763         }
 764       }
 765     }
 766   };
 767 
 768   ShenandoahPhaseTimings::Phase phase = concurrent ?
 769           ShenandoahPhaseTimings::conc_coalesce_and_fill :
 770           ShenandoahPhaseTimings::degen_gc_coalesce_and_fill;
 771 
 772   // This is not cancellable
 773   ShenandoahGlobalCoalesceAndFill coalesce(phase);
 774   workers()->run_task(&coalesce);
 775   old_generation()->set_parsable(true);
 776 }
 777 
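// Parallel update-refs task for generational mode. Workers first walk the heap regions, updating
// references in young regions (and in old regions as well during a global cycle). For young and
// mixed cycles, old regions are then processed through the remembered set, which spreads the
// remaining work more evenly across the workers.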
 778 template<bool CONCURRENT>
 779 class ShenandoahGenerationalUpdateHeapRefsTask : public WorkerTask {
 780 private:
 781   ShenandoahGenerationalHeap* _heap;
 782   ShenandoahRegionIterator* _regions;
 783   ShenandoahRegionChunkIterator* _work_chunks;
 784 
 785 public:
 786   explicit ShenandoahGenerationalUpdateHeapRefsTask(ShenandoahRegionIterator* regions,
 787                                                     ShenandoahRegionChunkIterator* work_chunks) :
 788           WorkerTask("Shenandoah Update References"),
 789           _heap(ShenandoahGenerationalHeap::heap()),
 790           _regions(regions),
 791           _work_chunks(work_chunks)
 792   {
 793     bool old_bitmap_stable = _heap->old_generation()->is_mark_complete();
 794     log_debug(gc, remset)("Update refs, scan remembered set using bitmap: %s", BOOL_TO_STR(old_bitmap_stable));
 795   }
 796 
 797   void work(uint worker_id) {
 798     if (CONCURRENT) {
 799       ShenandoahConcurrentWorkerSession worker_session(worker_id);
 800       ShenandoahSuspendibleThreadSetJoiner stsj;
 801       do_work<ShenandoahConcUpdateRefsClosure>(worker_id);
 802     } else {
 803       ShenandoahParallelWorkerSession worker_session(worker_id);
 804       do_work<ShenandoahNonConcUpdateRefsClosure>(worker_id);
 805     }
 806   }
 807 
 808 private:
 809   template<class T>
 810   void do_work(uint worker_id) {
 811     T cl;
 812 
 813     if (CONCURRENT && (worker_id == 0)) {
 814       // We ask the first worker to replenish the Mutator free set by moving regions previously reserved to hold the
 815       // results of evacuation.  These reserves are no longer necessary because evacuation has completed.
 816       size_t cset_regions = _heap->collection_set()->count();
 817 
 818       // Now that evacuation is done, we can reassign any regions that had been reserved to hold the results of evacuation
 819       // to the mutator free set.  At the end of GC, we will have cset_regions newly evacuated fully empty regions from
 820       // which we will be able to replenish the Collector free set and the OldCollector free set in preparation for the
 821       // next GC cycle.
 822       _heap->free_set()->move_regions_from_collector_to_mutator(cset_regions);
 823     }
 824     // If !CONCURRENT, there's no value in expanding Mutator free set
 825 
 826     ShenandoahHeapRegion* r = _regions->next();
 827     // We update references for global, old, and young collections.
 828     ShenandoahGeneration* const gc_generation = _heap->gc_generation();
 829     shenandoah_assert_generations_reconciled();
 830     assert(gc_generation->is_mark_complete(), "Expected complete marking");
 831     ShenandoahMarkingContext* const ctx = _heap->marking_context();
 832     bool is_mixed = _heap->collection_set()->has_old_regions();
 833     while (r != nullptr) {
 834       HeapWord* update_watermark = r->get_update_watermark();
 835       assert(update_watermark >= r->bottom(), "sanity");
 836 
 837       log_debug(gc)("Update refs worker " UINT32_FORMAT ", looking at region %zu", worker_id, r->index());
 838       bool region_progress = false;
 839       if (r->is_active() && !r->is_cset()) {
 840         if (r->is_young()) {
 841           _heap->marked_object_oop_iterate(r, &cl, update_watermark);
 842           region_progress = true;
 843         } else if (r->is_old()) {
 844           if (gc_generation->is_global()) {
 846             _heap->marked_object_oop_iterate(r, &cl, update_watermark);
 847             region_progress = true;
 848           }
 849           // Otherwise, this is an old region in a young or mixed cycle.  Process it during a second phase, below.
 850           // Don't bother to report pacing progress in this case.
 851         } else {
 852           // Because updating of references runs concurrently, it is possible that a FREE inactive region transitions
 853           // to a non-free active region while this loop is executing.  Whenever this happens, the changing of a region's
 854           // active status may propagate at a different speed than the changing of the region's affiliation.
 855 
 856           // When we reach this control point, it is because a race has allowed a region's is_active() status to be seen
 857           // by this thread before the region's affiliation() is seen by this thread.
 858 
 859           // It's ok for this race to occur because the newly transformed region does not have any references to be
 860           // updated.
 861 
 862           assert(r->get_update_watermark() == r->bottom(),
 863                  "%s Region %zu is_active but not recognized as YOUNG or OLD so must be newly transitioned from FREE",
 864                  r->affiliation_name(), r->index());
 865         }
 866       }
 867 
 868       if (region_progress && ShenandoahPacing) {
 869         _heap->pacer()->report_update_refs(pointer_delta(update_watermark, r->bottom()));
 870       }
 871 
 872       if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
 873         return;
 874       }
 875 
 876       r = _regions->next();
 877     }
 878 
 879     if (!gc_generation->is_global()) {
      // Since this is a young or mixed cycle (not GLOBAL), we still have to process the remembered set.
      // Remembered set processing is skipped in non-generational mode and in GLOBAL cycles.
 882 
 883       // After this thread has exhausted its traditional update-refs work, it continues with updating refs within
 884       // remembered set. The remembered set workload is better balanced between threads, so threads that are "behind"
 885       // can catch up with other threads during this phase, allowing all threads to work more effectively in parallel.
 886       update_references_in_remembered_set(worker_id, cl, ctx, is_mixed);
 887     }
 888   }
 889 
 890   template<class T>
 891   void update_references_in_remembered_set(uint worker_id, T &cl, const ShenandoahMarkingContext* ctx, bool is_mixed) {
 892 
 893     struct ShenandoahRegionChunk assignment;
 894     ShenandoahScanRemembered* scanner = _heap->old_generation()->card_scan();
 895 
 896     while (!_heap->check_cancelled_gc_and_yield(CONCURRENT) && _work_chunks->next(&assignment)) {
 897       // Keep grabbing next work chunk to process until finished, or asked to yield
 898       ShenandoahHeapRegion* r = assignment._r;
 899       if (r->is_active() && !r->is_cset() && r->is_old()) {
 900         HeapWord* start_of_range = r->bottom() + assignment._chunk_offset;
 901         HeapWord* end_of_range = r->get_update_watermark();
 902         if (end_of_range > start_of_range + assignment._chunk_size) {
 903           end_of_range = start_of_range + assignment._chunk_size;
 904         }
 905 
 906         if (start_of_range >= end_of_range) {
 907           continue;
 908         }
 909 
 910         // Old region in a young cycle or mixed cycle.
 911         if (is_mixed) {
 912           if (r->is_humongous()) {
 913             // Need to examine both dirty and clean cards during mixed evac.
            r->oop_iterate_humongous_slice_all(&cl, start_of_range, assignment._chunk_size);
 915           } else {
 916             // Since this is mixed evacuation, old regions that are candidates for collection have not been coalesced
 917             // and filled.  This will use mark bits to find objects that need to be updated.
 918             update_references_in_old_region(cl, ctx, scanner, r, start_of_range, end_of_range);
 919           }
 920         } else {
 921           // This is a young evacuation
 922           size_t cluster_size = CardTable::card_size_in_words() * ShenandoahCardCluster::CardsPerCluster;
 923           size_t clusters = assignment._chunk_size / cluster_size;
 924           assert(clusters * cluster_size == assignment._chunk_size, "Chunk assignment must align on cluster boundaries");
 925           scanner->process_region_slice(r, assignment._chunk_offset, clusters, end_of_range, &cl, true, worker_id);
 926         }
 927 
 928         if (ShenandoahPacing) {
 929           _heap->pacer()->report_update_refs(pointer_delta(end_of_range, start_of_range));
 930         }
 931       }
 932     }
 933   }
 934 
 935   template<class T>
 936   void update_references_in_old_region(T &cl, const ShenandoahMarkingContext* ctx, ShenandoahScanRemembered* scanner,
 937                                     const ShenandoahHeapRegion* r, HeapWord* start_of_range,
 938                                     HeapWord* end_of_range) const {
 939     // In case last object in my range spans boundary of my chunk, I may need to scan all the way to top()
 940     ShenandoahObjectToOopBoundedClosure<T> objs(&cl, start_of_range, r->top());
 941 
 942     // Any object that begins in a previous range is part of a different scanning assignment.  Any object that
 943     // starts after end_of_range is also not my responsibility.  (Either allocated during evacuation, so does
 944     // not hold pointers to from-space, or is beyond the range of my assigned work chunk.)
 945 
 946     // Find the first object that begins in my range, if there is one. Note that `p` will be set to `end_of_range`
 947     // when no live object is found in the range.
 948     HeapWord* tams = ctx->top_at_mark_start(r);
 949     HeapWord* p = get_first_object_start_word(ctx, scanner, tams, start_of_range, end_of_range);
 950 
 951     while (p < end_of_range) {
 952       // p is known to point to the beginning of marked object obj
 953       oop obj = cast_to_oop(p);
 954       objs.do_object(obj);
 955       HeapWord* prev_p = p;
 956       p += obj->size();
 957       if (p < tams) {
 958         p = ctx->get_next_marked_addr(p, tams);
 959         // If there are no more marked objects before tams, this returns tams.  Note that tams is
 960         // either >= end_of_range, or tams is the start of an object that is marked.
 961       }
 962       assert(p != prev_p, "Lack of forward progress");
 963     }
 964   }
 965 
 966   HeapWord* get_first_object_start_word(const ShenandoahMarkingContext* ctx, ShenandoahScanRemembered* scanner, HeapWord* tams,
 967                                         HeapWord* start_of_range, HeapWord* end_of_range) const {
 968     HeapWord* p = start_of_range;
 969 
 970     if (p >= tams) {
 971       // We cannot use ctx->is_marked(obj) to test whether an object begins at this address.  Instead,
 972       // we need to use the remembered set crossing map to advance p to the first object that starts
 973       // within the enclosing card.
 974       size_t card_index = scanner->card_index_for_addr(start_of_range);
 975       while (true) {
 976         HeapWord* first_object = scanner->first_object_in_card(card_index);
 977         if (first_object != nullptr) {
 978           p = first_object;
 979           break;
 980         } else if (scanner->addr_for_card_index(card_index + 1) < end_of_range) {
 981           card_index++;
 982         } else {
 983           // Signal that no object was found in range
 984           p = end_of_range;
 985           break;
 986         }
 987       }
 988     } else if (!ctx->is_marked(cast_to_oop(p))) {
 989       p = ctx->get_next_marked_addr(p, tams);
 990       // If there are no more marked objects before tams, this returns tams.
 991       // Note that tams is either >= end_of_range, or tams is the start of an object that is marked.
 992     }
 993     return p;
 994   }
 995 };
 996 
 997 void ShenandoahGenerationalHeap::update_heap_references(bool concurrent) {
 998   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
 999   const uint nworkers = workers()->active_workers();
1000   ShenandoahRegionChunkIterator work_list(nworkers);
1001   if (concurrent) {
1002     ShenandoahGenerationalUpdateHeapRefsTask<true> task(&_update_refs_iterator, &work_list);
1003     workers()->run_task(&task);
1004   } else {
1005     ShenandoahGenerationalUpdateHeapRefsTask<false> task(&_update_refs_iterator, &work_list);
1006     workers()->run_task(&task);
1007   }
1008 
1009   if (ShenandoahEnableCardStats) {
1010     // Only do this if we are collecting card stats
1011     ShenandoahScanRemembered* card_scan = old_generation()->card_scan();
1012     assert(card_scan != nullptr, "Card table must exist when card stats are enabled");
1013     card_scan->log_card_stats(nworkers, CARD_STAT_UPDATE_REFS);
1014   }
1015 }
1016 
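// Combines two heap region closures so that a single region iteration applies both of them.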
1017 struct ShenandoahCompositeRegionClosure {
1018   template<typename C1, typename C2>
1019   class Closure : public ShenandoahHeapRegionClosure {
1020   private:
1021     C1 &_c1;
1022     C2 &_c2;
1023 
1024   public:
1025     Closure(C1 &c1, C2 &c2) : ShenandoahHeapRegionClosure(), _c1(c1), _c2(c2) {}
1026 
1027     void heap_region_do(ShenandoahHeapRegion* r) override {
1028       _c1.heap_region_do(r);
1029       _c2.heap_region_do(r);
1030     }
1031 
1032     bool is_thread_safe() override {
1033       return _c1.is_thread_safe() && _c2.is_thread_safe();
1034     }
1035   };
1036 
1037   template<typename C1, typename C2>
1038   static Closure<C1, C2> of(C1 &c1, C2 &c2) {
1039     return Closure<C1, C2>(c1, c2);
1040   }
1041 };
1042 
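// Region closure that maintains the age of active young regions: regions with allocations since
// mark start have their age reset, while untouched regions grow older on aging cycles.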
1043 class ShenandoahUpdateRegionAges : public ShenandoahHeapRegionClosure {
1044 private:
1045   ShenandoahMarkingContext* _ctx;
1046 
1047 public:
1048   explicit ShenandoahUpdateRegionAges(ShenandoahMarkingContext* ctx) : _ctx(ctx) { }
1049 
1050   void heap_region_do(ShenandoahHeapRegion* r) override {
1051     // Maintenance of region age must follow evacuation in order to account for
1052     // evacuation allocations within survivor regions.  We consult region age during
1053     // the subsequent evacuation to determine whether certain objects need to
1054     // be promoted.
1055     if (r->is_young() && r->is_active()) {
1056       HeapWord *tams = _ctx->top_at_mark_start(r);
1057       HeapWord *top = r->top();
1058 
1059       // Allocations move the watermark when top moves.  However, compacting
1060       // objects will sometimes lower top beneath the watermark, after which,
1061       // attempts to read the watermark will assert out (watermark should not be
1062       // higher than top).
1063       if (top > tams) {
1064         // There have been allocations in this region since the start of the cycle.
1065         // Any objects new to this region must not assimilate elevated age.
1066         r->reset_age();
1067       } else if (ShenandoahGenerationalHeap::heap()->is_aging_cycle()) {
1068         r->increment_age();
1069       }
1070     }
1071   }
1072 
1073   bool is_thread_safe() override {
1074     return true;
1075   }
1076 };
1077 
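// During final update-refs, synchronize pinned region states and update region ages in one
// parallel pass over the heap regions.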
1078 void ShenandoahGenerationalHeap::final_update_refs_update_region_states() {
1079   ShenandoahSynchronizePinnedRegionStates pins;
1080   ShenandoahUpdateRegionAges ages(active_generation()->complete_marking_context());
1081   auto cl = ShenandoahCompositeRegionClosure::of(pins, ages);
1082   parallel_heap_region_iterate(&cl);
1083 }
1084 
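// Bookkeeping after a degenerated cycle: flush SATB buffers to the old generation if old marking
// is still in progress, rebalance generation capacities, reset reserves, and coalesce-and-fill old
// regions if they were left unparsable.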
1085 void ShenandoahGenerationalHeap::complete_degenerated_cycle() {
1086   shenandoah_assert_heaplocked_or_safepoint();
1087   if (is_concurrent_old_mark_in_progress()) {
1088     // This is still necessary for degenerated cycles because the degeneration point may occur
1089     // after final mark of the young generation. See ShenandoahConcurrentGC::op_final_update_refs for
1090     // a more detailed explanation.
1091     old_generation()->transfer_pointers_from_satb();
1092   }
1093 
1094   // We defer generation resizing actions until after cset regions have been recycled.
1095   TransferResult result = balance_generations();
1096   LogTarget(Info, gc, ergo) lt;
1097   if (lt.is_enabled()) {
1098     LogStream ls(lt);
1099     result.print_on("Degenerated GC", &ls);
1100   }
1101 
1102   // In case degeneration interrupted concurrent evacuation or update references, we need to clean up
1103   // transient state. Otherwise, these actions have no effect.
1104   reset_generation_reserves();
1105 
1106   if (!old_generation()->is_parsable()) {
1107     ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_coalesce_and_fill);
1108     coalesce_and_fill_old_regions(false);
1109   }
1110 }
1111 
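// Bookkeeping after a concurrent cycle: make old regions parsable again if needed, then rebalance
// generation capacities and reset reserves under the heap lock.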
1112 void ShenandoahGenerationalHeap::complete_concurrent_cycle() {
1113   if (!old_generation()->is_parsable()) {
1114     // Class unloading may render the card offsets unusable, so we must rebuild them before
1115     // the next remembered set scan. We _could_ let the control thread do this sometime after
1116     // the global cycle has completed and before the next young collection, but under memory
1117     // pressure the control thread may not have the time (that is, because it's running back
1118     // to back GCs). In that scenario, we would have to make the old regions parsable before
1119     // we could start a young collection. This could delay the start of the young cycle and
1120     // throw off the heuristics.
1121     entry_global_coalesce_and_fill();
1122   }
1123 
1124   TransferResult result;
1125   {
1126     ShenandoahHeapLocker locker(lock());
1127 
1128     result = balance_generations();
1129     reset_generation_reserves();
1130   }
1131 
1132   LogTarget(Info, gc, ergo) lt;
1133   if (lt.is_enabled()) {
1134     LogStream ls(lt);
1135     result.print_on("Concurrent GC", &ls);
1136   }
1137 }
1138 
1139 void ShenandoahGenerationalHeap::entry_global_coalesce_and_fill() {
1140   const char* msg = "Coalescing and filling old regions";
1141   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_coalesce_and_fill);
1142 
1143   TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
1144   EventMark em("%s", msg);
1145   ShenandoahWorkerScope scope(workers(),
1146                               ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
1147                               "concurrent coalesce and fill");
1148 
1149   coalesce_and_fill_old_regions(true);
1150 }
1151 
1152 void ShenandoahGenerationalHeap::update_region_ages(ShenandoahMarkingContext* ctx) {
1153   ShenandoahUpdateRegionAges cl(ctx);
1154   parallel_heap_region_iterate(&cl);
1155 }