/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahAgeCensus.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahGenerationalControlThread.hpp"
#include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeapRegionClosures.hpp"
#include "gc/shenandoah/shenandoahInitLogger.hpp"
#include "gc/shenandoah/shenandoahMemoryPool.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahRegulatorThread.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "logging/log.hpp"
#include "utilities/events.hpp"


class ShenandoahGenerationalInitLogger : public ShenandoahInitLogger {
public:
  static void print() {
    ShenandoahGenerationalInitLogger logger;
    logger.print_all();
  }

  void print_heap() override {
    ShenandoahInitLogger::print_heap();

    ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();

    ShenandoahYoungGeneration* young = heap->young_generation();
    log_info(gc, init)("Young Generation Soft Size: " EXACTFMT, EXACTFMTARGS(young->soft_max_capacity()));
    log_info(gc, init)("Young Generation Max: " EXACTFMT, EXACTFMTARGS(young->max_capacity()));

    ShenandoahOldGeneration* old = heap->old_generation();
    log_info(gc, init)("Old Generation Soft Size: " EXACTFMT, EXACTFMTARGS(old->soft_max_capacity()));
    log_info(gc, init)("Old Generation Max: " EXACTFMT, EXACTFMTARGS(old->max_capacity()));
  }

protected:
  void print_gc_specific() override {
    ShenandoahInitLogger::print_gc_specific();

    ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
    log_info(gc, init)("Young Heuristics: %s", heap->young_generation()->heuristics()->name());
    log_info(gc, init)("Old Heuristics: %s", heap->old_generation()->heuristics()->name());
  }
};

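// PLAB size bounds are kept card-aligned so that PLABs always begin and end on card
// boundaries. This lets remnant filler objects be registered with the remembered set
// scanner without a lock (see retire_plab()), because concurrent registrations in
// other PLABs never touch the same card.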
size_t ShenandoahGenerationalHeap::calculate_min_plab() {
  return align_up(PLAB::min_size(), CardTable::card_size_in_words());
}

size_t ShenandoahGenerationalHeap::calculate_max_plab() {
  size_t MaxTLABSizeWords = ShenandoahHeapRegion::max_tlab_size_words();
  return align_down(MaxTLABSizeWords, CardTable::card_size_in_words());
}

// Returns size in bytes
size_t ShenandoahGenerationalHeap::unsafe_max_tlab_alloc(Thread *thread) const {
  return MIN2(ShenandoahHeapRegion::max_tlab_size_bytes(), young_generation()->available());
}

ShenandoahGenerationalHeap::ShenandoahGenerationalHeap(ShenandoahCollectorPolicy* policy) :
  ShenandoahHeap(policy),
  _age_census(nullptr),
  _evac_tracker(new ShenandoahEvacuationTracker()),
  _min_plab_size(calculate_min_plab()),
  _max_plab_size(calculate_max_plab()),
  _regulator_thread(nullptr),
  _young_gen_memory_pool(nullptr),
  _old_gen_memory_pool(nullptr) {
  assert(is_aligned(_min_plab_size, CardTable::card_size_in_words()), "min_plab_size must be aligned");
  assert(is_aligned(_max_plab_size, CardTable::card_size_in_words()), "max_plab_size must be aligned");
}

void ShenandoahGenerationalHeap::post_initialize() {
  ShenandoahHeap::post_initialize();
  _age_census = new ShenandoahAgeCensus();
}

void ShenandoahGenerationalHeap::print_init_logger() const {
  ShenandoahGenerationalInitLogger logger;
  logger.print_all();
}

void ShenandoahGenerationalHeap::print_tracing_info() const {
  ShenandoahHeap::print_tracing_info();

  LogTarget(Info, gc, stats) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.cr();
    ls.cr();
    evac_tracker()->print_global_on(&ls);
  }
}

void ShenandoahGenerationalHeap::initialize_heuristics() {
  // Initialize global generation and heuristics even in generational mode.
  ShenandoahHeap::initialize_heuristics();

  // Max capacity is the maximum _allowed_ capacity. That is, the maximum allowed capacity
  // for old would be total heap - minimum capacity of young. This means the sum of the maximum
  // allowed for old and young could exceed the total heap size. It remains the case that the
  // _actual_ capacity of young + old = total.
  _generation_sizer.heap_size_changed(max_capacity());
  size_t initial_capacity_young = _generation_sizer.max_young_size();
  size_t max_capacity_young = _generation_sizer.max_young_size();
  size_t initial_capacity_old = max_capacity() - max_capacity_young;
  size_t max_capacity_old = max_capacity() - initial_capacity_young;

  _young_generation = new ShenandoahYoungGeneration(max_workers(), max_capacity_young, initial_capacity_young);
  _old_generation = new ShenandoahOldGeneration(max_workers(), max_capacity_old, initial_capacity_old);
  _young_generation->initialize_heuristics(mode());
  _old_generation->initialize_heuristics(mode());
}

void ShenandoahGenerationalHeap::initialize_serviceability() {
  assert(mode()->is_generational(), "Only for the generational mode");
  _young_gen_memory_pool = new ShenandoahYoungGenMemoryPool(this);
  _old_gen_memory_pool = new ShenandoahOldGenMemoryPool(this);
  cycle_memory_manager()->add_pool(_young_gen_memory_pool);
  cycle_memory_manager()->add_pool(_old_gen_memory_pool);
  stw_memory_manager()->add_pool(_young_gen_memory_pool);
  stw_memory_manager()->add_pool(_old_gen_memory_pool);
}

GrowableArray<MemoryPool*> ShenandoahGenerationalHeap::memory_pools() {
  assert(mode()->is_generational(), "Only for the generational mode");
  GrowableArray<MemoryPool*> memory_pools(2);
  memory_pools.append(_young_gen_memory_pool);
  memory_pools.append(_old_gen_memory_pool);
  return memory_pools;
}

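// In generational mode the control thread is paired with a regulator thread: the
// regulator monitors the young and old heuristics and requests cycles, while the
// generational control thread carries them out.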
void ShenandoahGenerationalHeap::initialize_controller() {
  auto control_thread = new ShenandoahGenerationalControlThread();
  _control_thread = control_thread;
  _regulator_thread = new ShenandoahRegulatorThread(control_thread);
}

void ShenandoahGenerationalHeap::gc_threads_do(ThreadClosure* tcl) const {
  if (!shenandoah_policy()->is_at_shutdown()) {
    ShenandoahHeap::gc_threads_do(tcl);
    tcl->do_thread(regulator_thread());
  }
}

void ShenandoahGenerationalHeap::stop() {
  ShenandoahHeap::stop();
  regulator_thread()->stop();
}

void ShenandoahGenerationalHeap::evacuate_collection_set(bool concurrent) {
  ShenandoahRegionIterator regions;
  ShenandoahGenerationalEvacuationTask task(this, &regions, concurrent, false /* only promote regions */);
  workers()->run_task(&task);
}

void ShenandoahGenerationalHeap::promote_regions_in_place(bool concurrent) {
  ShenandoahRegionIterator regions;
  ShenandoahGenerationalEvacuationTask task(this, &regions, concurrent, true /* only promote regions */);
  workers()->run_task(&task);
}

oop ShenandoahGenerationalHeap::evacuate_object(oop p, Thread* thread) {
  assert(thread == Thread::current(), "Expected thread parameter to be current thread.");
  if (ShenandoahThreadLocalData::is_oom_during_evac(thread)) {
    // This thread went through the OOM during evac protocol and it is safe to return
    // the forward pointer. It must not attempt to evacuate anymore.
    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");

  ShenandoahHeapRegion* r = heap_region_containing(p);
  assert(!r->is_humongous(), "never evacuate humongous objects");

  ShenandoahAffiliation target_gen = r->affiliation();
  // gc_generation() can change asynchronously and should not be used here.
  assert(active_generation() != nullptr, "Error");
  if (active_generation()->is_young() && target_gen == YOUNG_GENERATION) {
    markWord mark = p->mark();
    if (mark.is_marked()) {
      // Already forwarded.
      return ShenandoahBarrierSet::resolve_forwarded(p);
    }

    if (mark.has_displaced_mark_helper()) {
      // We don't want to deal with MT here just to ensure we read the right mark word.
      // Skip the potential promotion attempt for this one.
    } else if (r->age() + mark.age() >= age_census()->tenuring_threshold()) {
      oop result = try_evacuate_object(p, thread, r, OLD_GENERATION);
      if (result != nullptr) {
        return result;
      }
      // If we failed to promote this aged object, we'll fall through to code below and evacuate to young-gen.
    }
  }
  return try_evacuate_object(p, thread, r, target_gen);
}

// try_evacuate_object registers the object and dirties the associated remembered set information when evacuating
// to OLD_GENERATION.
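// The overall flow is: try to allocate the copy from the appropriate LAB (GCLAB for
// young, PLAB for old), fall back to a shared allocation if that fails, handle
// promotion/evacuation failure if no memory is available, then copy the object and
// attempt to install the forwarding pointer. If another thread wins the race, the
// speculative copy is retracted (LAB) or overwritten with a filler object (shared).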
oop ShenandoahGenerationalHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapRegion* from_region,
                                        ShenandoahAffiliation target_gen) {
  bool alloc_from_lab = true;
  bool has_plab = false;
  HeapWord* copy = nullptr;
  size_t size = p->size();
  bool is_promotion = (target_gen == OLD_GENERATION) && from_region->is_young();

#ifdef ASSERT
  if (ShenandoahOOMDuringEvacALot &&
      (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
    copy = nullptr;
  } else {
#endif
    if (UseTLAB) {
      switch (target_gen) {
        case YOUNG_GENERATION: {
          copy = allocate_from_gclab(thread, size);
          if ((copy == nullptr) && (size < ShenandoahThreadLocalData::gclab_size(thread))) {
            // GCLAB allocation failed because we are bumping up against the limit on young evacuation reserve.  Try resetting
            // the desired GCLAB size and retry GCLAB allocation to avoid cascading of shared memory allocations.
            ShenandoahThreadLocalData::set_gclab_size(thread, PLAB::min_size());
            copy = allocate_from_gclab(thread, size);
            // If we still get nullptr, we'll try a shared allocation below.
          }
          break;
        }
        case OLD_GENERATION: {
          PLAB* plab = ShenandoahThreadLocalData::plab(thread);
          if (plab != nullptr) {
            has_plab = true;
            copy = allocate_from_plab(thread, size, is_promotion);
            if ((copy == nullptr) && (size < ShenandoahThreadLocalData::plab_size(thread)) &&
                ShenandoahThreadLocalData::plab_retries_enabled(thread)) {
              // PLAB allocation failed because we are bumping up against the limit on old evacuation reserve or because
              // the requested object does not fit within the current plab but the plab still has an "abundance" of memory,
              // where abundance is defined as >= ShenGenHeap::plab_min_size().  In the former case, we try shrinking the
              // desired PLAB size to the minimum and retry PLAB allocation to avoid cascading of shared memory allocations.
              if (plab->words_remaining() < plab_min_size()) {
                ShenandoahThreadLocalData::set_plab_size(thread, plab_min_size());
                copy = allocate_from_plab(thread, size, is_promotion);
                // If we still get nullptr, we'll try a shared allocation below.
                if (copy == nullptr) {
                  // If retry fails, don't continue to retry until we have success (probably in next GC pass)
                  ShenandoahThreadLocalData::disable_plab_retries(thread);
                }
              }
              // else, copy still equals nullptr.  this causes shared allocation below, preserving this plab for future needs.
            }
          }
          break;
        }
        default: {
          ShouldNotReachHere();
          break;
        }
      }
    }

    if (copy == nullptr) {
      // If we failed to allocate in LAB, we'll try a shared allocation.
      if (!is_promotion || !has_plab || (size > PLAB::min_size())) {
        ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, target_gen, is_promotion);
        copy = allocate_memory(req);
        alloc_from_lab = false;
      }
      // else, we leave copy equal to nullptr, signaling a promotion failure below if appropriate.
      // We choose not to promote objects smaller than PLAB::min_size() by way of shared allocations, as this is too
      // costly.  Instead, we'll simply "evacuate" to young-gen memory (using a GCLAB) and will promote in a future
      // evacuation pass.  This condition is denoted by: is_promotion && has_plab && (size <= PLAB::min_size())
    }
#ifdef ASSERT
  }
#endif

  if (copy == nullptr) {
    if (target_gen == OLD_GENERATION) {
      if (from_region->is_young()) {
        // Signal that promotion failed. Will evacuate this old object somewhere in young gen.
        old_generation()->handle_failed_promotion(thread, size);
        return nullptr;
      } else {
        // Remember that evacuation to old gen failed. We'll want to trigger a full gc to recover from this
        // after the evacuation threads have finished.
        old_generation()->handle_failed_evacuation();
      }
    }

    control_thread()->handle_alloc_failure_evac(size);

    oom_evac_handler()->handle_out_of_memory_during_evacuation();

    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  // Copy the object:
  NOT_PRODUCT(evac_tracker()->begin_evacuation(thread, size * HeapWordSize));
  Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
  oop copy_val = cast_to_oop(copy);

  // Update the age of the evacuated object
  if (target_gen == YOUNG_GENERATION && is_aging_cycle()) {
    ShenandoahHeap::increase_object_age(copy_val, from_region->age() + 1);
  }

  // Try to install the new forwarding pointer.
  oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
  if (result == copy_val) {
    // Successfully evacuated. Our copy is now the public one!

    // This is necessary for virtual thread support. This uses the mark word without
    // considering that it may now be a forwarding pointer (and could therefore crash).
    // Secondarily, we do not want to spend cycles relativizing stack chunks for oops
    // that lost the evacuation race (and will therefore not become visible). It is
    // safe to do this on the public copy (this is also done during concurrent mark).
    ContinuationGCSupport::relativize_stack_chunk(copy_val);

    // Record that the evacuation succeeded
    NOT_PRODUCT(evac_tracker()->end_evacuation(thread, size * HeapWordSize));

    if (target_gen == OLD_GENERATION) {
      old_generation()->handle_evacuation(copy, size, from_region->is_young());
    } else {
      // When copying to the old generation above, we don't care
      // about recording object age in the census stats.
      assert(target_gen == YOUNG_GENERATION, "Error");
      // We record this census only when simulating pre-adaptive tenuring behavior, or
      // when we have been asked to record the census at evacuation rather than at mark
      if (ShenandoahGenerationalCensusAtEvac || !ShenandoahGenerationalAdaptiveTenuring) {
        evac_tracker()->record_age(thread, size * HeapWordSize, ShenandoahHeap::get_object_age(copy_val));
      }
    }
    shenandoah_assert_correct(nullptr, copy_val);
    return copy_val;
  } else {
    // Failed to evacuate. We need to deal with the object that is left behind. Since this
    // new allocation is certainly after TAMS, it will be considered live in the next cycle.
    // But if it happens to contain references to evacuated regions, those references would
    // not get updated for this stale copy during this cycle, and we will crash while scanning
    // it the next cycle.
    if (alloc_from_lab) {
      // For LAB allocations, it is enough to rollback the allocation ptr. Either the next
      // object will overwrite this stale copy, or the filler object on LAB retirement will
      // do this.
      switch (target_gen) {
        case YOUNG_GENERATION: {
          ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
          break;
        }
        case OLD_GENERATION: {
          ShenandoahThreadLocalData::plab(thread)->undo_allocation(copy, size);
          if (is_promotion) {
            ShenandoahThreadLocalData::subtract_from_plab_promoted(thread, size * HeapWordSize);
          }
          break;
        }
        default: {
          ShouldNotReachHere();
          break;
        }
      }
    } else {
      // For non-LAB allocations, we have no way to retract the allocation, and
      // have to explicitly overwrite the copy with the filler object. With that overwrite,
      // we have to keep the fwdptr initialized and pointing to our (stale) copy.
      assert(size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
      fill_with_object(copy, size);
      shenandoah_assert_correct(nullptr, copy_val);
      // For non-LAB allocations, the object has already been registered
    }
    shenandoah_assert_correct(nullptr, result);
    return result;
  }
}

inline HeapWord* ShenandoahGenerationalHeap::allocate_from_plab(Thread* thread, size_t size, bool is_promotion) {
  assert(UseTLAB, "TLABs should be enabled");

  PLAB* plab = ShenandoahThreadLocalData::plab(thread);
  HeapWord* obj;

  if (plab == nullptr) {
    assert(!thread->is_Java_thread() && !thread->is_Worker_thread(), "Performance: thread should have PLAB: %s", thread->name());
    // No PLABs in this thread, fallback to shared allocation
    return nullptr;
  } else if (is_promotion && !ShenandoahThreadLocalData::allow_plab_promotions(thread)) {
    return nullptr;
  }
  // if plab->word_size() <= 0, thread's plab not yet initialized for this pass, so allow_plab_promotions() is not trustworthy
  obj = plab->allocate(size);
  if ((obj == nullptr) && (plab->words_remaining() < plab_min_size())) {
    // allocate_from_plab_slow will establish allow_plab_promotions(thread) for future invocations
    obj = allocate_from_plab_slow(thread, size, is_promotion);
  }
  // if plab->words_remaining() >= ShenGenHeap::heap()->plab_min_size(), just return nullptr so we can use a shared allocation
  if (obj == nullptr) {
    return nullptr;
  }

  if (is_promotion) {
    ShenandoahThreadLocalData::add_to_plab_promoted(thread, size * HeapWordSize);
  }
  return obj;
}

// Establish a new PLAB and allocate size HeapWords within it.
HeapWord* ShenandoahGenerationalHeap::allocate_from_plab_slow(Thread* thread, size_t size, bool is_promotion) {
  // New object should fit the PLAB size

  assert(mode()->is_generational(), "PLABs only relevant to generational GC");
  const size_t plab_min_size = this->plab_min_size();
  // PLABs are aligned to card boundaries to avoid synchronization with concurrent
  // allocations in other PLABs.
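  // Illustration with hypothetical values: with 512-byte cards (64 HeapWords per card),
  // a request of 200 words that exceeds plab_min_size is rounded up to
  // align_up(200, 64) = 256 words; smaller requests simply use plab_min_size.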
  const size_t min_size = (size > plab_min_size)? align_up(size, CardTable::card_size_in_words()): plab_min_size;

  // Figure out size of new PLAB, using value determined at last refill.
  size_t cur_size = ShenandoahThreadLocalData::plab_size(thread);
  if (cur_size == 0) {
    cur_size = plab_min_size;
  }

  // Expand aggressively, doubling at each refill in this epoch, ceiling at plab_max_size()
  size_t future_size = MIN2(cur_size * 2, plab_max_size());
  // Doubling, starting at a card-multiple, should give us a card-multiple. (Ceiling and floor
  // are card multiples.)
  assert(is_aligned(future_size, CardTable::card_size_in_words()), "Card multiple by construction, future_size: " SIZE_FORMAT
          ", card_size: " SIZE_FORMAT ", cur_size: " SIZE_FORMAT ", max: " SIZE_FORMAT,
         future_size, (size_t) CardTable::card_size_in_words(), cur_size, plab_max_size());

  // Record new heuristic value even if we take any shortcut. This captures
  // the case when moderately-sized objects always take a shortcut. At some point,
  // heuristics should catch up with them.  Note that the requested cur_size may
  // not be honored, but we remember that this is the preferred size.
  log_debug(gc, free)("Set new PLAB size: " SIZE_FORMAT, future_size);
  ShenandoahThreadLocalData::set_plab_size(thread, future_size);
  if (cur_size < size) {
    // The PLAB to be allocated is still not large enough to hold the object. Fall back to shared allocation.
    // This avoids retiring perfectly good PLABs in order to represent a single large object allocation.
    log_debug(gc, free)("Current PLAB size (" SIZE_FORMAT ") is too small for " SIZE_FORMAT, cur_size, size);
    return nullptr;
  }

  // Retire current PLAB, and allocate a new one.
  PLAB* plab = ShenandoahThreadLocalData::plab(thread);
  if (plab->words_remaining() < plab_min_size) {
    // Retire current PLAB. This takes care of any PLAB book-keeping.
    // retire_plab() registers the remnant filler object with the remembered set scanner without a lock.
    // Since PLABs are card-aligned, concurrent registrations in other PLABs don't interfere.
    retire_plab(plab, thread);

    size_t actual_size = 0;
    HeapWord* plab_buf = allocate_new_plab(min_size, cur_size, &actual_size);
    if (plab_buf == nullptr) {
      if (min_size == plab_min_size) {
        // Disable PLAB promotions for this thread because we cannot even allocate a minimal PLAB. This allows us
        // to fail faster on subsequent promotion attempts.
        ShenandoahThreadLocalData::disable_plab_promotions(thread);
      }
      return nullptr;
    } else {
      ShenandoahThreadLocalData::enable_plab_retries(thread);
    }
    // Since the allocated PLAB may have been down-sized for alignment, plab->allocate(size) below may still fail.
    if (ZeroTLAB) {
      // ... and clear it.
      Copy::zero_to_words(plab_buf, actual_size);
    } else {
      // ...and zap just allocated object.
#ifdef ASSERT
      // Skip mangling the space corresponding to the object header to
      // ensure that the returned space is not considered parsable by
      // any concurrent GC thread.
      size_t hdr_size = oopDesc::header_size();
      Copy::fill_to_words(plab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
#endif // ASSERT
    }
    assert(is_aligned(actual_size, CardTable::card_size_in_words()), "Align by design");
    plab->set_buf(plab_buf, actual_size);
    if (is_promotion && !ShenandoahThreadLocalData::allow_plab_promotions(thread)) {
      return nullptr;
    }
    return plab->allocate(size);
  } else {
    // If there's still at least min_size() words available within the current plab, don't retire it.  Let's nibble
    // away on this plab as long as we can.  Meanwhile, return nullptr to force this particular allocation request
    // to be satisfied with a shared allocation.  By packing more promotions into the previously allocated PLAB, we
    // reduce the likelihood of evacuation failures, and we reduce the need for downsizing our PLABs.
    return nullptr;
  }
}

HeapWord* ShenandoahGenerationalHeap::allocate_new_plab(size_t min_size, size_t word_size, size_t* actual_size) {
  // Align requested sizes to card-sized multiples.  Align down so that we don't violate max size of TLAB.
  assert(is_aligned(min_size, CardTable::card_size_in_words()), "Align by design");
  assert(word_size >= min_size, "Requested PLAB is too small");

  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_plab(min_size, word_size);
  // Note that allocate_memory() sets a thread-local flag to prohibit further promotions by this thread
  // if we are at risk of infringing on the old-gen evacuation budget.
  HeapWord* res = allocate_memory(req);
  if (res != nullptr) {
    *actual_size = req.actual_size();
  } else {
    *actual_size = 0;
  }
  assert(is_aligned(res, CardTable::card_size_in_words()), "Align by design");
  return res;
}

void ShenandoahGenerationalHeap::retire_plab(PLAB* plab, Thread* thread) {
  // We don't enforce limits on plab evacuations.  We let it consume all available old-gen memory in order to reduce
  // probability of an evacuation failure.  We do enforce limits on promotion, to make sure that excessive promotion
  // does not result in an old-gen evacuation failure.  Note that a failed promotion is relatively harmless.  Any
  // object that fails to promote in the current cycle will be eligible for promotion in a subsequent cycle.

  // When the plab was instantiated, its entirety was treated as if the entire buffer was going to be dedicated to
  // promotions.  Now that we are retiring the buffer, we adjust for the reality that the plab is not entirely promotions.
  //  1. Some of the plab may have been dedicated to evacuations.
  //  2. Some of the plab may have been abandoned due to waste (at the end of the plab).
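  // Whatever portion of the plab was not actually used for promotions is returned to
  // the old generation's promotion budget below via unexpend_promoted().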
  size_t not_promoted =
          ShenandoahThreadLocalData::get_plab_actual_size(thread) - ShenandoahThreadLocalData::get_plab_promoted(thread);
  ShenandoahThreadLocalData::reset_plab_promoted(thread);
  ShenandoahThreadLocalData::set_plab_actual_size(thread, 0);
  if (not_promoted > 0) {
    old_generation()->unexpend_promoted(not_promoted);
  }
  const size_t original_waste = plab->waste();
  HeapWord* const top = plab->top();

  // plab->retire() overwrites unused memory between plab->top() and plab->hard_end() with a dummy object to make memory parsable.
  // It adds the size of this unused memory, in words, to plab->waste().
  plab->retire();
  if (top != nullptr && plab->waste() > original_waste && is_in_old(top)) {
    // If retiring the plab created a filler object, then we need to register it with our card scanner so it can
    // safely walk the region backing the plab.
    log_debug(gc)("retire_plab() is registering remnant of size " SIZE_FORMAT " at " PTR_FORMAT,
                  plab->waste() - original_waste, p2i(top));
    // No lock is necessary because the PLAB memory is aligned on card boundaries.
    old_generation()->card_scan()->register_object_without_lock(top);
  }
}

void ShenandoahGenerationalHeap::retire_plab(PLAB* plab) {
  Thread* thread = Thread::current();
  retire_plab(plab, thread);
}

ShenandoahGenerationalHeap::TransferResult ShenandoahGenerationalHeap::balance_generations() {
  shenandoah_assert_heaplocked_or_safepoint();

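  // The region balance was set earlier by compute_old_generation_balance(): a positive
  // balance means old has surplus regions to give to young, a negative balance means
  // young should transfer regions to old.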
  ShenandoahOldGeneration* old_gen = old_generation();
  const ssize_t old_region_balance = old_gen->get_region_balance();
  old_gen->set_region_balance(0);

  if (old_region_balance > 0) {
    const auto old_region_surplus = checked_cast<size_t>(old_region_balance);
    const bool success = generation_sizer()->transfer_to_young(old_region_surplus);
    return TransferResult {
      success, old_region_surplus, "young"
    };
  }

  if (old_region_balance < 0) {
    const auto old_region_deficit = checked_cast<size_t>(-old_region_balance);
    const bool success = generation_sizer()->transfer_to_old(old_region_deficit);
    if (!success) {
      old_gen->handle_failed_transfer();
    }
    return TransferResult {
      success, old_region_deficit, "old"
    };
  }

  return TransferResult {true, 0, "none"};
}

// Make sure old-generation is large enough, but no larger than is necessary, to hold mixed evacuations
// and promotions, if we anticipate either. Any deficit is provided by the young generation, subject to
// xfer_limit, and any surplus is transferred to the young generation.
// xfer_limit is the maximum we're able to transfer from young to old.
void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t old_xfer_limit, size_t old_cset_regions) {

  // We can limit the old reserve to the size of anticipated promotions:
  // max_old_reserve is an upper bound on memory evacuated from old and promoted to old,
  // clamped by the old generation space available.
  //
  // Here's the algebra.
  // Let SOEP = ShenandoahOldEvacRatioPercent,
  //     OE = old evac,
  //     YE = young evac, and
  //     TE = total evac = OE + YE
  // By definition:
  //            SOEP/100 = OE/TE
  //                     = OE/(OE+YE)
  //  => SOEP/(100-SOEP) = OE/((OE+YE)-OE)      // componendo-dividendo: If a/b = c/d, then a/(b-a) = c/(d-c)
  //                     = OE/YE
  //  =>              OE = YE*SOEP/(100-SOEP)

  // We have to be careful in the event that SOEP is set to 100 by the user.
  assert(ShenandoahOldEvacRatioPercent <= 100, "Error");
  const size_t old_available = old_generation()->available();
  // The free set will reserve this amount of memory to hold young evacuations
  const size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100;

  // In the case that ShenandoahOldEvacRatioPercent equals 100, max_old_reserve is limited only by xfer_limit.

  const double bound_on_old_reserve = old_available + old_xfer_limit + young_reserve;
  const double max_old_reserve = (ShenandoahOldEvacRatioPercent == 100)?
                                 bound_on_old_reserve: MIN2(double(young_reserve * ShenandoahOldEvacRatioPercent) / double(100 - ShenandoahOldEvacRatioPercent),
                                                            bound_on_old_reserve);

  const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();

  // Decide how much old space we should reserve for a mixed collection
  double reserve_for_mixed = 0;
  if (old_generation()->has_unprocessed_collection_candidates()) {
    // We want this much memory to be unfragmented in order to reliably evacuate old.  This is conservative because we
    // may not evacuate the entirety of unprocessed candidates in a single mixed evacuation.
    const double max_evac_need = (double(old_generation()->unprocessed_collection_candidates_live_memory()) * ShenandoahOldEvacWaste);
    assert(old_available >= old_generation()->free_unaffiliated_regions() * region_size_bytes,
           "Unaffiliated available must be less than total available");
    const double old_fragmented_available = double(old_available - old_generation()->free_unaffiliated_regions() * region_size_bytes);
    reserve_for_mixed = max_evac_need + old_fragmented_available;
    if (reserve_for_mixed > max_old_reserve) {
      reserve_for_mixed = max_old_reserve;
    }
  }

  // Decide how much space we should reserve for promotions from young
  size_t reserve_for_promo = 0;
  const size_t promo_load = old_generation()->get_promotion_potential();
  const bool doing_promotions = promo_load > 0;
  if (doing_promotions) {
    // We're promoting and have a bound on the maximum amount that can be promoted
    assert(max_old_reserve >= reserve_for_mixed, "Sanity");
    const size_t available_for_promotions = max_old_reserve - reserve_for_mixed;
    reserve_for_promo = MIN2((size_t)(promo_load * ShenandoahPromoEvacWaste), available_for_promotions);
  }

  // This is the total old we want to ideally reserve
  const size_t old_reserve = reserve_for_mixed + reserve_for_promo;
  assert(old_reserve <= max_old_reserve, "cannot reserve more than max for old evacuations");

  // We now check if the old generation is running a surplus or a deficit.
  const size_t max_old_available = old_generation()->available() + old_cset_regions * region_size_bytes;
  if (max_old_available >= old_reserve) {
    // We are running a surplus, so the old region surplus can go to young
    const size_t old_surplus = (max_old_available - old_reserve) / region_size_bytes;
    const size_t unaffiliated_old_regions = old_generation()->free_unaffiliated_regions() + old_cset_regions;
    const size_t old_region_surplus = MIN2(old_surplus, unaffiliated_old_regions);
    old_generation()->set_region_balance(checked_cast<ssize_t>(old_region_surplus));
  } else {
    // We are running a deficit which we'd like to fill from young.
    // Ignore that this will directly impact young_generation()->max_capacity(),
    // indirectly impacting young_reserve and old_reserve.  These computations are conservative.
    // Note that deficit is rounded up by one region.
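    // For example, a deficit of 5 MB with 4 MB regions yields old_need = (5 + 4 - 1) / 4 = 2 regions.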
    const size_t old_need = (old_reserve - max_old_available + region_size_bytes - 1) / region_size_bytes;
    const size_t max_old_region_xfer = old_xfer_limit / region_size_bytes;

    // Round down the regions we can transfer from young to old. If we're running short
    // on young-gen memory, we restrict the xfer. Old-gen collection activities will be
    // curtailed if the budget is restricted.
    const size_t old_region_deficit = MIN2(old_need, max_old_region_xfer);
    old_generation()->set_region_balance(0 - checked_cast<ssize_t>(old_region_deficit));
  }
}

void ShenandoahGenerationalHeap::reset_generation_reserves() {
  young_generation()->set_evacuation_reserve(0);
  old_generation()->set_evacuation_reserve(0);
  old_generation()->set_promoted_reserve(0);
}

void ShenandoahGenerationalHeap::TransferResult::print_on(const char* when, outputStream* ss) const {
  auto heap = ShenandoahGenerationalHeap::heap();
  ShenandoahYoungGeneration* const young_gen = heap->young_generation();
  ShenandoahOldGeneration* const old_gen = heap->old_generation();
  const size_t young_available = young_gen->available();
  const size_t old_available = old_gen->available();
  ss->print_cr("After %s, %s " SIZE_FORMAT " regions to %s to prepare for next gc, old available: "
                     PROPERFMT ", young_available: " PROPERFMT,
                     when,
                     success? "successfully transferred": "failed to transfer", region_count, region_destination,
                     PROPERFMTARGS(old_available), PROPERFMTARGS(young_available));
}

void ShenandoahGenerationalHeap::coalesce_and_fill_old_regions(bool concurrent) {
  class ShenandoahGlobalCoalesceAndFill : public WorkerTask {
  private:
      ShenandoahPhaseTimings::Phase _phase;
      ShenandoahRegionIterator _regions;
  public:
    explicit ShenandoahGlobalCoalesceAndFill(ShenandoahPhaseTimings::Phase phase) :
      WorkerTask("Shenandoah Global Coalesce"),
      _phase(phase) {}

    void work(uint worker_id) override {
      ShenandoahWorkerTimingsTracker timer(_phase,
                                           ShenandoahPhaseTimings::ScanClusters,
                                           worker_id, true);
      ShenandoahHeapRegion* region;
      while ((region = _regions.next()) != nullptr) {
        // old region is not in the collection set and was not immediately trashed
        if (region->is_old() && region->is_active() && !region->is_humongous()) {
          // Reset the coalesce and fill boundary because this is a global collect
          // and cannot be preempted by young collects. We want to be sure the entire
          // region is coalesced here and does not resume from a previously interrupted
          // or completed coalescing.
          region->begin_preemptible_coalesce_and_fill();
          region->oop_coalesce_and_fill(false);
        }
      }
    }
  };

  ShenandoahPhaseTimings::Phase phase = concurrent ?
          ShenandoahPhaseTimings::conc_coalesce_and_fill :
          ShenandoahPhaseTimings::degen_gc_coalesce_and_fill;

  // This is not cancellable
  ShenandoahGlobalCoalesceAndFill coalesce(phase);
  workers()->run_task(&coalesce);
  old_generation()->set_parsable(true);
}

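// Update-refs worker task for the generational heap. Each worker first walks its share
// of the regular region iterator, updating references in young regions (and in old
// regions during a global cycle), and then switches to card-aligned chunks of old
// regions to update references found through the remembered set.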
 763 template<bool CONCURRENT>
 764 class ShenandoahGenerationalUpdateHeapRefsTask : public WorkerTask {
 765 private:
 766   ShenandoahGenerationalHeap* _heap;
 767   ShenandoahRegionIterator* _regions;
 768   ShenandoahRegionChunkIterator* _work_chunks;
 769 
 770 public:
 771   explicit ShenandoahGenerationalUpdateHeapRefsTask(ShenandoahRegionIterator* regions,
 772                                                     ShenandoahRegionChunkIterator* work_chunks) :
 773           WorkerTask("Shenandoah Update References"),
 774           _heap(ShenandoahGenerationalHeap::heap()),
 775           _regions(regions),
 776           _work_chunks(work_chunks)
 777   {
 778     bool old_bitmap_stable = _heap->old_generation()->is_mark_complete();
 779     log_debug(gc, remset)("Update refs, scan remembered set using bitmap: %s", BOOL_TO_STR(old_bitmap_stable));
 780   }
 781 
 782   void work(uint worker_id) {
 783     if (CONCURRENT) {
 784       ShenandoahConcurrentWorkerSession worker_session(worker_id);
 785       ShenandoahSuspendibleThreadSetJoiner stsj;
 786       do_work<ShenandoahConcUpdateRefsClosure>(worker_id);
 787     } else {
 788       ShenandoahParallelWorkerSession worker_session(worker_id);
 789       do_work<ShenandoahSTWUpdateRefsClosure>(worker_id);
 790     }
 791   }
 792 
 793 private:
 794   template<class T>
 795   void do_work(uint worker_id) {
 796     T cl;
 797 
 798     if (CONCURRENT && (worker_id == 0)) {
 799       // We ask the first worker to replenish the Mutator free set by moving regions previously reserved to hold the
 800       // results of evacuation.  These reserves are no longer necessary because evacuation has completed.
 801       size_t cset_regions = _heap->collection_set()->count();
 802 
 803       // Now that evacuation is done, we can reassign any regions that had been reserved to hold the results of evacuation
 804       // to the mutator free set.  At the end of GC, we will have cset_regions newly evacuated fully empty regions from
 805       // which we will be able to replenish the Collector free set and the OldCollector free set in preparation for the
 806       // next GC cycle.
 807       _heap->free_set()->move_regions_from_collector_to_mutator(cset_regions);
 808     }
 809     // If !CONCURRENT, there's no value in expanding Mutator free set
 810 
 811     ShenandoahHeapRegion* r = _regions->next();
 812     // We update references for global, old, and young collections.
 813     ShenandoahGeneration* const gc_generation = _heap->gc_generation();
 814     shenandoah_assert_generations_reconciled();
 815     assert(gc_generation->is_mark_complete(), "Expected complete marking");
 816     ShenandoahMarkingContext* const ctx = _heap->marking_context();
 817     bool is_mixed = _heap->collection_set()->has_old_regions();
 818     while (r != nullptr) {
 819       HeapWord* update_watermark = r->get_update_watermark();
 820       assert(update_watermark >= r->bottom(), "sanity");
 821 
 822       log_debug(gc)("Update refs worker " UINT32_FORMAT ", looking at region " SIZE_FORMAT, worker_id, r->index());
 823       bool region_progress = false;
 824       if (r->is_active() && !r->is_cset()) {
 825         if (r->is_young()) {
 826           _heap->marked_object_oop_iterate(r, &cl, update_watermark);
 827           region_progress = true;
 828         } else if (r->is_old()) {
 829           if (gc_generation->is_global()) {
 830 
 831             _heap->marked_object_oop_iterate(r, &cl, update_watermark);
 832             region_progress = true;
 833           }
 834           // Otherwise, this is an old region in a young or mixed cycle.  Process it during a second phase, below.
 835           // Don't bother to report pacing progress in this case.
 836         } else {
 837           // Because updating of references runs concurrently, it is possible that a FREE inactive region transitions
 838           // to a non-free active region while this loop is executing.  Whenever this happens, the changing of a region's
 839           // active status may propagate at a different speed than the changing of the region's affiliation.
 840 
 841           // When we reach this control point, it is because a race has allowed a region's is_active() status to be seen
 842           // by this thread before the region's affiliation() is seen by this thread.
 843 
 844           // It's ok for this race to occur because the newly transformed region does not have any references to be
 845           // updated.
 846 
 847           assert(r->get_update_watermark() == r->bottom(),
 848                  "%s Region " SIZE_FORMAT " is_active but not recognized as YOUNG or OLD so must be newly transitioned from FREE",
 849                  r->affiliation_name(), r->index());
 850         }
 851       }
 852 
 853       if (region_progress && ShenandoahPacing) {
 854         _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
 855       }
 856 
 857       if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
 858         return;
 859       }
 860 
 861       r = _regions->next();
 862     }
 863 
 864     if (!gc_generation->is_global()) {
 865       // Since this is generational and not GLOBAL, we have to process the remembered set.  There's no remembered
 866       // set processing if not in generational mode or if GLOBAL mode.
 867 
 868       // After this thread has exhausted its traditional update-refs work, it continues with updating refs within
 869       // remembered set. The remembered set workload is better balanced between threads, so threads that are "behind"
 870       // can catch up with other threads during this phase, allowing all threads to work more effectively in parallel.
 871       update_references_in_remembered_set(worker_id, cl, ctx, is_mixed);
 872     }
 873   }
 874 
 875   template<class T>
 876   void update_references_in_remembered_set(uint worker_id, T &cl, const ShenandoahMarkingContext* ctx, bool is_mixed) {
 877 
 878     struct ShenandoahRegionChunk assignment;
 879     ShenandoahScanRemembered* scanner = _heap->old_generation()->card_scan();
 880 
 881     while (!_heap->check_cancelled_gc_and_yield(CONCURRENT) && _work_chunks->next(&assignment)) {
 882       // Keep grabbing next work chunk to process until finished, or asked to yield
 883       ShenandoahHeapRegion* r = assignment._r;
 884       if (r->is_active() && !r->is_cset() && r->is_old()) {
 885         HeapWord* start_of_range = r->bottom() + assignment._chunk_offset;
 886         HeapWord* end_of_range = r->get_update_watermark();
 887         if (end_of_range > start_of_range + assignment._chunk_size) {
 888           end_of_range = start_of_range + assignment._chunk_size;
 889         }
 890 
 891         if (start_of_range >= end_of_range) {
 892           continue;
 893         }
 894 
 895         // Old region in a young cycle or mixed cycle.
 896         if (is_mixed) {
 897           if (r->is_humongous()) {
 898             // Need to examine both dirty and clean cards during mixed evac.
 899             r->oop_iterate_humongous_slice_all(&cl,start_of_range, assignment._chunk_size);
 900           } else {
 901             // Since this is mixed evacuation, old regions that are candidates for collection have not been coalesced
 902             // and filled.  This will use mark bits to find objects that need to be updated.
 903             update_references_in_old_region(cl, ctx, scanner, r, start_of_range, end_of_range);
 904           }
 905         } else {
 906           // This is a young evacuation
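          // Work chunks are multiples of whole card clusters. For illustration, assuming
          // 512-byte cards and 64 cards per cluster, each cluster spans 32 KB of old-gen memory.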
          size_t cluster_size = CardTable::card_size_in_words() * ShenandoahCardCluster::CardsPerCluster;
          size_t clusters = assignment._chunk_size / cluster_size;
          assert(clusters * cluster_size == assignment._chunk_size, "Chunk assignment must align on cluster boundaries");
          scanner->process_region_slice(r, assignment._chunk_offset, clusters, end_of_range, &cl, true, worker_id);
        }

        if (ShenandoahPacing) {
          _heap->pacer()->report_updaterefs(pointer_delta(end_of_range, start_of_range));
        }
      }
    }
  }

  template<class T>
  void update_references_in_old_region(T &cl, const ShenandoahMarkingContext* ctx, ShenandoahScanRemembered* scanner,
                                    const ShenandoahHeapRegion* r, HeapWord* start_of_range,
                                    HeapWord* end_of_range) const {
    // In case last object in my range spans boundary of my chunk, I may need to scan all the way to top()
    ShenandoahObjectToOopBoundedClosure<T> objs(&cl, start_of_range, r->top());

    // Any object that begins in a previous range is part of a different scanning assignment.  Any object that
    // starts after end_of_range is also not my responsibility.  (Either allocated during evacuation, so does
    // not hold pointers to from-space, or is beyond the range of my assigned work chunk.)

    // Find the first object that begins in my range, if there is one. Note that `p` will be set to `end_of_range`
    // when no live object is found in the range.
    HeapWord* tams = ctx->top_at_mark_start(r);
    HeapWord* p = get_first_object_start_word(ctx, scanner, tams, start_of_range, end_of_range);

    while (p < end_of_range) {
      // p is known to point to the beginning of marked object obj
      oop obj = cast_to_oop(p);
      objs.do_object(obj);
      HeapWord* prev_p = p;
      p += obj->size();
      if (p < tams) {
        p = ctx->get_next_marked_addr(p, tams);
        // If there are no more marked objects before tams, this returns tams.  Note that tams is
        // either >= end_of_range, or tams is the start of an object that is marked.
      }
      assert(p != prev_p, "Lack of forward progress");
    }
  }

  HeapWord* get_first_object_start_word(const ShenandoahMarkingContext* ctx, ShenandoahScanRemembered* scanner, HeapWord* tams,
                                        HeapWord* start_of_range, HeapWord* end_of_range) const {
    HeapWord* p = start_of_range;

    if (p >= tams) {
      // We cannot use ctx->is_marked(obj) to test whether an object begins at this address.  Instead,
      // we need to use the remembered set crossing map to advance p to the first object that starts
      // within the enclosing card.
      size_t card_index = scanner->card_index_for_addr(start_of_range);
      while (true) {
        HeapWord* first_object = scanner->first_object_in_card(card_index);
        if (first_object != nullptr) {
          p = first_object;
          break;
        } else if (scanner->addr_for_card_index(card_index + 1) < end_of_range) {
          card_index++;
        } else {
          // Signal that no object was found in range
          p = end_of_range;
          break;
        }
      }
    } else if (!ctx->is_marked(cast_to_oop(p))) {
      p = ctx->get_next_marked_addr(p, tams);
      // If there are no more marked objects before tams, this returns tams.
      // Note that tams is either >= end_of_range, or tams is the start of an object that is marked.
    }
    return p;
  }
};

void ShenandoahGenerationalHeap::update_heap_references(bool concurrent) {
  assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
  const uint nworkers = workers()->active_workers();
  ShenandoahRegionChunkIterator work_list(nworkers);
  if (concurrent) {
    ShenandoahGenerationalUpdateHeapRefsTask<true> task(&_update_refs_iterator, &work_list);
    workers()->run_task(&task);
  } else {
    ShenandoahGenerationalUpdateHeapRefsTask<false> task(&_update_refs_iterator, &work_list);
    workers()->run_task(&task);
  }

  if (ShenandoahEnableCardStats) {
    // Only do this if we are collecting card stats
    ShenandoahScanRemembered* card_scan = old_generation()->card_scan();
    assert(card_scan != nullptr, "Card table must exist when card stats are enabled");
    card_scan->log_card_stats(nworkers, CARD_STAT_UPDATE_REFS);
  }
}

struct ShenandoahCompositeRegionClosure {
  template<typename C1, typename C2>
  class Closure : public ShenandoahHeapRegionClosure {
  private:
    C1 &_c1;
    C2 &_c2;

  public:
    Closure(C1 &c1, C2 &c2) : ShenandoahHeapRegionClosure(), _c1(c1), _c2(c2) {}

    void heap_region_do(ShenandoahHeapRegion* r) override {
      _c1.heap_region_do(r);
      _c2.heap_region_do(r);
    }

    bool is_thread_safe() override {
      return _c1.is_thread_safe() && _c2.is_thread_safe();
    }
  };

  template<typename C1, typename C2>
  static Closure<C1, C2> of(C1 &c1, C2 &c2) {
    return Closure<C1, C2>(c1, c2);
  }
};

class ShenandoahUpdateRegionAges : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* _ctx;

public:
  explicit ShenandoahUpdateRegionAges(ShenandoahMarkingContext* ctx) : _ctx(ctx) { }

  void heap_region_do(ShenandoahHeapRegion* r) override {
    // Maintenance of region age must follow evacuation in order to account for
    // evacuation allocations within survivor regions.  We consult region age during
    // the subsequent evacuation to determine whether certain objects need to
    // be promoted.
    if (r->is_young() && r->is_active()) {
      HeapWord *tams = _ctx->top_at_mark_start(r);
      HeapWord *top = r->top();

      // Allocations move the watermark when top moves.  However, compacting
      // objects will sometimes lower top beneath the watermark, after which,
      // attempts to read the watermark will assert out (watermark should not be
      // higher than top).
      if (top > tams) {
        // There have been allocations in this region since the start of the cycle.
        // Any objects new to this region must not assimilate elevated age.
        r->reset_age();
      } else if (ShenandoahGenerationalHeap::heap()->is_aging_cycle()) {
        r->increment_age();
      }
    }
  }

  bool is_thread_safe() override {
    return true;
  }
};

void ShenandoahGenerationalHeap::final_update_refs_update_region_states() {
  ShenandoahSynchronizePinnedRegionStates pins;
  ShenandoahUpdateRegionAges ages(active_generation()->complete_marking_context());
  auto cl = ShenandoahCompositeRegionClosure::of(pins, ages);
  parallel_heap_region_iterate(&cl);
}

void ShenandoahGenerationalHeap::complete_degenerated_cycle() {
  shenandoah_assert_heaplocked_or_safepoint();
  if (is_concurrent_old_mark_in_progress()) {
    // This is still necessary for degenerated cycles because the degeneration point may occur
    // after final mark of the young generation. See ShenandoahConcurrentGC::op_final_updaterefs for
    // a more detailed explanation.
    old_generation()->transfer_pointers_from_satb();
  }

  // We defer generation resizing actions until after cset regions have been recycled.
  TransferResult result = balance_generations();
  LogTarget(Info, gc, ergo) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    result.print_on("Degenerated GC", &ls);
  }

  // In case degeneration interrupted concurrent evacuation or update references, we need to clean up
  // transient state. Otherwise, these actions have no effect.
  reset_generation_reserves();

  if (!old_generation()->is_parsable()) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_coalesce_and_fill);
    coalesce_and_fill_old_regions(false);
  }
}

void ShenandoahGenerationalHeap::complete_concurrent_cycle() {
  if (!old_generation()->is_parsable()) {
    // Class unloading may render the card offsets unusable, so we must rebuild them before
    // the next remembered set scan. We _could_ let the control thread do this sometime after
    // the global cycle has completed and before the next young collection, but under memory
    // pressure the control thread may not have the time (that is, because it's running back
    // to back GCs). In that scenario, we would have to make the old regions parsable before
    // we could start a young collection. This could delay the start of the young cycle and
    // throw off the heuristics.
    entry_global_coalesce_and_fill();
  }

  TransferResult result;
  {
    ShenandoahHeapLocker locker(lock());

    result = balance_generations();
    reset_generation_reserves();
  }

  LogTarget(Info, gc, ergo) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    result.print_on("Concurrent GC", &ls);
  }
}

void ShenandoahGenerationalHeap::entry_global_coalesce_and_fill() {
  const char* msg = "Coalescing and filling old regions";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_coalesce_and_fill);

  TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
  EventMark em("%s", msg);
  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent coalesce and fill");

  coalesce_and_fill_old_regions(true);
}

void ShenandoahGenerationalHeap::update_region_ages(ShenandoahMarkingContext* ctx) {
  ShenandoahUpdateRegionAges cl(ctx);
  parallel_heap_region_iterate(&cl);
}