1 /*
   2  * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "memory/allocation.hpp"
  27 #include "memory/universe.hpp"
  28 
  29 #include "gc/shared/gcArguments.hpp"
  30 #include "gc/shared/gcTimer.hpp"
  31 #include "gc/shared/gcTraceTime.inline.hpp"
  32 #include "gc/shared/locationPrinter.inline.hpp"
  33 #include "gc/shared/memAllocator.hpp"
  34 #include "gc/shared/plab.hpp"
  35 #include "gc/shared/tlab_globals.hpp"
  36 
  37 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  38 #include "gc/shenandoah/shenandoahCardTable.hpp"
  39 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  40 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  41 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  42 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
  43 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  44 #include "gc/shenandoah/shenandoahControlThread.hpp"
  45 #include "gc/shenandoah/shenandoahRegulatorThread.hpp"
  46 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  47 #include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
  48 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  49 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  50 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  51 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  52 #include "gc/shenandoah/shenandoahInitLogger.hpp"
  54 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
  55 #include "gc/shenandoah/shenandoahMetrics.hpp"
  56 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  57 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  58 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  59 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
  60 #include "gc/shenandoah/shenandoahPadding.hpp"
  61 #include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
  62 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  63 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  64 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
  65 #include "gc/shenandoah/shenandoahStringDedup.hpp"
  66 #include "gc/shenandoah/shenandoahSTWMark.hpp"
  67 #include "gc/shenandoah/shenandoahUtils.hpp"
  68 #include "gc/shenandoah/shenandoahVerifier.hpp"
  69 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  70 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  71 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  72 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  73 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  74 #include "gc/shenandoah/mode/shenandoahGenerationalMode.hpp"
  75 #include "gc/shenandoah/mode/shenandoahIUMode.hpp"
  76 #include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
  77 #include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
  78 
  79 #if INCLUDE_JFR
  80 #include "gc/shenandoah/shenandoahJfrSupport.hpp"
  81 #endif
  82 
  83 #include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
  84 
  85 #include "classfile/systemDictionary.hpp"
  86 #include "code/codeCache.hpp"
  87 #include "memory/classLoaderMetaspace.hpp"
  88 #include "memory/metaspaceUtils.hpp"
  89 #include "oops/compressedOops.inline.hpp"
  90 #include "prims/jvmtiTagMap.hpp"
  91 #include "runtime/atomic.hpp"
  92 #include "runtime/globals.hpp"
  93 #include "runtime/interfaceSupport.inline.hpp"
  94 #include "runtime/java.hpp"
  95 #include "runtime/orderAccess.hpp"
  96 #include "runtime/safepointMechanism.hpp"
  97 #include "runtime/vmThread.hpp"
  98 #include "services/mallocTracker.hpp"
  99 #include "services/memTracker.hpp"
 100 #include "utilities/events.hpp"
 101 #include "utilities/powerOfTwo.hpp"
 102 
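     // The two pretouch tasks below walk the heap regions in parallel (via ShenandoahRegionIterator)
     // and touch the backing memory of committed regions and of the corresponding mark-bitmap ranges,
     // so the OS faults the pages in up front rather than lazily on first access.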
 103 class ShenandoahPretouchHeapTask : public WorkerTask {
 104 private:
 105   ShenandoahRegionIterator _regions;
 106   const size_t _page_size;
 107 public:
 108   ShenandoahPretouchHeapTask(size_t page_size) :
 109     WorkerTask("Shenandoah Pretouch Heap"),
 110     _page_size(page_size) {}
 111 
 112   virtual void work(uint worker_id) {
 113     ShenandoahHeapRegion* r = _regions.next();
 114     while (r != NULL) {
 115       if (r->is_committed()) {
 116         os::pretouch_memory(r->bottom(), r->end(), _page_size);
 117       }
 118       r = _regions.next();
 119     }
 120   }
 121 };
 122 
 123 class ShenandoahPretouchBitmapTask : public WorkerTask {
 124 private:
 125   ShenandoahRegionIterator _regions;
 126   char* _bitmap_base;
 127   const size_t _bitmap_size;
 128   const size_t _page_size;
 129 public:
 130   ShenandoahPretouchBitmapTask(char* bitmap_base, size_t bitmap_size, size_t page_size) :
 131     WorkerTask("Shenandoah Pretouch Bitmap"),
 132     _bitmap_base(bitmap_base),
 133     _bitmap_size(bitmap_size),
 134     _page_size(page_size) {}
 135 
 136   virtual void work(uint worker_id) {
 137     ShenandoahHeapRegion* r = _regions.next();
 138     while (r != NULL) {
 139       size_t start = r->index()       * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 140       size_t end   = (r->index() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 141       assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " <= " SIZE_FORMAT, end, _bitmap_size);
 142 
 143       if (r->is_committed()) {
 144         os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);
 145       }
 146 
 147       r = _regions.next();
 148     }
 149   }
 150 };
 151 
 152 jint ShenandoahHeap::initialize() {
 153   //
 154   // Figure out heap sizing
 155   //
 156 
 157   size_t init_byte_size = InitialHeapSize;
 158   size_t min_byte_size  = MinHeapSize;
 159   size_t max_byte_size  = MaxHeapSize;
 160   size_t heap_alignment = HeapAlignment;
 161 
 162   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 163 
 164   Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
 165   Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
 166 
 167   _num_regions = ShenandoahHeapRegion::region_count();
 168   assert(_num_regions == (max_byte_size / reg_size_bytes),
 169          "Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT,
 170          _num_regions, max_byte_size, reg_size_bytes);
 171 
 172   size_t num_committed_regions = init_byte_size / reg_size_bytes;
 173   num_committed_regions = MIN2(num_committed_regions, _num_regions);
 174   assert(num_committed_regions <= _num_regions, "sanity");
 175   _initial_size = num_committed_regions * reg_size_bytes;
 176 
 177   size_t num_min_regions = min_byte_size / reg_size_bytes;
 178   num_min_regions = MIN2(num_min_regions, _num_regions);
 179   assert(num_min_regions <= _num_regions, "sanity");
 180   _minimum_size = num_min_regions * reg_size_bytes;
 181 
 182   // Default to max heap size.
 183   _soft_max_size = _num_regions * reg_size_bytes;
 184 
 185   _committed = _initial_size;
 186 
 187   // Now we know the number of regions and heap sizes, initialize the heuristics.
 188   initialize_generations();
 189   initialize_heuristics();
 190 
 191   size_t heap_page_size   = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 192   size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 193   size_t region_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 194 
 195   //
 196   // Reserve and commit memory for heap
 197   //
 198 
 199   ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
 200   initialize_reserved_region(heap_rs);
 201   _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
 202   _heap_region_special = heap_rs.special();
 203 
 204   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
 205          "Misaligned heap: " PTR_FORMAT, p2i(base()));
 206 
 207 #if SHENANDOAH_OPTIMIZED_MARKTASK
 208   // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
 209   // Fail if we ever attempt to address more than we can.
 210   if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
 211     FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
 212                           "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
 213                           "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
 214                 p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
 215     vm_exit_during_initialization("Fatal Error", buf);
 216   }
 217 #endif
 218 
 219   ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
 220   if (!_heap_region_special) {
 221     os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
 222                               "Cannot commit heap memory");
 223   }
 224 
 225   BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this, _heap_region));
 226 
 227   //
 228   // After reserving the Java heap, create the card table, barriers, and workers, in dependency order
 229   //
 230   if (mode()->is_generational()) {
 231     ShenandoahDirectCardMarkRememberedSet *rs;
 232     ShenandoahCardTable* card_table = ShenandoahBarrierSet::barrier_set()->card_table();
 233     size_t card_count = card_table->cards_required(heap_rs.size() / HeapWordSize) - 1;
 234     rs = new ShenandoahDirectCardMarkRememberedSet(ShenandoahBarrierSet::barrier_set()->card_table(), card_count);
 235     _card_scan = new ShenandoahScanRemembered<ShenandoahDirectCardMarkRememberedSet>(rs);
 236   }
 237 
 238   _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
 239   if (_workers == NULL) {
 240     vm_exit_during_initialization("Failed necessary allocation.");
 241   } else {
 242     _workers->initialize_workers();
 243   }
 244 
 245   if (ParallelGCThreads > 1) {
 246     _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread", ParallelGCThreads);
 247     _safepoint_workers->initialize_workers();
 248   }
 249 
 250   //
 251   // Reserve and commit memory for bitmap(s)
 252   //
 253 
 254   _bitmap_size = ShenandoahMarkBitMap::compute_size(heap_rs.size());
 255   _bitmap_size = align_up(_bitmap_size, bitmap_page_size);
 256 
 257   size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();
 258 
 259   guarantee(bitmap_bytes_per_region != 0,
 260             "Bitmap bytes per region should not be zero");
 261   guarantee(is_power_of_2(bitmap_bytes_per_region),
 262             "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
 263 
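       // Sizing note: each slice is either one page of the bitmap (covering several regions, when a
       // page is larger than one region's share of the bitmap) or one region's share of the bitmap
       // (covering a single region, when pages are smaller). Either way, a slice is page-granular.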
 264   if (bitmap_page_size > bitmap_bytes_per_region) {
 265     _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
 266     _bitmap_bytes_per_slice = bitmap_page_size;
 267   } else {
 268     _bitmap_regions_per_slice = 1;
 269     _bitmap_bytes_per_slice = bitmap_bytes_per_region;
 270   }
 271 
 272   guarantee(_bitmap_regions_per_slice >= 1,
 273             "Should have at least one region per slice: " SIZE_FORMAT,
 274             _bitmap_regions_per_slice);
 275 
 276   guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
 277             "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
 278             _bitmap_bytes_per_slice, bitmap_page_size);
 279 
 280   ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
 281   MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
 282   _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
 283   _bitmap_region_special = bitmap.special();
 284 
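       // Commit only the part of the bitmap that covers the initially committed regions, rounded up
       // to whole bitmap slices; the rest is committed later, as additional regions are committed.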
 285   size_t bitmap_init_commit = _bitmap_bytes_per_slice *
 286                               align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
 287   bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
 288   if (!_bitmap_region_special) {
 289     os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
 290                               "Cannot commit bitmap memory");
 291   }
 292 
 293   _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);
 294 
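       // When verification is enabled, reserve a separate bitmap that the verifier uses for its own
       // marking pass, independent of the collector's marking bitmap.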
 295   if (ShenandoahVerify) {
 296     ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
 297     if (!verify_bitmap.special()) {
 298       os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
 299                                 "Cannot commit verification bitmap memory");
 300     }
 301     MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
 302     MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
 303     _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
 304     _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
 305   }
 306 
 307   // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
 308   ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
 309   MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
 310   _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
 311   _aux_bitmap_region_special = aux_bitmap.special();
 312   _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
 313 
 314   //
 315   // Create regions and region sets
 316   //
 317   size_t region_align = align_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE);
 318   size_t region_storage_size = align_up(region_align * _num_regions, region_page_size);
 319   region_storage_size = align_up(region_storage_size, os::vm_allocation_granularity());
 320 
 321   ReservedSpace region_storage(region_storage_size, region_page_size);
 322   MemTracker::record_virtual_memory_type(region_storage.base(), mtGC);
 323   if (!region_storage.special()) {
 324     os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false,
 325                               "Cannot commit region memory");
 326   }
 327 
 328   // Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks.
 329   // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there.
 330   // If not successful, bite the bullet and allocate at whatever address.
 331   {
 332     size_t cset_align = MAX2<size_t>(os::vm_page_size(), os::vm_allocation_granularity());
 333     size_t cset_size = align_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align);
 334 
 335     uintptr_t min = round_up_power_of_2(cset_align);
 336     uintptr_t max = (1u << 30u);
 337 
 338     for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
 339       char* req_addr = (char*)addr;
 340       assert(is_aligned(req_addr, cset_align), "Should be aligned");
 341       ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size(), req_addr);
 342       if (cset_rs.is_reserved()) {
 343         assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
 344         _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 345         break;
 346       }
 347     }
 348 
 349     if (_collection_set == NULL) {
 350       ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size());
 351       _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 352     }
 353   }
 354 
 355   _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
 356   _free_set = new ShenandoahFreeSet(this, _num_regions);
 357 
 358   {
 359     ShenandoahHeapLocker locker(lock());
 360 
 361     for (size_t i = 0; i < _num_regions; i++) {
 362       HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
 363       bool is_committed = i < num_committed_regions;
 364       void* loc = region_storage.base() + i * region_align;
 365 
 366       ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
 367       assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");
 368 
 369       _marking_context->initialize_top_at_mark_start(r);
 370       _regions[i] = r;
 371       assert(!collection_set()->is_in(i), "New region should not be in collection set");
 372     }
 373 
 374     // Initialize to complete
 375     _marking_context->mark_complete();
 376 
 377     _free_set->rebuild();
 378   }
 379 
 380   if (AlwaysPreTouch) {
 381     // For NUMA, it is important to pre-touch the storage under the bitmaps with worker threads,
 382     // before initialize() below zeroes it with the initializing thread. For any given region,
 383     // we touch the region and the corresponding bitmaps from the same thread.
 384     ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
 385 
 386     _pretouch_heap_page_size = heap_page_size;
 387     _pretouch_bitmap_page_size = bitmap_page_size;
 388 
 389 #ifdef LINUX
 390     // UseTransparentHugePages would madvise that backing memory can be coalesced into huge
 391     // pages. But the kernel needs to know that every small page is used, in order to coalesce
 392     // them into huge ones. Therefore, we need to pretouch with the smaller pages.
 393     if (UseTransparentHugePages) {
 394       _pretouch_heap_page_size = (size_t)os::vm_page_size();
 395       _pretouch_bitmap_page_size = (size_t)os::vm_page_size();
 396     }
 397 #endif
 398 
 399     // OS memory managers may want to coalesce back-to-back pages. Make their jobs
 400     // simpler by pre-touching contiguous spaces (heap and bitmap) separately.
 401 
 402     ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, _pretouch_bitmap_page_size);
 403     _workers->run_task(&bcl);
 404 
 405     ShenandoahPretouchHeapTask hcl(_pretouch_heap_page_size);
 406     _workers->run_task(&hcl);
 407   }
 408 
 409   //
 410   // Initialize the rest of GC subsystems
 411   //
 412 
 413   _liveness_cache = NEW_C_HEAP_ARRAY(ShenandoahLiveData*, _max_workers, mtGC);
 414   for (uint worker = 0; worker < _max_workers; worker++) {
 415     _liveness_cache[worker] = NEW_C_HEAP_ARRAY(ShenandoahLiveData, _num_regions, mtGC);
 416     Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
 417   }
 418 
 419   // There should probably be Shenandoah-specific options for these,
 420   // just as there are G1-specific options.
 421   {
 422     ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
 423     satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
 424     satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
 425   }
 426 
 427   _monitoring_support = new ShenandoahMonitoringSupport(this);
 428   _phase_timings = new ShenandoahPhaseTimings(max_workers());
 429   ShenandoahCodeRoots::initialize();
 430 
 431   if (ShenandoahPacing) {
 432     _pacer = new ShenandoahPacer(this);
 433     _pacer->setup_for_idle();
 434   } else {
 435     _pacer = NULL;
 436   }
 437 
 438   _control_thread = new ShenandoahControlThread();
 439   _regulator_thread = new ShenandoahRegulatorThread(_control_thread);
 440 
 441   ShenandoahInitLogger::print();
 442 
 443   return JNI_OK;
 444 }
 445 
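     // Split the maximum and soft-maximum heap capacities between the young and old generations.
     // The global generation spans the whole heap, so it only needs the worker count here.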
 446 void ShenandoahHeap::initialize_generations() {
 447   size_t max_capacity_new      = young_generation_capacity(max_capacity());
 448   size_t soft_max_capacity_new = young_generation_capacity(soft_max_capacity());
 449   size_t max_capacity_old      = max_capacity() - max_capacity_new;
 450   size_t soft_max_capacity_old = soft_max_capacity() - soft_max_capacity_new;
 451 
 452   _young_generation = new ShenandoahYoungGeneration(_max_workers, max_capacity_new, soft_max_capacity_new);
 453   _old_generation = new ShenandoahOldGeneration(_max_workers, max_capacity_old, soft_max_capacity_old);
 454   _global_generation = new ShenandoahGlobalGeneration(_max_workers);
 455 }
 456 
 457 void ShenandoahHeap::initialize_heuristics() {
 458   if (ShenandoahGCMode != NULL) {
 459     if (strcmp(ShenandoahGCMode, "satb") == 0) {
 460       _gc_mode = new ShenandoahSATBMode();
 461     } else if (strcmp(ShenandoahGCMode, "iu") == 0) {
 462       _gc_mode = new ShenandoahIUMode();
 463     } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
 464       _gc_mode = new ShenandoahPassiveMode();
 465     } else if (strcmp(ShenandoahGCMode, "generational") == 0) {
 466       _gc_mode = new ShenandoahGenerationalMode();
 467     } else {
 468       vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
 469     }
 470   } else {
 471     vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
 472   }
 473   _gc_mode->initialize_flags();
 474   if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 475     vm_exit_during_initialization(
 476             err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 477                     _gc_mode->name()));
 478   }
 479   if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
 480     vm_exit_during_initialization(
 481             err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 482                     _gc_mode->name()));
 483   }
 484 
 485   _global_generation->initialize_heuristics(_gc_mode);
 486   if (mode()->is_generational()) {
 487     _young_generation->initialize_heuristics(_gc_mode);
 488     _old_generation->initialize_heuristics(_gc_mode);
 489 
 490     ShenandoahEvacWaste = ShenandoahGenerationalEvacWaste;
 491   }
 492 }
 493 
 494 #ifdef _MSC_VER
 495 #pragma warning( push )
 496 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 497 #endif
 498 
 499 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 500   CollectedHeap(),
 501   _gc_generation(NULL),
 502   _prepare_for_old_mark(false),
 503   _initial_size(0),
 504   _used(0),
 505   _committed(0),
 506   _max_workers(MAX3(ConcGCThreads, ParallelGCThreads, 1U)),
 507   _workers(NULL),
 508   _safepoint_workers(NULL),
 509   _heap_region_special(false),
 510   _num_regions(0),
 511   _regions(NULL),
 512   _update_refs_iterator(this),
 513   _alloc_supplement_reserve(0),
 514   _promoted_reserve(0),
 515   _old_evac_reserve(0),
 516   _old_evac_expended(0),
 517   _young_evac_reserve(0),
 518   _captured_old_usage(0),
 519   _previous_promotion(0),
 520   _cancel_requested_time(0),
 521   _young_generation(NULL),
 522   _global_generation(NULL),
 523   _old_generation(NULL),
 524   _control_thread(NULL),
 525   _regulator_thread(NULL),
 526   _shenandoah_policy(policy),
 527   _free_set(NULL),
 528   _pacer(NULL),
 529   _verifier(NULL),
 530   _phase_timings(NULL),
 531   _monitoring_support(NULL),
 532   _memory_pool(NULL),
 533   _young_gen_memory_pool(NULL),
 534   _old_gen_memory_pool(NULL),
 535   _stw_memory_manager("Shenandoah Pauses", "end of GC pause"),
 536   _cycle_memory_manager("Shenandoah Cycles", "end of GC cycle"),
 537   _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 538   _soft_ref_policy(),
 539   _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
 540   _marking_context(NULL),
 541   _bitmap_size(0),
 542   _bitmap_regions_per_slice(0),
 543   _bitmap_bytes_per_slice(0),
 544   _bitmap_region_special(false),
 545   _aux_bitmap_region_special(false),
 546   _liveness_cache(NULL),
 547   _collection_set(NULL),
 548   _card_scan(NULL)
 549 {
 550 }
 551 
 552 #ifdef _MSC_VER
 553 #pragma warning( pop )
 554 #endif
 555 
 556 void ShenandoahHeap::print_on(outputStream* st) const {
 557   st->print_cr("Shenandoah Heap");
 558   st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
 559                byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
 560                byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
 561                byte_size_in_proper_unit(committed()),    proper_unit_for_byte_size(committed()),
 562                byte_size_in_proper_unit(used()),         proper_unit_for_byte_size(used()));
 563   st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions",
 564                num_regions(),
 565                byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
 566                proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));
 567 
 568   st->print("Status: ");
 569   if (has_forwarded_objects())                 st->print("has forwarded objects, ");
 570   if (is_concurrent_old_mark_in_progress())    st->print("old marking, ");
 571   if (is_concurrent_young_mark_in_progress())  st->print("young marking, ");
 572   if (is_evacuation_in_progress())             st->print("evacuating, ");
 573   if (is_update_refs_in_progress())            st->print("updating refs, ");
 574   if (is_degenerated_gc_in_progress())         st->print("degenerated gc, ");
 575   if (is_full_gc_in_progress())                st->print("full gc, ");
 576   if (is_full_gc_move_in_progress())           st->print("full gc move, ");
 577   if (is_concurrent_weak_root_in_progress())   st->print("concurrent weak roots, ");
 578   if (is_concurrent_strong_root_in_progress() &&
 579       !is_concurrent_weak_root_in_progress())  st->print("concurrent strong roots, ");
 580 
 581   if (cancelled_gc()) {
 582     st->print("cancelled");
 583   } else {
 584     st->print("not cancelled");
 585   }
 586   st->cr();
 587 
 588   st->print_cr("Reserved region:");
 589   st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
 590                p2i(reserved_region().start()),
 591                p2i(reserved_region().end()));
 592 
 593   ShenandoahCollectionSet* cset = collection_set();
 594   st->print_cr("Collection set:");
 595   if (cset != NULL) {
 596     st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address()));
 597     st->print_cr(" - map (biased):  " PTR_FORMAT, p2i(cset->biased_map_address()));
 598   } else {
 599     st->print_cr(" (NULL)");
 600   }
 601 
 602   st->cr();
 603   MetaspaceUtils::print_on(st);
 604 
 605   if (Verbose) {
 606     print_heap_regions_on(st);
 607   }
 608 }
 609 
 610 class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
 611 public:
 612   void do_thread(Thread* thread) {
 613     assert(thread != NULL, "Sanity");
 614     assert(thread->is_Worker_thread(), "Only worker thread expected");
 615     ShenandoahThreadLocalData::initialize_gclab(thread);
 616   }
 617 };
 618 
 619 void ShenandoahHeap::post_initialize() {
 620   CollectedHeap::post_initialize();
 621   MutexLocker ml(Threads_lock);
 622 
 623   ShenandoahInitWorkerGCLABClosure init_gclabs;
 624   _workers->threads_do(&init_gclabs);
 625 
 626   // The GCLAB cannot be initialized early during VM startup, because it cannot determine its max_size.
 627   // Instead, let WorkerThreads initialize the GCLAB when a new worker is created.
 628   _workers->set_initialize_gclab();
 629   if (_safepoint_workers != NULL) {
 630     _safepoint_workers->threads_do(&init_gclabs);
 631     _safepoint_workers->set_initialize_gclab();
 632   }
 633 
 634   JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers());
 635 }
 636 
 638 ShenandoahOldHeuristics* ShenandoahHeap::old_heuristics() {
 639   return (ShenandoahOldHeuristics*) _old_generation->heuristics();
 640 }
 641 
 642 bool ShenandoahHeap::doing_mixed_evacuations() {
 643   return old_heuristics()->unprocessed_old_collection_candidates() > 0;
 644 }
 645 
 646 bool ShenandoahHeap::is_old_bitmap_stable() const {
 647   ShenandoahOldGeneration::State state = _old_generation->state();
 648   return state != ShenandoahOldGeneration::MARKING
 649       && state != ShenandoahOldGeneration::BOOTSTRAPPING;
 650 }
 651 
 652 bool ShenandoahHeap::is_gc_generation_young() const {
 653   return _gc_generation != NULL && _gc_generation->generation_mode() == YOUNG;
 654 }
 655 
 656 // There are three JVM parameters for setting young gen capacity:
 657 //    NewSize, MaxNewSize, NewRatio.
 658 //
 659 // If only NewSize is set, it assigns a fixed size and the other two parameters are ignored.
 660 // Otherwise NewRatio applies.
 661 //
 662 // If NewSize is set in any combination, it provides a lower bound.
 663 //
 664 // If MaxNewSize is set it provides an upper bound.
 665 // If this bound is smaller than NewSize, it supersedes,
 666 // resulting in a fixed size given by MaxNewSize.
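     //
     // For example (illustrative values, not defaults): with a 1024 MB heap and -XX:NewRatio=3, the
     // young capacity is 1024 / (3 + 1) = 256 MB, clamped from below by NewSize and from above by
     // MaxNewSize when those flags are given on the command line.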
 667 size_t ShenandoahHeap::young_generation_capacity(size_t capacity) {
 668   if (strcmp(ShenandoahGCMode, "generational") == 0) {
 669     if (FLAG_IS_CMDLINE(NewSize) && !FLAG_IS_CMDLINE(MaxNewSize) && !FLAG_IS_CMDLINE(NewRatio)) {
 670       capacity = MIN2(NewSize, capacity);
 671     } else {
 672       capacity /= NewRatio + 1;
 673       if (FLAG_IS_CMDLINE(NewSize)) {
 674         capacity = MAX2(NewSize, capacity);
 675       }
 676       if (FLAG_IS_CMDLINE(MaxNewSize)) {
 677         capacity = MIN2(MaxNewSize, capacity);
 678       }
 679     }
 680   }
 681   // else, make no adjustment to global capacity
 682   return capacity;
 683 }
 684 
 685 size_t ShenandoahHeap::used() const {
 686   return Atomic::load(&_used);
 687 }
 688 
 689 size_t ShenandoahHeap::committed() const {
 690   return Atomic::load(&_committed);
 691 }
 692 
 693 void ShenandoahHeap::increase_committed(size_t bytes) {
 694   shenandoah_assert_heaplocked_or_safepoint();
 695   _committed += bytes;
 696 }
 697 
 698 void ShenandoahHeap::decrease_committed(size_t bytes) {
 699   shenandoah_assert_heaplocked_or_safepoint();
 700   _committed -= bytes;
 701 }
 702 
 703 void ShenandoahHeap::increase_used(size_t bytes) {
 704   Atomic::add(&_used, bytes, memory_order_relaxed);
 705 }
 706 
 707 void ShenandoahHeap::set_used(size_t bytes) {
 708   Atomic::store(&_used, bytes);
 709 }
 710 
 711 void ShenandoahHeap::decrease_used(size_t bytes) {
 712   assert(used() >= bytes, "never decrease heap size by more than we've left");
 713   Atomic::sub(&_used, bytes, memory_order_relaxed);
 714 }
 715 
 716 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
 717   size_t bytes = words * HeapWordSize;
 718   if (!waste) {
 719     increase_used(bytes);
 720   }
 721 
 722   if (ShenandoahPacing) {
 723     control_thread()->pacing_notify_alloc(words);
 724     if (waste) {
 725       pacer()->claim_for_alloc(words, true);
 726     }
 727   }
 728 }
 729 
 730 size_t ShenandoahHeap::capacity() const {
 731   return committed();
 732 }
 733 
 734 size_t ShenandoahHeap::max_capacity() const {
 735   return _num_regions * ShenandoahHeapRegion::region_size_bytes();
 736 }
 737 
 738 size_t ShenandoahHeap::soft_max_capacity() const {
 739   size_t v = Atomic::load(&_soft_max_size);
 740   assert(min_capacity() <= v && v <= max_capacity(),
 741          "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
 742          min_capacity(), v, max_capacity());
 743   return v;
 744 }
 745 
 746 void ShenandoahHeap::set_soft_max_capacity(size_t v) {
 747   assert(min_capacity() <= v && v <= max_capacity(),
 748          "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
 749          min_capacity(), v, max_capacity());
 750   Atomic::store(&_soft_max_size, v);
 751 
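       // In generational mode, propagate the new soft max to the young and old generations, using
       // the same young/old split that is applied to the other capacities.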
 752   if (mode()->is_generational()) {
 753     size_t soft_max_capacity_young = young_generation_capacity(_soft_max_size);
 754     size_t soft_max_capacity_old = _soft_max_size - soft_max_capacity_young;
 755     _young_generation->set_soft_max_capacity(soft_max_capacity_young);
 756     _old_generation->set_soft_max_capacity(soft_max_capacity_old);
 757   }
 758 }
 759 
 760 size_t ShenandoahHeap::min_capacity() const {
 761   return _minimum_size;
 762 }
 763 
 764 size_t ShenandoahHeap::initial_capacity() const {
 765   return _initial_size;
 766 }
 767 
 768 bool ShenandoahHeap::is_in(const void* p) const {
 769   HeapWord* heap_base = (HeapWord*) base();
 770   HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
 771   return p >= heap_base && p < last_region_end;
 772 }
 773 
 774 bool ShenandoahHeap::is_in_young(const void* p) const {
 775   return is_in(p) && heap_region_containing(p)->affiliation() == ShenandoahRegionAffiliation::YOUNG_GENERATION;
 776 }
 777 
 778 bool ShenandoahHeap::is_in_old(const void* p) const {
 779   return is_in(p) && heap_region_containing(p)->affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION;
 780 }
 781 
 782 bool ShenandoahHeap::is_in_active_generation(oop obj) const {
 783   if (!mode()->is_generational()) {
 784     // everything is the same single generation
 785     return true;
 786   }
 787 
 788   if (active_generation() == NULL) {
 789     // no collection is happening, only expect this to be called
 790     // when concurrent processing is active, but that could change
 791     return false;
 792   }
 793 
 794   return active_generation()->contains(obj);
 795 }
 796 
 797 void ShenandoahHeap::op_uncommit(double shrink_before, size_t shrink_until) {
 798   assert (ShenandoahUncommit, "should be enabled");
 799 
 800   // The application allocates from the beginning of the heap, and the GC allocates at
 801   // the end of it. It is more efficient to uncommit from the end, so that the application
 802   // can keep using the already committed regions near the bottom of the heap. GC allocations
 803   // are much less frequent, and can therefore accept the re-committing costs.
 804 
 805   size_t count = 0;
 806   for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow
 807     ShenandoahHeapRegion* r = get_region(i - 1);
 808     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
 809       ShenandoahHeapLocker locker(lock());
 810       if (r->is_empty_committed()) {
 811         if (committed() < shrink_until + ShenandoahHeapRegion::region_size_bytes()) {
 812           break;
 813         }
 814 
 815         r->make_uncommitted();
 816         count++;
 817       }
 818     }
 819     SpinPause(); // allow allocators to take the lock
 820   }
 821 
 822   if (count > 0) {
 823     control_thread()->notify_heap_changed();
 824     regulator_thread()->notify_heap_changed();
 825   }
 826 }
 827 
 828 void ShenandoahHeap::handle_old_evacuation(HeapWord* obj, size_t words, bool promotion) {
 829   // Only register the copy of the object that won the evacuation race.
 830   card_scan()->register_object_wo_lock(obj);
 831 
 832   // Mark the entire range of the evacuated object as dirty.  At the next remembered set scan,
 833   // we will clear the dirty bits that do not hold interesting pointers.  It's more efficient to
 834   // do this in batch, in a background GC thread, than to try to carefully dirty only the cards
 835   // that hold interesting pointers right now.
 836   card_scan()->mark_range_as_dirty(obj, words);
 837 
 838   if (promotion) {
 839     // This evacuation was a promotion, track this as allocation against old gen
 840     old_generation()->increase_allocated(words * HeapWordSize);
 841   }
 842 }
 843 
 844 void ShenandoahHeap::handle_old_evacuation_failure() {
 845   if (_old_gen_oom_evac.try_set()) {
 846     log_info(gc)("Old gen evac failure.");
 847   }
 848 }
 849 
 850 void ShenandoahHeap::handle_promotion_failure() {
 851   old_heuristics()->handle_promotion_failure();
 852 }
 853 
 854 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
 855   // New object should fit the GCLAB size
 856   size_t min_size = MAX2(size, PLAB::min_size());
 857 
 858   // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
 859   size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
 860 
 861   // Limit growth of GCLABs to ShenandoahMaxEvacLABRatio * the minimum size.  This enables a more equitable distribution of
 862   // the available evacuation budget among the many threads that are coordinating in the evacuation effort.
 863   if (ShenandoahMaxEvacLABRatio > 0) {
 864     new_size = MIN2(new_size, PLAB::min_size() * ShenandoahMaxEvacLABRatio);
 865   }
 866   new_size = MIN2(new_size, PLAB::max_size());
 867   new_size = MAX2(new_size, PLAB::min_size());
 868 
 869   // Record the new heuristic value even if we take a shortcut. This captures
 870   // the case when moderately-sized objects always take the shortcut. At some point,
 871   // heuristics should catch up with them.
 872   ShenandoahThreadLocalData::set_gclab_size(thread, new_size);
 873 
 874   if (new_size < size) {
 875     // New size still does not fit the object. Fall back to shared allocation.
 876     // This avoids retiring perfectly good GCLABs, when we encounter a large object.
 877     return NULL;
 878   }
 879 
 880   // Retire current GCLAB, and allocate a new one.
 881   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
 882   gclab->retire();
 883 
 884   size_t actual_size = 0;
 885   HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
 886   if (gclab_buf == NULL) {
 887     return NULL;
 888   }
 889 
 890   assert (size <= actual_size, "allocation should fit");
 891 
 892   if (ZeroTLAB) {
 893     // ..and clear it.
 894     Copy::zero_to_words(gclab_buf, actual_size);
 895   } else {
 896     // ...and zap just allocated object.
 897 #ifdef ASSERT
 898     // Skip mangling the space corresponding to the object header to
 899     // ensure that the returned space is not considered parsable by
 900     // any concurrent GC thread.
 901     size_t hdr_size = oopDesc::header_size();
 902     Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
 903 #endif // ASSERT
 904   }
 905   gclab->set_buf(gclab_buf, actual_size);
 906   return gclab->allocate(size);
 907 }
 908 
 909 // Establish a new PLAB and allocate size HeapWords within it.
 910 HeapWord* ShenandoahHeap::allocate_from_plab_slow(Thread* thread, size_t size, bool is_promotion) {
 911   // New object should fit the PLAB size
 912   size_t min_size = MAX2(size, PLAB::min_size());
 913 
 914   // Figure out size of new PLAB, looking back at heuristics. Expand aggressively.
 915   size_t cur_size = ShenandoahThreadLocalData::plab_size(thread);
 916   if (cur_size == 0) {
 917     cur_size = PLAB::min_size();
 918   }
 919   size_t future_size = cur_size * 2;
 920   // Limit growth of PLABs to ShenandoahMaxEvacLABRatio * the minimum size.  This enables a more equitable distribution of
 921   // the available evacuation budget among the many threads that are coordinating in the evacuation effort.
 922   if (ShenandoahMaxEvacLABRatio > 0) {
 923     future_size = MIN2(future_size, PLAB::min_size() * ShenandoahMaxEvacLABRatio);
 924   }
 925   future_size = MIN2(future_size, PLAB::max_size());
 926   future_size = MAX2(future_size, PLAB::min_size());
 927 
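       // Round the requested PLAB size up to a whole number of cards. As noted in retire_plab(),
       // registering the PLAB's remnant filler object with the remembered set without a lock is only
       // safe if each PLAB spans a whole number of cards and starts at a card boundary.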
 928   size_t unalignment = future_size % CardTable::card_size_in_words();
 929   if (unalignment != 0) {
 930     future_size = future_size - unalignment + CardTable::card_size_in_words();
 931   }
 932 
 933   // Record the new heuristic value even if we take a shortcut. This captures
 934   // the case when moderately-sized objects always take the shortcut. At some point,
 935   // heuristics should catch up with them.  Note that the requested cur_size may
 936   // not be honored, but we remember that this is the preferred size.
 937   ShenandoahThreadLocalData::set_plab_size(thread, future_size);
 938   if (cur_size < size) {
 939     // The PLAB to be allocated is still not large enough to hold the object. Fall back to shared allocation.
 940     // This avoids retiring perfectly good PLABs in order to represent a single large object allocation.
 941     return nullptr;
 942   }
 943 
 944   // Retire current PLAB, and allocate a new one.
 945   PLAB* plab = ShenandoahThreadLocalData::plab(thread);
 946   if (plab->words_remaining() < PLAB::min_size()) {
 947     // Retire current PLAB, and allocate a new one.
 948     // CAUTION: retire_plab may register the remnant filler object with the remembered set scanner without a lock.  This
 949     // is safe iff it is assured that each PLAB is a whole-number multiple of card-mark memory size and each PLAB is
 950     // aligned with the start of a card's memory range.
 951 
 952     retire_plab(plab, thread);
 953 
 954     size_t actual_size = 0;
 955     // allocate_new_plab resets plab_evacuated and plab_promoted and disables promotions if old-gen available is
 956     // less than the remaining evacuation need.  It also adjusts plab_preallocated and expend_promoted if appropriate.
 957     HeapWord* plab_buf = allocate_new_plab(min_size, cur_size, &actual_size);
 958     if (plab_buf == NULL) {
 959       return NULL;
 960     } else {
 961       ShenandoahThreadLocalData::enable_plab_retries(thread);
 962     }
 963     assert (size <= actual_size, "allocation should fit");
 964     if (ZeroTLAB) {
 965       // ..and clear it.
 966       Copy::zero_to_words(plab_buf, actual_size);
 967     } else {
 968       // ...and zap just allocated object.
 969 #ifdef ASSERT
 970       // Skip mangling the space corresponding to the object header to
 971       // ensure that the returned space is not considered parsable by
 972       // any concurrent GC thread.
 973       size_t hdr_size = oopDesc::header_size();
 974       Copy::fill_to_words(plab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
 975 #endif // ASSERT
 976     }
 977     plab->set_buf(plab_buf, actual_size);
 978 
 979     if (is_promotion && !ShenandoahThreadLocalData::allow_plab_promotions(thread)) {
 980       return nullptr;
 981     }
 982     return plab->allocate(size);
 983   } else {
 984     // If there are still at least min_size() words available within the current plab, don't retire it.  Let's gnaw
 985     // away on this plab as long as we can.  Meanwhile, return nullptr to force this particular allocation request
 986     // to be satisfied with a shared allocation.  By packing more promotions into the previously allocated PLAB, we
 987     // reduce the likelihood of evacuation failures, and we reduce the need for downsizing our PLABs.
 988     return nullptr;
 989   }
 990 }
 991 
 992 // TODO: It is probably most efficient to register all objects (both promotions and evacuations) that were allocated within
 993 // this plab at the time we retire the plab.  A tight registration loop will run within both code and data caches.  This change
 994 // would allow smaller and faster in-line implementation of alloc_from_plab().  Since plabs are aligned on card-table boundaries,
 995 // this object registration loop can be performed without acquiring a lock.
 996 void ShenandoahHeap::retire_plab(PLAB* plab, Thread* thread) {
 997   // We don't enforce limits on plab_evacuated.  We let it consume all available old-gen memory in order to reduce
 998   // probability of an evacuation failure.  We do enforce limits on promotion, to make sure that excessive promotion
 999   // does not result in an old-gen evacuation failure.  Note that a failed promotion is relatively harmless.  Any
1000   // object that fails to promote in the current cycle will be eligible for promotion in a subsequent cycle.
1001 
1002   // When the plab was instantiated, the entire buffer was treated as if it were going to be dedicated to
1003   // promotions.  Now that we are retiring the buffer, we adjust for the reality that not all of it was used for promotions:
1004   //  1. Some of the plab may have been dedicated to evacuations.
1005   //  2. Some of the plab may have been abandoned due to waste (at the end of the plab).
1006   size_t not_promoted =
1007     ShenandoahThreadLocalData::get_plab_preallocated_promoted(thread) - ShenandoahThreadLocalData::get_plab_promoted(thread);
1008   ShenandoahThreadLocalData::reset_plab_promoted(thread);
1009   ShenandoahThreadLocalData::reset_plab_evacuated(thread);
1010   ShenandoahThreadLocalData::set_plab_preallocated_promoted(thread, 0);
1011   if (not_promoted > 0) {
1012     unexpend_promoted(not_promoted);
1013   }
1014   size_t waste = plab->waste();
1015   HeapWord* top = plab->top();
1016   plab->retire();
1017   if (top != NULL && plab->waste() > waste && is_in_old(top)) {
1018     // If retiring the plab created a filler object, then we
1019     // need to register it with our card scanner so it can
1020     // safely walk the region backing the plab.
1021     log_debug(gc)("retire_plab() is registering remnant of size " SIZE_FORMAT " at " PTR_FORMAT,
1022                   plab->waste() - waste, p2i(top));
1023     card_scan()->register_object_wo_lock(top);
1024   }
1025 }
1026 
1027 void ShenandoahHeap::retire_plab(PLAB* plab) {
1028   Thread* thread = Thread::current();
1029   retire_plab(plab, thread);
1030 }
1031 
1032 void ShenandoahHeap::cancel_old_gc() {
1033   shenandoah_assert_safepoint();
1034   assert(_old_generation != NULL, "Should only have mixed collections in generational mode.");
1035   log_info(gc)("Terminating old gc cycle.");
1036 
1037   // Stop marking
1038   old_generation()->cancel_marking();
1039   // Stop coalescing undead objects
1040   set_prepare_for_old_mark_in_progress(false);
1041   // Stop tracking old regions
1042   old_heuristics()->abandon_collection_candidates();
1043   // Remove old generation access to young generation mark queues
1044   young_generation()->set_old_gen_task_queues(nullptr);
1045   // Transition to IDLE now.
1046   _old_generation->transition_to(ShenandoahOldGeneration::IDLE);
1047 }
1048 
1049 bool ShenandoahHeap::is_old_gc_active() {
1050   return is_concurrent_old_mark_in_progress()
1051          || is_prepare_for_old_mark_in_progress()
1052          || old_heuristics()->unprocessed_old_collection_candidates() > 0
1053          || young_generation()->old_gen_task_queues() != nullptr;
1054 }
1055 
1056 void ShenandoahHeap::coalesce_and_fill_old_regions() {
1057   class ShenandoahGlobalCoalesceAndFill : public ShenandoahHeapRegionClosure {
1058    public:
1059     virtual void heap_region_do(ShenandoahHeapRegion* region) override {
1060       // old region is not in the collection set and was not immediately trashed
1061       if (region->is_old() && region->is_active() && !region->is_humongous()) {
1062         // Reset the coalesce and fill boundary because this is a global collect
1063         // and cannot be preempted by young collects. We want to be sure the entire
1064         // region is coalesced here and does not resume from a previously interrupted
1065         // or completed coalescing.
1066         region->begin_preemptible_coalesce_and_fill();
1067         region->oop_fill_and_coalesce();
1068       }
1069     }
1070 
1071     virtual bool is_thread_safe() override {
1072       return true;
1073     }
1074   };
1075   ShenandoahGlobalCoalesceAndFill coalesce;
1076   parallel_heap_region_iterate(&coalesce);
1077 }
1078 
1079 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
1080                                             size_t requested_size,
1081                                             size_t* actual_size) {
1082   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
1083   HeapWord* res = allocate_memory(req, false);
1084   if (res != NULL) {
1085     *actual_size = req.actual_size();
1086   } else {
1087     *actual_size = 0;
1088   }
1089   return res;
1090 }
1091 
1092 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
1093                                              size_t word_size,
1094                                              size_t* actual_size) {
1095   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
1096   HeapWord* res = allocate_memory(req, false);
1097   if (res != NULL) {
1098     *actual_size = req.actual_size();
1099   } else {
1100     *actual_size = 0;
1101   }
1102   return res;
1103 }
1104 
1105 HeapWord* ShenandoahHeap::allocate_new_plab(size_t min_size,
1106                                             size_t word_size,
1107                                             size_t* actual_size) {
1108   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_plab(min_size, word_size);
1109   // Note that allocate_memory() sets a thread-local flag to prohibit further promotions by this thread
1110   // if we are at risk of exceeding the old-gen evacuation budget.
1111   HeapWord* res = allocate_memory(req, false);
1112   if (res != NULL) {
1113     *actual_size = req.actual_size();
1114   } else {
1115     *actual_size = 0;
1116   }
1117   return res;
1118 }
1119 
1120 // is_promotion is true iff this allocation is known for sure to hold the result of a young-gen evacuation
1121 // to old-gen.  PLAB allocations are not known as such, since they may hold old-gen evacuations.
1122 HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req, bool is_promotion) {
1123   intptr_t pacer_epoch = 0;
1124   bool in_new_region = false;
1125   HeapWord* result = NULL;
1126 
1127   if (req.is_mutator_alloc()) {
1128     if (ShenandoahPacing) {
1129       pacer()->pace_for_alloc(req.size());
1130       pacer_epoch = pacer()->epoch();
1131     }
1132 
1133     if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
1134       result = allocate_memory_under_lock(req, in_new_region, is_promotion);
1135     }
1136 
1137     // Allocation failed, block until control thread reacted, then retry allocation.
1138     //
1139     // It might happen that one of the threads requesting allocation would unblock
1140     // way later after GC happened, only to fail the second allocation, because
1141     // other threads have already depleted the free storage. In this case, a better
1142     // strategy is to try again, as long as GC makes progress.
1143     //
1144     // Then, we need to make sure the allocation was retried after at least one
1145     // Full GC, which means we want to try more than ShenandoahFullGCThreshold times.
1146 
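         // First retry for as long as the GC keeps making progress; then fall back to a bounded
         // number of retries (ShenandoahFullGCThreshold) so that the attempts span at least one
         // Full GC before we finally give up and return NULL.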
1147     size_t tries = 0;
1148 
1149     while (result == NULL && _progress_last_gc.is_set()) {
1150       tries++;
1151       control_thread()->handle_alloc_failure(req);
1152       result = allocate_memory_under_lock(req, in_new_region, is_promotion);
1153     }
1154 
1155     while (result == NULL && tries <= ShenandoahFullGCThreshold) {
1156       tries++;
1157       control_thread()->handle_alloc_failure(req);
1158       result = allocate_memory_under_lock(req, in_new_region, is_promotion);
1159     }
1160 
1161   } else {
1162     assert(req.is_gc_alloc(), "Can only accept GC allocs here");
1163     result = allocate_memory_under_lock(req, in_new_region, is_promotion);
1164     // Do not call handle_alloc_failure() here, because we cannot block.
1165     // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
1166   }
1167 
1168   if (in_new_region) {
1169     control_thread()->notify_heap_changed();
1170     regulator_thread()->notify_heap_changed();
1171   }
1172 
1173   if (result != NULL) {
1174     ShenandoahGeneration* alloc_generation = generation_for(req.affiliation());
1175     size_t requested = req.size();
1176     size_t actual = req.actual_size();
1177     size_t actual_bytes = actual * HeapWordSize;
1178 
1179     assert (req.is_lab_alloc() || (requested == actual),
1180             "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
1181             ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);
1182 
1183     if (req.is_mutator_alloc()) {
1184       notify_mutator_alloc_words(actual, false);
1185       alloc_generation->increase_allocated(actual_bytes);
1186 
1187       // If we requested more than we were granted, give the rest back to pacer.
1188       // This only matters if we are in the same pacing epoch: do not try to unpace
1189       // over the budget for the other phase.
1190       if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
1191         pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
1192       }
1193     } else {
1194       increase_used(actual_bytes);
1195     }
1196   }
1197 
1198   return result;
1199 }
1200 
1201 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region, bool is_promotion) {
1202   // promotion_eligible pertains only to PLAB allocations, denoting that the PLAB is allowed to allocate for promotions.
1203   bool promotion_eligible = false;
1204   bool allow_allocation = true;
1205   bool plab_alloc = false;
1206   size_t requested_bytes = req.size() * HeapWordSize;
1207   HeapWord* result = nullptr;
1208   ShenandoahHeapLocker locker(lock());
1209   Thread* thread = Thread::current();
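       // In generational mode, budget checks come before consulting the free set: young mutator
       // allocations are refused when they would exceed the young generation's adjusted available
       // memory, and old-gen PLAB or shared promotion requests are checked against the remaining
       // promotion reserve before the allocation is attempted.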
1210   if (mode()->is_generational()) {
1211     if (req.affiliation() == YOUNG_GENERATION) {
1212       if (req.is_mutator_alloc()) {
1213         if (requested_bytes >= young_generation()->adjusted_available()) {
1214           // We know this is not a GCLAB.  This must be a TLAB or a shared allocation.  Reject the allocation
1215           // request if it exceeds the established capacity limits.
1216           return nullptr;
1217         }
1218       }
1219     } else {                    // req.affiliation() == OLD_GENERATION
1220       assert(req.type() != ShenandoahAllocRequest::_alloc_gclab, "GCLAB pertains only to young-gen memory");
1221       if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
1222         plab_alloc = true;
1223         size_t promotion_avail = get_promoted_reserve();
1224         size_t promotion_expended = get_promoted_expended();
1225         if (promotion_expended + requested_bytes > promotion_avail) {
1226           promotion_avail = 0;
1227           if (get_old_evac_reserve() == 0) {
1228             // There are no old-gen evacuations in this pass.  There's no value in creating a plab that cannot
1229             // be used for promotions.
1230             allow_allocation = false;
1231           }
1232         } else {
1233           promotion_avail = promotion_avail - (promotion_expended + requested_bytes);
1234           promotion_eligible = true;
1235         }
1236       } else if (is_promotion) {
1237         // This is a shared alloc for promotion
1238         size_t promotion_avail = get_promoted_reserve();
1239         size_t promotion_expended = get_promoted_expended();
1240         if (promotion_expended + requested_bytes > promotion_avail) {
1241           promotion_avail = 0;
1242         } else {
1243           promotion_avail = promotion_avail - (promotion_expended + requested_bytes);
1244         }
1245 
1246         if (promotion_avail == 0) {
1247           // We need to reserve the remaining memory for evacuation.  Reject this allocation.  The object will be
1248           // evacuated to young-gen memory and promoted during a future GC pass.
1249           return nullptr;
1250         }
1251         // Else, we'll allow the allocation to proceed.  (Since we hold heap lock, the tested condition remains true.)
1252       } else {
1253         // This is a shared allocation for evacuation.  Memory has already been reserved for this purpose.
1254       }
1255     }
1256   }
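       // Attempt the actual allocation in the free set.  If the budgeting checks above disallowed the
       // allocation, fail without touching the free set.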
1257   result = allow_allocation ? _free_set->allocate(req, in_new_region) : nullptr;
1258   if (result != NULL) {
1259     if (req.affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION) {
1260       ShenandoahThreadLocalData::reset_plab_promoted(thread);
1261       if (req.is_gc_alloc()) {
1262         if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
1263           if (promotion_eligible) {
1264             size_t actual_size = req.actual_size() * HeapWordSize;
1265             // Assume the entirety of this PLAB will be used for promotion.  This prevents promotion from overreaching
1266             // its budget.  When we retire this PLAB, we'll unexpend whatever we did not actually use.
1267             ShenandoahThreadLocalData::enable_plab_promotions(thread);
1268             expend_promoted(actual_size);
1269             assert(get_promoted_expended() <= get_promoted_reserve(), "Do not expend more promotion than budgeted");
1270             ShenandoahThreadLocalData::set_plab_preallocated_promoted(thread, actual_size);
1271           } else {
1272             // Disable promotions in this thread because entirety of this PLAB must be available to hold old-gen evacuations.
1273             ShenandoahThreadLocalData::disable_plab_promotions(thread);
1274             ShenandoahThreadLocalData::set_plab_preallocated_promoted(thread, 0);
1275           }
1276         } else if (is_promotion) {
1277           // Shared promotion.  Assume size is requested_bytes.
1278           expend_promoted(requested_bytes);
1279           assert(get_promoted_expended() <= get_promoted_reserve(), "Do not expend more promotion than budgeted");
1280         }
1281       }
1282 
1283       // Register the newly allocated object while we're holding the global lock since there's no synchronization
1284       // built into the implementation of register_object().  There are potential races when multiple independent
1285       // threads are allocating objects, some of which might span the same card region.  For example, consider
1286       // a card table's memory region within which three objects are being allocated by three different threads:
1287       //
1288       // objects being "concurrently" allocated:
1289       //    [-----a------][-----b-----][--------------c------------------]
1290       //            [---- card table memory range --------------]
1291       //
1292       // Before any objects are allocated, this card's memory range holds no objects.  Note that:
1293       //   allocation of object a wants to set the has-object, first-start, and last-start attributes of the preceding card region.
1294       //   allocation of object b wants to set the has-object, first-start, and last-start attributes of this card region.
1295       //   allocation of object c also wants to set the has-object, first-start, and last-start attributes of this card region.
1296       //
1297       // The thread allocating b and the thread allocating c can "race" in various ways, resulting in confusion, such as last-start
1298       // representing object b while first-start represents object c.  This is why we need to require all register_object()
1299       // invocations to be "mutually exclusive" with respect to each card's memory range.
1300       ShenandoahHeap::heap()->card_scan()->register_object(result);
1301     }
1302   } else {
1303     // The allocation failed.  If this was a PLAB allocation, we've already retired it and no longer have a PLAB.
1304     if ((req.affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION) && req.is_gc_alloc() &&
1305         (req.type() == ShenandoahAllocRequest::_alloc_plab)) {
1306       // We don't need to disable PLAB promotions because there is no PLAB.  We leave promotions enabled because
1307       // this allows the surrounding infrastructure to retry alloc_plab_slow() with a smaller PLAB size.
1308       ShenandoahThreadLocalData::set_plab_preallocated_promoted(thread, 0);
1309     }
1310   }
1311   return result;
1312 }
1313 
1314 HeapWord* ShenandoahHeap::mem_allocate(size_t size,
1315                                         bool*  gc_overhead_limit_was_exceeded) {
1316   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
1317   return allocate_memory(req, false);
1318 }
1319 
1320 MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
1321                                                              size_t size,
1322                                                              Metaspace::MetadataType mdtype) {
1323   MetaWord* result;
1324 
1325   // Inform metaspace OOM to GC heuristics if class unloading is possible.
1326   ShenandoahHeuristics* h = global_generation()->heuristics();
1327   if (h->can_unload_classes()) {
1328     h->record_metaspace_oom();
1329   }
1330 
1331   // Expand and retry allocation
1332   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1333   if (result != NULL) {
1334     return result;
1335   }
1336 
1337   // Start full GC
1338   collect(GCCause::_metadata_GC_clear_soft_refs);
1339 
1340   // Retry allocation
1341   result = loader_data->metaspace_non_null()->allocate(size, mdtype);
1342   if (result != NULL) {
1343     return result;
1344   }
1345 
1346   // Expand and retry allocation
1347   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1348   if (result != NULL) {
1349     return result;
1350   }
1351 
1352   // Out of memory
1353   return NULL;
1354 }
1355 
1356 class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
1357 private:
1358   ShenandoahHeap* const _heap;
1359   Thread* const _thread;
1360 public:
1361   ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
1362     _heap(heap), _thread(Thread::current()) {}
1363 
1364   void do_object(oop p) {
1365     shenandoah_assert_marked(NULL, p);
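         // Evacuate only objects that are not yet forwarded; objects that are already forwarded have been
         // copied by another GC worker or by a mutator through the load-reference barrier.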
1366     if (!p->is_forwarded()) {
1367       _heap->evacuate_object(p, _thread);
1368     }
1369   }
1370 };
1371 
1372 class ShenandoahEvacuationTask : public WorkerTask {
1373 private:
1374   ShenandoahHeap* const _sh;
1375   ShenandoahCollectionSet* const _cs;
1376   bool _concurrent;
1377 public:
1378   ShenandoahEvacuationTask(ShenandoahHeap* sh,
1379                            ShenandoahCollectionSet* cs,
1380                            bool concurrent) :
1381     WorkerTask("Shenandoah Evacuation"),
1382     _sh(sh),
1383     _cs(cs),
1384     _concurrent(concurrent)
1385   {}
1386 
1387   void work(uint worker_id) {
1388     if (_concurrent) {
1389       ShenandoahConcurrentWorkerSession worker_session(worker_id);
1390       ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
1391       ShenandoahEvacOOMScope oom_evac_scope;
1392       do_work();
1393     } else {
1394       ShenandoahParallelWorkerSession worker_session(worker_id);
1395       ShenandoahEvacOOMScope oom_evac_scope;
1396       do_work();
1397     }
1398   }
1399 
1400 private:
1401   void do_work() {
1402     ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
1403     ShenandoahHeapRegion* r;
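         // Claim collection set regions one at a time and evacuate all live objects in each claimed region,
         // reporting progress to the pacer and stopping early if the GC has been cancelled.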
1404     while ((r = _cs->claim_next()) != NULL) {
1405       assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
1406 
1407       _sh->marked_object_iterate(r, &cl);
1408 
1409       if (ShenandoahPacing) {
1410         _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
1411       }
1412       if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
1413         break;
1414       }
1415     }
1416   }
1417 };
1418 
1419 // Unlike ShenandoahEvacuationTask, this iterates over all regions rather than just the collection set.
1420 // This is needed in order to promote humongous start regions whose age exceeds the tenuring threshold.
1421 class ShenandoahGenerationalEvacuationTask : public WorkerTask {
1422 private:
1423   ShenandoahHeap* const _sh;
1424   ShenandoahRegionIterator *_regions;
1425   bool _concurrent;
1426 public:
1427   ShenandoahGenerationalEvacuationTask(ShenandoahHeap* sh,
1428                                        ShenandoahRegionIterator* iterator,
1429                                        bool concurrent) :
1430     WorkerTask("Shenandoah Evacuation"),
1431     _sh(sh),
1432     _regions(iterator),
1433     _concurrent(concurrent)
1434   {}
1435 
1436   void work(uint worker_id) {
1437     if (_concurrent) {
1438       ShenandoahConcurrentWorkerSession worker_session(worker_id);
1439       ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
1440       ShenandoahEvacOOMScope oom_evac_scope;
1441       do_work();
1442     } else {
1443       ShenandoahParallelWorkerSession worker_session(worker_id);
1444       ShenandoahEvacOOMScope oom_evac_scope;
1445       do_work();
1446     }
1447   }
1448 
1449 private:
1450   void do_work() {
1451     ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
1452     ShenandoahHeapRegion* r;
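         // Walk all regions: evacuate the live objects of collection set regions, and promote aged humongous
         // start regions (together with their continuations) in place, as described in the class comment above.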
1453     while ((r = _regions->next()) != nullptr) {
1454       log_debug(gc)("GenerationalEvacuationTask do_work(), looking at %s region " SIZE_FORMAT ", (age: %d) [%s, %s]",
1455                     r->is_old()? "old": r->is_young()? "young": "free", r->index(), r->age(),
1456                     r->is_active()? "active": "inactive",
1457                     r->is_humongous()? (r->is_humongous_start()? "humongous_start": "humongous_continuation"): "regular");
1458       if (r->is_cset()) {
1459         assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
1460         _sh->marked_object_iterate(r, &cl);
1461         if (ShenandoahPacing) {
1462           _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
1463         }
1464       } else if (r->is_young() && r->is_active() && r->is_humongous_start() && (r->age() > InitialTenuringThreshold)) {
1465         // We promote humongous_start regions along with their affiliated continuations during evacuation rather than
1466         // doing this work during a safepoint.  We cannot put humongous regions into the collection set because that
1467         // triggers the load-reference barrier (LRB) to copy on reference fetch.
1468         r->promote_humongous();
1469       }
1470       // else, region is free, or OLD, or not in collection set, or humongous_continuation,
1471       // or is young humongous_start that is too young to be promoted
1472 
1473       if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
1474         break;
1475       }
1476     }
1477   }
1478 };
1479 
1480 void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
1481   if (ShenandoahHeap::heap()->mode()->is_generational()) {
1482     ShenandoahRegionIterator regions;
1483     ShenandoahGenerationalEvacuationTask task(this, &regions, concurrent);
1484     workers()->run_task(&task);
1485   } else {
1486     ShenandoahEvacuationTask task(this, _collection_set, concurrent);
1487     workers()->run_task(&task);
1488   }
1489 }
1490 
1491 void ShenandoahHeap::trash_cset_regions() {
1492   ShenandoahHeapLocker locker(lock());
1493 
1494   ShenandoahCollectionSet* set = collection_set();
1495   ShenandoahHeapRegion* r;
1496   set->clear_current_index();
1497   while ((r = set->next()) != NULL) {
1498     r->make_trash();
1499   }
1500   collection_set()->clear();
1501 }
1502 
1503 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
1504   st->print_cr("Heap Regions:");
1505   st->print_cr("EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HC=humongous continuation, CS=collection set, T=trash, P=pinned");
1506   st->print_cr("BTE=bottom/top/end, U=used, T=TLAB allocs, G=GCLAB allocs, S=shared allocs, L=live data");
1507   st->print_cr("R=root, CP=critical pins, TAMS=top-at-mark-start, UWM=update watermark");
1508   st->print_cr("SN=alloc sequence number");
1509 
1510   for (size_t i = 0; i < num_regions(); i++) {
1511     get_region(i)->print_on(st);
1512   }
1513 }
1514 
1515 size_t ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
1516   assert(start->is_humongous_start(), "reclaim regions starting with the first one");
1517 
1518   oop humongous_obj = cast_to_oop(start->bottom());
1519   size_t size = humongous_obj->size();
1520   size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
1521   size_t index = start->index() + required_regions - 1;
1522 
1523   assert(!start->has_live(), "liveness must be zero");
1524 
1525   for(size_t i = 0; i < required_regions; i++) {
1526     // Reclaim from the tail. Otherwise, an assertion fails when printing the region to the trace log,
1527     // because printing expects every humongous continuation to be preceded by its humongous start region.
1528     ShenandoahHeapRegion* region = get_region(index--);
1529 
1530     assert(region->is_humongous(), "expect correct humongous start or continuation");
1531     assert(!region->is_cset(), "Humongous region should not be in collection set");
1532 
1533     region->make_trash_immediate();
1534   }
1535   return required_regions;
1536 }
1537 
1538 class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
1539 public:
1540   ShenandoahCheckCleanGCLABClosure() {}
1541   void do_thread(Thread* thread) {
1542     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1543     assert(gclab != NULL, "GCLAB should be initialized for %s", thread->name());
1544     assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
1545 
1546     PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1547     assert(plab != NULL, "PLAB should be initialized for %s", thread->name());
1548     assert(plab->words_remaining() == 0, "PLAB should not need retirement");
1549   }
1550 };
1551 
1552 class ShenandoahRetireGCLABClosure : public ThreadClosure {
1553 private:
1554   bool const _resize;
1555 public:
1556   ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
1557   void do_thread(Thread* thread) {
1558     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1559     assert(gclab != NULL, "GCLAB should be initialized for %s", thread->name());
1560     gclab->retire();
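         // Drop the recorded desired GCLAB size; the allocation path is expected to compute a fresh size on
         // the next refill.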
1561     if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1562       ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1563     }
1564 
1565     PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1566     assert(plab != NULL, "PLAB should be initialized for %s", thread->name());
1567 
1568     // There are two reasons to retire all PLABs between old-gen evacuation passes.
1569     //  1. We need to make the PLAB memory parsable by remembered-set scanning.
1570     //  2. We need to establish a trustworthy update watermark value within each old-gen heap region.
1571     ShenandoahHeap::heap()->retire_plab(plab, thread);
1572     if (_resize && ShenandoahThreadLocalData::plab_size(thread) > 0) {
1573       ShenandoahThreadLocalData::set_plab_size(thread, 0);
1574     }
1575   }
1576 };
1577 
1578 void ShenandoahHeap::labs_make_parsable() {
1579   assert(UseTLAB, "Only call with UseTLAB");
1580 
1581   ShenandoahRetireGCLABClosure cl(false);
1582 
1583   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1584     ThreadLocalAllocBuffer& tlab = t->tlab();
1585     tlab.make_parsable();
1586     cl.do_thread(t);
1587   }
1588 
1589   workers()->threads_do(&cl);
1590 }
1591 
1592 void ShenandoahHeap::tlabs_retire(bool resize) {
1593   assert(UseTLAB, "Only call with UseTLAB");
1594   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1595 
1596   ThreadLocalAllocStats stats;
1597 
1598   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1599     ThreadLocalAllocBuffer& tlab = t->tlab();
1600     tlab.retire(&stats);
1601     if (resize) {
1602       tlab.resize();
1603     }
1604   }
1605 
1606   stats.publish();
1607 
1608 #ifdef ASSERT
1609   ShenandoahCheckCleanGCLABClosure cl;
1610   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1611     cl.do_thread(t);
1612   }
1613   workers()->threads_do(&cl);
1614 #endif
1615 }
1616 
1617 void ShenandoahHeap::gclabs_retire(bool resize) {
1618   assert(UseTLAB, "Only call with UseTLAB");
1619   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1620 
1621   ShenandoahRetireGCLABClosure cl(resize);
1622   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1623     cl.do_thread(t);
1624   }
1625   workers()->threads_do(&cl);
1626 
1627   if (safepoint_workers() != NULL) {
1628     safepoint_workers()->threads_do(&cl);
1629   }
1630 }
1631 
1632 class ShenandoahTagGCLABClosure : public ThreadClosure {
1633 public:
1634   void do_thread(Thread* thread) {
1635     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1636     assert(gclab != NULL, "GCLAB should be initialized for %s", thread->name());
1637     if (gclab->words_remaining() > 0) {
1638       ShenandoahHeapRegion* r = ShenandoahHeap::heap()->heap_region_containing(gclab->allocate(0));
1639       r->set_young_lab_flag();
1640     }
1641   }
1642 };
1643 
1644 void ShenandoahHeap::set_young_lab_region_flags() {
1645   if (!UseTLAB) {
1646     return;
1647   }
1648   for (size_t i = 0; i < _num_regions; i++) {
1649     _regions[i]->clear_young_lab_flags();
1650   }
1651   ShenandoahTagGCLABClosure cl;
1652   workers()->threads_do(&cl);
1653   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1654     cl.do_thread(t);
1655     ThreadLocalAllocBuffer& tlab = t->tlab();
1656     if (tlab.end() != NULL) {
1657       ShenandoahHeapRegion* r = heap_region_containing(tlab.start());
1658       r->set_young_lab_flag();
1659     }
1660   }
1661 }
1662 
1663 // Returns size in bytes
1664 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1665   if (ShenandoahElasticTLAB) {
1666     // With Elastic TLABs, return the max allowed size, and let the allocation path
1667     // figure out the safe size for current allocation.
1668     return ShenandoahHeapRegion::max_tlab_size_bytes();
1669   } else {
1670     return MIN2(_free_set->unsafe_peek_free(), ShenandoahHeapRegion::max_tlab_size_bytes());
1671   }
1672 }
1673 
1674 size_t ShenandoahHeap::max_tlab_size() const {
1675   // Returns size in words
1676   return ShenandoahHeapRegion::max_tlab_size_words();
1677 }
1678 
1679 void ShenandoahHeap::collect(GCCause::Cause cause) {
1680   control_thread()->request_gc(cause);
1681 }
1682 
1683 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
1684   //assert(false, "Shouldn't need to do full collections");
1685 }
1686 
1687 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1688   ShenandoahHeapRegion* r = heap_region_containing(addr);
1689   if (r != NULL) {
1690     return r->block_start(addr);
1691   }
1692   return NULL;
1693 }
1694 
1695 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1696   ShenandoahHeapRegion* r = heap_region_containing(addr);
1697   return r->block_is_obj(addr);
1698 }
1699 
1700 bool ShenandoahHeap::print_location(outputStream* st, void* addr) const {
1701   return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr);
1702 }
1703 
1704 void ShenandoahHeap::prepare_for_verify() {
1705   if (SafepointSynchronize::is_at_safepoint() && UseTLAB) {
1706     labs_make_parsable();
1707   }
1708 }
1709 
1710 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1711   workers()->threads_do(tcl);
1712   if (_safepoint_workers != NULL) {
1713     _safepoint_workers->threads_do(tcl);
1714   }
1715   if (ShenandoahStringDedup::is_enabled()) {
1716     ShenandoahStringDedup::threads_do(tcl);
1717   }
1718 }
1719 
1720 void ShenandoahHeap::print_tracing_info() const {
1721   LogTarget(Info, gc, stats) lt;
1722   if (lt.is_enabled()) {
1723     ResourceMark rm;
1724     LogStream ls(lt);
1725 
1726     phase_timings()->print_global_on(&ls);
1727 
1728     ls.cr();
1729     ls.cr();
1730 
1731     shenandoah_policy()->print_gc_stats(&ls);
1732 
1733     ls.cr();
1734     ls.cr();
1735   }
1736 }
1737 
1738 void ShenandoahHeap::verify(VerifyOption vo) {
1739   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1740     if (ShenandoahVerify) {
1741       verifier()->verify_generic(vo);
1742     } else {
1743       // TODO: Consider allocating verification bitmaps on demand,
1744       // and turn this on unconditionally.
1745     }
1746   }
1747 }
1748 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1749   return _free_set->capacity();
1750 }
1751 
1752 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1753 private:
1754   MarkBitMap* _bitmap;
1755   ShenandoahScanObjectStack* _oop_stack;
1756   ShenandoahHeap* const _heap;
1757   ShenandoahMarkingContext* const _marking_context;
1758 
1759   template <class T>
1760   void do_oop_work(T* p) {
1761     T o = RawAccess<>::oop_load(p);
1762     if (!CompressedOops::is_null(o)) {
1763       oop obj = CompressedOops::decode_not_null(o);
1764       if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
1765         // There may be dead oops in weak roots in concurrent root phase, do not touch them.
1766         return;
1767       }
1768       obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
1769 
1770       assert(oopDesc::is_oop(obj), "must be a valid oop");
1771       if (!_bitmap->is_marked(obj)) {
1772         _bitmap->mark(obj);
1773         _oop_stack->push(obj);
1774       }
1775     }
1776   }
1777 public:
1778   ObjectIterateScanRootClosure(MarkBitMap* bitmap, ShenandoahScanObjectStack* oop_stack) :
1779     _bitmap(bitmap), _oop_stack(oop_stack), _heap(ShenandoahHeap::heap()),
1780     _marking_context(_heap->marking_context()) {}
1781   void do_oop(oop* p)       { do_oop_work(p); }
1782   void do_oop(narrowOop* p) { do_oop_work(p); }
1783 };
1784 
1785 /*
1786  * This is public API, used in preparation for object_iterate().
1787  * Since we don't do a linear scan of the heap in object_iterate() (see comment below), we don't
1788  * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can
1789  * control, we call SH::tlabs_retire, SH::gclabs_retire.
1790  */
1791 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
1792   // No-op.
1793 }
1794 
1795 /*
1796  * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
1797  *
1798  * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
1799  * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
1800  * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
1801  * scanning therefore depends on having a valid marking bitmap to support it. However, we only
1802  * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
1803  * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
1804  * wiped the bitmap in preparation for next marking).
1805  *
1806  * For all those reasons, we implement object iteration as a single marking traversal, reporting
1807  * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1808  * is allowed to report dead objects, but is not required to do so.
1809  */
1810 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1811   // Reset bitmap
1812   if (!prepare_aux_bitmap_for_iteration())
1813     return;
1814 
1815   ShenandoahScanObjectStack oop_stack;
1816   ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1817   // Seed the stack with root scan
1818   scan_roots_for_iteration(&oop_stack, &oops);
1819 
1820   // Work through the oop stack to traverse heap
1821   while (! oop_stack.is_empty()) {
1822     oop obj = oop_stack.pop();
1823     assert(oopDesc::is_oop(obj), "must be a valid oop");
1824     cl->do_object(obj);
1825     obj->oop_iterate(&oops);
1826   }
1827 
1828   assert(oop_stack.is_empty(), "should be empty");
1829   // Reclaim bitmap
1830   reclaim_aux_bitmap_for_iteration();
1831 }
1832 
1833 bool ShenandoahHeap::prepare_aux_bitmap_for_iteration() {
1834   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1835 
1836   if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1837     log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1838     return false;
1839   }
1840   // Reset bitmap
1841   _aux_bit_map.clear();
1842   return true;
1843 }
1844 
1845   // Process GC roots according to the current GC cycle.
1846   // This populates the work stack with the initial objects.
1847   // It is important to relinquish the associated locks before diving
1848   // into the heap dumper.
1849   // into heap dumper
1850   uint n_workers = safepoint_workers() != NULL ? safepoint_workers()->active_workers() : 1;
1851   ShenandoahHeapIterationRootScanner rp(n_workers);
1852   rp.roots_do(oops);
1853 }
1854 
1855 void ShenandoahHeap::reclaim_aux_bitmap_for_iteration() {
1856   if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
1857     log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
1858   }
1859 }
1860 
1861 // Closure for parallel iteration of objects
1862 class ShenandoahObjectIterateParScanClosure : public BasicOopIterateClosure {
1863 private:
1864   MarkBitMap* _bitmap;
1865   ShenandoahObjToScanQueue* _queue;
1866   ShenandoahHeap* const _heap;
1867   ShenandoahMarkingContext* const _marking_context;
1868 
1869   template <class T>
1870   void do_oop_work(T* p) {
1871     T o = RawAccess<>::oop_load(p);
1872     if (!CompressedOops::is_null(o)) {
1873       oop obj = CompressedOops::decode_not_null(o);
1874       if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
1875         // There may be dead oops in weak roots in concurrent root phase, do not touch them.
1876         return;
1877       }
1878       obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
1879 
1880       assert(oopDesc::is_oop(obj), "Must be a valid oop");
1881       if (_bitmap->par_mark(obj)) {
1882         _queue->push(ShenandoahMarkTask(obj));
1883       }
1884     }
1885   }
1886 public:
1887   ShenandoahObjectIterateParScanClosure(MarkBitMap* bitmap, ShenandoahObjToScanQueue* q) :
1888     _bitmap(bitmap), _queue(q), _heap(ShenandoahHeap::heap()),
1889     _marking_context(_heap->marking_context()) {}
1890   void do_oop(oop* p)       { do_oop_work(p); }
1891   void do_oop(narrowOop* p) { do_oop_work(p); }
1892 };
1893 
1894 // Object iterator for parallel heap iteration.
1895 // The root scanning phase happens in the constructor, as preparation of the
1896 // parallel marking queues.
1897 // Every worker processes its own marking queue; work-stealing is used
1898 // to balance the workload.
1899 class ShenandoahParallelObjectIterator : public ParallelObjectIteratorImpl {
1900 private:
1901   uint                         _num_workers;
1902   bool                         _init_ready;
1903   MarkBitMap*                  _aux_bit_map;
1904   ShenandoahHeap*              _heap;
1905   ShenandoahScanObjectStack    _roots_stack; // global roots stack
1906   ShenandoahObjToScanQueueSet* _task_queues;
1907 public:
1908   ShenandoahParallelObjectIterator(uint num_workers, MarkBitMap* bitmap) :
1909         _num_workers(num_workers),
1910         _init_ready(false),
1911         _aux_bit_map(bitmap),
1912         _heap(ShenandoahHeap::heap()) {
1913     // Initialize bitmap
1914     _init_ready = _heap->prepare_aux_bitmap_for_iteration();
1915     if (!_init_ready) {
1916       return;
1917     }
1918 
1919     ObjectIterateScanRootClosure oops(_aux_bit_map, &_roots_stack);
1920     _heap->scan_roots_for_iteration(&_roots_stack, &oops);
1921 
1922     _init_ready = prepare_worker_queues();
1923   }
1924 
1925   ~ShenandoahParallelObjectIterator() {
1926     // Reclaim bitmap
1927     _heap->reclaim_aux_bitmap_for_iteration();
1928     // Reclaim queue for workers
1929     if (_task_queues != NULL) {
1930       for (uint i = 0; i < _num_workers; ++i) {
1931         ShenandoahObjToScanQueue* q = _task_queues->queue(i);
1932         if (q != NULL) {
1933           delete q;
1934           _task_queues->register_queue(i, NULL);
1935         }
1936       }
1937       delete _task_queues;
1938       _task_queues = NULL;
1939     }
1940   }
1941 
1942   virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
1943     if (_init_ready) {
1944       object_iterate_parallel(cl, worker_id, _task_queues);
1945     }
1946   }
1947 
1948 private:
1949   // Divide the global roots stack into worker queues
1950   bool prepare_worker_queues() {
1951     _task_queues = new ShenandoahObjToScanQueueSet((int) _num_workers);
1952     // Initialize a queue for every worker
1953     for (uint i = 0; i < _num_workers; ++i) {
1954       ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
1955       _task_queues->register_queue(i, task_queue);
1956     }
1957     // Divide roots among the workers. Assuming that the distribution of object references
1958     // is related to the root kind, use round-robin so that every worker has the same chance
1959     // to process every kind of root.
1960     size_t roots_num = _roots_stack.size();
1961     if (roots_num == 0) {
1962       // No work to do
1963       return false;
1964     }
1965 
1966     for (uint j = 0; j < roots_num; j++) {
1967       uint stack_id = j % _num_workers;
1968       oop obj = _roots_stack.pop();
1969       _task_queues->queue(stack_id)->push(ShenandoahMarkTask(obj));
1970     }
1971     return true;
1972   }
1973 
1974   void object_iterate_parallel(ObjectClosure* cl,
1975                                uint worker_id,
1976                                ShenandoahObjToScanQueueSet* queue_set) {
1977     assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1978     assert(queue_set != NULL, "task queue must not be NULL");
1979 
1980     ShenandoahObjToScanQueue* q = queue_set->queue(worker_id);
1981     assert(q != NULL, "object iterate queue must not be NULL");
1982 
1983     ShenandoahMarkTask t;
1984     ShenandoahObjectIterateParScanClosure oops(_aux_bit_map, q);
1985 
1986     // Work through the queue to traverse heap.
1987     // Steal when there is no task in queue.
1988     while (q->pop(t) || queue_set->steal(worker_id, t)) {
1989       oop obj = t.obj();
1990       assert(oopDesc::is_oop(obj), "must be a valid oop");
1991       cl->do_object(obj);
1992       obj->oop_iterate(&oops);
1993     }
1994     assert(q->is_empty(), "should be empty");
1995   }
1996 };
1997 
1998 ParallelObjectIteratorImpl* ShenandoahHeap::parallel_object_iterator(uint workers) {
1999   return new ShenandoahParallelObjectIterator(workers, &_aux_bit_map);
2000 }
2001 
2002 // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
2003 void ShenandoahHeap::keep_alive(oop obj) {
2004   if (is_concurrent_mark_in_progress() && (obj != NULL)) {
2005     ShenandoahBarrierSet::barrier_set()->enqueue(obj);
2006   }
2007 }
2008 
2009 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
2010   for (size_t i = 0; i < num_regions(); i++) {
2011     ShenandoahHeapRegion* current = get_region(i);
2012     blk->heap_region_do(current);
2013   }
2014 }
2015 
2016 class ShenandoahParallelHeapRegionTask : public WorkerTask {
2017 private:
2018   ShenandoahHeap* const _heap;
2019   ShenandoahHeapRegionClosure* const _blk;
2020 
2021   shenandoah_padding(0);
2022   volatile size_t _index;
2023   shenandoah_padding(1);
2024 
2025 public:
2026   ShenandoahParallelHeapRegionTask(ShenandoahHeapRegionClosure* blk) :
2027           WorkerTask("Shenandoah Parallel Region Operation"),
2028           _heap(ShenandoahHeap::heap()), _blk(blk), _index(0) {}
2029 
2030   void work(uint worker_id) {
2031     ShenandoahParallelWorkerSession worker_session(worker_id);
2032     size_t stride = ShenandoahParallelRegionStride;
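         // Workers claim batches of 'stride' consecutive region indices by atomically advancing the shared
         // cursor, and keep claiming batches until all regions have been visited.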
2033 
2034     size_t max = _heap->num_regions();
2035     while (Atomic::load(&_index) < max) {
2036       size_t cur = Atomic::fetch_and_add(&_index, stride, memory_order_relaxed);
2037       size_t start = cur;
2038       size_t end = MIN2(cur + stride, max);
2039       if (start >= max) break;
2040 
2041       for (size_t i = cur; i < end; i++) {
2042         ShenandoahHeapRegion* current = _heap->get_region(i);
2043         _blk->heap_region_do(current);
2044       }
2045     }
2046   }
2047 };
2048 
2049 void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
2050   assert(blk->is_thread_safe(), "Only thread-safe closures here");
2051   if (num_regions() > ShenandoahParallelRegionStride) {
2052     ShenandoahParallelHeapRegionTask task(blk);
2053     workers()->run_task(&task);
2054   } else {
2055     heap_region_iterate(blk);
2056   }
2057 }
2058 
2059 class ShenandoahRendezvousClosure : public HandshakeClosure {
2060 public:
2061   inline ShenandoahRendezvousClosure() : HandshakeClosure("ShenandoahRendezvous") {}
2062   inline void do_thread(Thread* thread) {}
2063 };
2064 
2065 void ShenandoahHeap::rendezvous_threads() {
2066   ShenandoahRendezvousClosure cl;
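       // Executing an empty handshake forces every Java thread through a handshake operation, ensuring that
       // all threads have observed the global state changes made before this call.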
2067   Handshake::execute(&cl);
2068 }
2069 
2070 void ShenandoahHeap::recycle_trash() {
2071   free_set()->recycle_trash();
2072 }
2073 
2074 void ShenandoahHeap::do_class_unloading() {
2075   _unloader.unload();
2076 }
2077 
2078 void ShenandoahHeap::stw_weak_refs(bool full_gc) {
2079   // Weak refs processing
2080   ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
2081                                                 : ShenandoahPhaseTimings::degen_gc_weakrefs;
2082   ShenandoahTimingsTracker t(phase);
2083   ShenandoahGCWorkerPhase worker_phase(phase);
2084   active_generation()->ref_processor()->process_references(phase, workers(), false /* concurrent */);
2085 }
2086 
2087 void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
2088   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2089 
2090   // Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to
2091   // make them parsable for update code to work correctly. Plus, we can compute new sizes
2092   // for future GCLABs here.
2093   if (UseTLAB) {
2094     ShenandoahGCPhase phase(concurrent ?
2095                             ShenandoahPhaseTimings::init_update_refs_manage_gclabs :
2096                             ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
2097     gclabs_retire(ResizeTLAB);
2098   }
2099 
2100   _update_refs_iterator.reset();
2101 }
2102 
2103 void ShenandoahHeap::set_gc_state_all_threads(char state) {
2104   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
2105     ShenandoahThreadLocalData::set_gc_state(t, state);
2106   }
2107 }
2108 
2109 void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) {
2110   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should really be Shenandoah safepoint");
2111   _gc_state.set_cond(mask, value);
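       // Mirror the new state into each Java thread's thread-local copy, so that barrier fast paths can read
       // it without reloading the global state.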
2112   set_gc_state_all_threads(_gc_state.raw_value());
2113 }
2114 
2115 void ShenandoahHeap::set_concurrent_young_mark_in_progress(bool in_progress) {
2116   if (has_forwarded_objects()) {
2117     set_gc_state_mask(YOUNG_MARKING | UPDATEREFS, in_progress);
2118   } else {
2119     set_gc_state_mask(YOUNG_MARKING, in_progress);
2120   }
2121 
2122   manage_satb_barrier(in_progress);
2123 }
2124 
2125 void ShenandoahHeap::set_concurrent_old_mark_in_progress(bool in_progress) {
2126   if (has_forwarded_objects()) {
2127     set_gc_state_mask(OLD_MARKING | UPDATEREFS, in_progress);
2128   } else {
2129     set_gc_state_mask(OLD_MARKING, in_progress);
2130   }
2131 
2132   manage_satb_barrier(in_progress);
2133 }
2134 
2135 void ShenandoahHeap::set_prepare_for_old_mark_in_progress(bool in_progress) {
2136   // Unlike other set-gc-state functions, this may happen outside safepoint.
2137   // It is only set and queried by the control thread, so there are no coherence issues.
2138   _prepare_for_old_mark = in_progress;
2139 }
2140 
2141 void ShenandoahHeap::set_aging_cycle(bool in_progress) {
2142   _is_aging_cycle.set_cond(in_progress);
2143 }
2144 
2145 void ShenandoahHeap::manage_satb_barrier(bool active) {
2146   if (is_concurrent_mark_in_progress()) {
2147     // Ignore request to deactivate barrier while concurrent mark is in progress.
2148     // Do not attempt to re-activate the barrier if it is already active.
2149     if (active && !ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
2150       ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
2151     }
2152   } else {
2153     // No concurrent marking is in progress so honor request to deactivate,
2154     // but only if the barrier is already active.
2155     if (!active && ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
2156       ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
2157     }
2158   }
2159 }
2160 
2161 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
2162   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
2163   set_gc_state_mask(EVACUATION, in_progress);
2164 }
2165 
2166 void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
2167   if (in_progress) {
2168     _concurrent_strong_root_in_progress.set();
2169   } else {
2170     _concurrent_strong_root_in_progress.unset();
2171   }
2172 }
2173 
2174 void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
2175   set_gc_state_mask(WEAK_ROOTS, cond);
2176 }
2177 
2178 GCTracer* ShenandoahHeap::tracer() {
2179   return shenandoah_policy()->tracer();
2180 }
2181 
2182 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
2183   return _free_set->used();
2184 }
2185 
2186 bool ShenandoahHeap::try_cancel_gc() {
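       // Try to move the cancellation flag from CANCELLABLE to CANCELLED.  Return true if this thread performed
       // the cancellation, false if the GC was already cancelled.  The only other possible value is NOT_CANCELLED
       // (asserted below, and reachable only with suspendible workers); in that case spin and retry, yielding to a
       // pending safepoint if this is a Java thread.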
2187   while (true) {
2188     jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
2189     if (prev == CANCELLABLE) return true;
2190     else if (prev == CANCELLED) return false;
2191     assert(ShenandoahSuspendibleWorkers, "should not get here when not using suspendible workers");
2192     assert(prev == NOT_CANCELLED, "must be NOT_CANCELLED");
2193     Thread* thread = Thread::current();
2194     if (thread->is_Java_thread()) {
2195       // We need to provide a safepoint here, otherwise we might
2196       // spin forever if a SP is pending.
2197       ThreadBlockInVM sp(JavaThread::cast(thread));
2198       SpinPause();
2199     }
2200   }
2201 }
2202 
2203 void ShenandoahHeap::cancel_concurrent_mark() {
2204   _young_generation->cancel_marking();
2205   _old_generation->cancel_marking();
2206   _global_generation->cancel_marking();
2207 
2208   ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
2209 }
2210 
2211 void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
2212   if (try_cancel_gc()) {
2213     FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
2214     log_info(gc)("%s", msg.buffer());
2215     Events::log(Thread::current(), "%s", msg.buffer());
2216     _cancel_requested_time = os::elapsedTime();
2217     if (cause == GCCause::_shenandoah_upgrade_to_full_gc) {
2218       _upgraded_to_full = true;
2219     }
2220   }
2221 }
2222 
2223 uint ShenandoahHeap::max_workers() {
2224   return _max_workers;
2225 }
2226 
2227 void ShenandoahHeap::stop() {
2228   // The shutdown sequence should be able to terminate even when a GC cycle is running.
2229 
2230   // Step 0a. Stop requesting collections.
2231   regulator_thread()->stop();
2232 
2233   // Step 0. Notify policy to disable event recording.
2234   _shenandoah_policy->record_shutdown();
2235 
2236   // Step 1. Notify control thread that we are in shutdown.
2237   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
2238   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
2239   control_thread()->prepare_for_graceful_shutdown();
2240 
2241   // Step 2. Notify GC workers that we are cancelling GC.
2242   cancel_gc(GCCause::_shenandoah_stop_vm);
2243 
2244   // Step 3. Wait until GC worker exits normally.
2245   control_thread()->stop();
2246 }
2247 
2248 void ShenandoahHeap::stw_unload_classes(bool full_gc) {
2249   if (!unload_classes()) return;
2250   // Unload classes and purge SystemDictionary.
2251   {
2252     ShenandoahPhaseTimings::Phase phase = full_gc ?
2253                                           ShenandoahPhaseTimings::full_gc_purge_class_unload :
2254                                           ShenandoahPhaseTimings::degen_gc_purge_class_unload;
2255     ShenandoahIsAliveSelector is_alive;
2256     CodeCache::UnloadingScope scope(is_alive.is_alive_closure());
2257     ShenandoahGCPhase gc_phase(phase);
2258     ShenandoahGCWorkerPhase worker_phase(phase);
2259     bool purged_class = SystemDictionary::do_unloading(gc_timer());
2260 
2261     uint num_workers = _workers->active_workers();
2262     ShenandoahClassUnloadingTask unlink_task(phase, num_workers, purged_class);
2263     _workers->run_task(&unlink_task);
2264   }
2265 
2266   {
2267     ShenandoahGCPhase phase(full_gc ?
2268                             ShenandoahPhaseTimings::full_gc_purge_cldg :
2269                             ShenandoahPhaseTimings::degen_gc_purge_cldg);
2270     ClassLoaderDataGraph::purge(/*at_safepoint*/true);
2271   }
2272   // Resize and verify metaspace
2273   MetaspaceGC::compute_new_size();
2274   DEBUG_ONLY(MetaspaceUtils::verify();)
2275 }
2276 
2277 // Weak roots are either pre-evacuated (final mark) or updated (final updaterefs),
2278 // so they should not have forwarded oops.
2279 // However, we do need to "null out" dead oops in the roots if that cannot be done
2280 // in concurrent cycles.
2281 void ShenandoahHeap::stw_process_weak_roots(bool full_gc) {
2282   uint num_workers = _workers->active_workers();
2283   ShenandoahPhaseTimings::Phase timing_phase = full_gc ?
2284                                                ShenandoahPhaseTimings::full_gc_purge_weak_par :
2285                                                ShenandoahPhaseTimings::degen_gc_purge_weak_par;
2286   ShenandoahGCPhase phase(timing_phase);
2287   ShenandoahGCWorkerPhase worker_phase(timing_phase);
2288   // Cleanup weak roots
2289   if (has_forwarded_objects()) {
2290     ShenandoahForwardedIsAliveClosure is_alive;
2291     ShenandoahUpdateRefsClosure keep_alive;
2292     ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveClosure, ShenandoahUpdateRefsClosure>
2293       cleaning_task(timing_phase, &is_alive, &keep_alive, num_workers);
2294     _workers->run_task(&cleaning_task);
2295   } else {
2296     ShenandoahIsAliveClosure is_alive;
2297 #ifdef ASSERT
2298     ShenandoahAssertNotForwardedClosure verify_cl;
2299     ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, ShenandoahAssertNotForwardedClosure>
2300       cleaning_task(timing_phase, &is_alive, &verify_cl, num_workers);
2301 #else
2302     ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, DoNothingClosure>
2303       cleaning_task(timing_phase, &is_alive, &do_nothing_cl, num_workers);
2304 #endif
2305     _workers->run_task(&cleaning_task);
2306   }
2307 }
2308 
2309 void ShenandoahHeap::parallel_cleaning(bool full_gc) {
2310   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2311   assert(is_stw_gc_in_progress(), "Only for Degenerated and Full GC");
2312   ShenandoahGCPhase phase(full_gc ?
2313                           ShenandoahPhaseTimings::full_gc_purge :
2314                           ShenandoahPhaseTimings::degen_gc_purge);
2315   stw_weak_refs(full_gc);
2316   stw_process_weak_roots(full_gc);
2317   stw_unload_classes(full_gc);
2318 }
2319 
2320 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
2321   set_gc_state_mask(HAS_FORWARDED, cond);
2322 }
2323 
2324 void ShenandoahHeap::set_unload_classes(bool uc) {
2325   _unload_classes.set_cond(uc);
2326 }
2327 
2328 bool ShenandoahHeap::unload_classes() const {
2329   return _unload_classes.is_set();
2330 }
2331 
2332 address ShenandoahHeap::in_cset_fast_test_addr() {
2333   ShenandoahHeap* heap = ShenandoahHeap::heap();
2334   assert(heap->collection_set() != NULL, "Sanity");
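       // The biased map address allows the in-cset fast test to index the collection set byte map directly
       // with a region index derived from an oop's address.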
2335   return (address) heap->collection_set()->biased_map_address();
2336 }
2337 
2338 address ShenandoahHeap::gc_state_addr() {
2339   return (address) ShenandoahHeap::heap()->_gc_state.addr_of();
2340 }
2341 
2342 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
2343   if (mode()->is_generational()) {
2344     young_generation()->reset_bytes_allocated_since_gc_start();
2345     old_generation()->reset_bytes_allocated_since_gc_start();
2346   }
2347 
2348   global_generation()->reset_bytes_allocated_since_gc_start();
2349 }
2350 
2351 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
2352   _degenerated_gc_in_progress.set_cond(in_progress);
2353 }
2354 
2355 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2356   _full_gc_in_progress.set_cond(in_progress);
2357 }
2358 
2359 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2360   assert (is_full_gc_in_progress(), "should be");
2361   _full_gc_move_in_progress.set_cond(in_progress);
2362 }
2363 
2364 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2365   set_gc_state_mask(UPDATEREFS, in_progress);
2366 }
2367 
2368 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2369   ShenandoahCodeRoots::register_nmethod(nm);
2370 }
2371 
2372 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
2373   ShenandoahCodeRoots::unregister_nmethod(nm);
2374 }
2375 
2376 void ShenandoahHeap::flush_nmethod(nmethod* nm) {
2377   ShenandoahCodeRoots::flush_nmethod(nm);
2378 }
2379 
2380 oop ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
2381   heap_region_containing(o)->record_pin();
2382   return o;
2383 }
2384 
2385 void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
2386   ShenandoahHeapRegion* r = heap_region_containing(o);
2387   assert(r != NULL, "Sanity");
2388   assert(r->pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", r->index());
2389   r->record_unpin();
2390 }
2391 
2392 void ShenandoahHeap::sync_pinned_region_status() {
2393   ShenandoahHeapLocker locker(lock());
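       // Pin counts are updated by mutators without taking the heap lock (see pin_object/unpin_object); here,
       // under the heap lock, fold the current counts into each active region's pinned state.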
2394 
2395   for (size_t i = 0; i < num_regions(); i++) {
2396     ShenandoahHeapRegion *r = get_region(i);
2397     if (r->is_active()) {
2398       if (r->is_pinned()) {
2399         if (r->pin_count() == 0) {
2400           r->make_unpinned();
2401         }
2402       } else {
2403         if (r->pin_count() > 0) {
2404           r->make_pinned();
2405         }
2406       }
2407     }
2408   }
2409 
2410   assert_pinned_region_status();
2411 }
2412 
2413 #ifdef ASSERT
2414 void ShenandoahHeap::assert_pinned_region_status() {
2415   for (size_t i = 0; i < num_regions(); i++) {
2416     ShenandoahHeapRegion* r = get_region(i);
2417     if (active_generation()->contains(r)) {
2418       assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
2419              "Region " SIZE_FORMAT " pinning status is inconsistent", i);
2420     }
2421   }
2422 }
2423 #endif
2424 
2425 ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
2426   return _gc_timer;
2427 }
2428 
2429 void ShenandoahHeap::prepare_concurrent_roots() {
2430   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2431   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
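       // Strong roots only need concurrent processing when there is something in the collection set to
       // evacuate; concurrent weak root processing happens in every concurrent cycle.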
2432   set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
2433   set_concurrent_weak_root_in_progress(true);
2434   if (unload_classes()) {
2435     _unloader.prepare();
2436   }
2437 }
2438 
2439 void ShenandoahHeap::finish_concurrent_roots() {
2440   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2441   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2442   if (unload_classes()) {
2443     _unloader.finish();
2444   }
2445 }
2446 
2447 #ifdef ASSERT
2448 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
2449   assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
2450 
2451   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
2452     if (UseDynamicNumberOfGCThreads) {
2453       assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");
2454     } else {
2455       // Use ParallelGCThreads inside safepoints
2456       assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints");
2457     }
2458   } else {
2459     if (UseDynamicNumberOfGCThreads) {
2460       assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
2461     } else {
2462       // Use ConcGCThreads outside safepoints
2463       assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
2464     }
2465   }
2466 }
2467 #endif
2468 
2469 ShenandoahVerifier* ShenandoahHeap::verifier() {
2470   guarantee(ShenandoahVerify, "Should be enabled");
2471   assert (_verifier != NULL, "sanity");
2472   return _verifier;
2473 }
2474 
2475 template<bool CONCURRENT>
2476 class ShenandoahUpdateHeapRefsTask : public WorkerTask {
2477 private:
2478   ShenandoahHeap* _heap;
2479   ShenandoahRegionIterator* _regions;
2480   ShenandoahRegionChunkIterator* _work_chunks;
2481 
2482 public:
2483   explicit ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions,
2484                                         ShenandoahRegionChunkIterator* work_chunks) :
2485     WorkerTask("Shenandoah Update References"),
2486     _heap(ShenandoahHeap::heap()),
2487     _regions(regions),
2488     _work_chunks(work_chunks)
2489   {
2490   }
2491 
2492   void work(uint worker_id) {
2493     if (CONCURRENT) {
2494       ShenandoahConcurrentWorkerSession worker_session(worker_id);
2495       ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
2496       do_work<ShenandoahConcUpdateRefsClosure>(worker_id);
2497     } else {
2498       ShenandoahParallelWorkerSession worker_session(worker_id);
2499       do_work<ShenandoahSTWUpdateRefsClosure>(worker_id);
2500     }
2501   }
2502 
2503 private:
2504   template<class T>
2505   void do_work(uint worker_id) {
2506     T cl;
2507     ShenandoahHeapRegion* r = _regions->next();
2508     // We update references for global, old, and young collections.
2509     assert(_heap->active_generation()->is_mark_complete(), "Expected complete marking");
2510     ShenandoahMarkingContext* const ctx = _heap->marking_context();
2511     bool is_mixed = _heap->collection_set()->has_old_regions();
2512     while (r != NULL) {
2513       HeapWord* update_watermark = r->get_update_watermark();
2514       assert (update_watermark >= r->bottom(), "sanity");
2515 
2516       log_debug(gc)("ShenandoahUpdateHeapRefsTask::do_work(%u) looking at region " SIZE_FORMAT, worker_id, r->index());
2517       bool region_progress = false;
2518       if (r->is_active() && !r->is_cset()) {
2519         if (!_heap->mode()->is_generational() || (r->affiliation() == ShenandoahRegionAffiliation::YOUNG_GENERATION)) {
2520           _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2521           region_progress = true;
2522         } else if (r->affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION) {
2523           if (_heap->active_generation()->generation_mode() == GLOBAL) {
2524             // Note that GLOBAL collection is not as effectively balanced as young and mixed cycles.  This is because
2525             // concurrent GC threads are parceled out entire heap regions of work at a time and there
2526             // is no "catchup phase" consisting of remembered set scanning, during which parcels of work are smaller
2527             // and more easily distributed fairly across threads.
2528 
2529             // TODO: Consider an improvement to load balance GLOBAL GC.
2530             _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2531             region_progress = true;
2532           }
2533           // Otherwise, this is an old region in a young or mixed cycle.  Process it during a second phase, below.
2534           // Don't bother to report pacing progress in this case.
2535         } else {
2536           // Because updating of references runs concurrently, it is possible that a FREE inactive region transitions
2537           // to a non-free active region while this loop is executing.  Whenever this happens, the changing of a region's
2538           // active status may propagate at a different speed than the changing of the region's affiliation.
2539 
2540           // When we reach this control point, it is because a race has allowed a region's is_active() status to be seen
2541           // by this thread before the region's affiliation() is seen by this thread.
2542 
2543           // It's ok for this race to occur because the newly transformed region does not have any references to be
2544           // updated.
2545 
2546           assert(r->get_update_watermark() == r->bottom(),
2547                  "%s Region " SIZE_FORMAT " is_active but not recognized as YOUNG or OLD so must be newly transitioned from FREE",
2548                  affiliation_name(r->affiliation()), r->index());
2549         }
2550       }
2551       if (region_progress && ShenandoahPacing) {
2552         _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
2553       }
2554       if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
2555         return;
2556       }
2557       r = _regions->next();
2558     }
2559     if (_heap->mode()->is_generational() && (_heap->active_generation()->generation_mode() != GLOBAL)) {
2560       // Since this is generational and not GLOBAL, we have to process the remembered set.  There's no remembered
2561       // set processing if not in generational mode, or if in GLOBAL mode.
2562 
2563       // After this thread has exhausted its traditional update-refs work, it continues with updating refs within remembered set.
2564       // The remembered set workload is better balanced between threads, so threads that are "behind" can catch up with other
2565       // threads during this phase, allowing all threads to work more effectively in parallel.
2566       struct ShenandoahRegionChunk assignment;
2567       bool have_work = _work_chunks->next(&assignment);
2568       RememberedScanner* scanner = _heap->card_scan();
2569       while (have_work) {
2570         ShenandoahHeapRegion* r = assignment._r;
2571         if (r->is_active() && !r->is_cset() && (r->affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION)) {
2572           HeapWord* start_of_range = r->bottom() + assignment._chunk_offset;
2573           HeapWord* end_of_range = r->get_update_watermark();
2574           if (end_of_range > start_of_range + assignment._chunk_size) {
2575             end_of_range = start_of_range + assignment._chunk_size;
2576           }
2577 
2578           // Old region in a young cycle or mixed cycle.
2579           if (is_mixed) {
2580             // TODO: For mixed evac, consider building an old-gen remembered set that allows restricted updating
2581             // within old-gen HeapRegions.  This remembered set can be constructed by old-gen concurrent marking
2582             // and augmented by card marking.  For example, old-gen concurrent marking can remember for each old-gen
2583             // card which other old-gen regions it refers to: none, one-other specifically, multiple-other non-specific.
2584             // Update references when _mixed_evac processes each old-gen memory range that has a traditional DIRTY
2585             // card or if the "old-gen remembered set" indicates that this card holds pointers specifically to an
2586             // old-gen region in the most recent collection set, or if this card holds pointers to other non-specific
2587             // old-gen heap regions.
2588 
2589             if (r->is_humongous()) {
2590               if (start_of_range < end_of_range) {
2591                 // Need to examine both dirty and clean cards during mixed evac.
2592                 r->oop_iterate_humongous_slice(&cl, false, start_of_range, assignment._chunk_size, true, CONCURRENT);
2593               }
2594             } else {
2595               // Since this is mixed evacuation, old regions that are candidates for collection have not been coalesced
2596               // and filled.  Use mark bits to find objects that need to be updated.
2597               //
2598               // Future TODO: establish a second remembered set to identify which old-gen regions point to other old-gen
2599               // regions which are in the collection set for a particular mixed evacuation.
2600               if (start_of_range < end_of_range) {
2601                 HeapWord* p = nullptr;
2602                 size_t card_index = scanner->card_index_for_addr(start_of_range);
                // In case the last object in my range spans the boundary of my chunk, I may need to scan all the way to top().
2604                 ShenandoahObjectToOopBoundedClosure<T> objs(&cl, start_of_range, r->top());
2605 
                // Any object that begins in a previous range is part of a different scanning assignment.  Any object that
                // starts after end_of_range is also not my responsibility.  (Either it was allocated during evacuation, so
                // it does not hold pointers to from-space, or it lies beyond the range of my assigned work chunk.)
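                // For illustration: if two adjacent chunks A and B lie in the same region, an object whose header is
                // in chunk A but whose body extends into chunk B is updated entirely by the worker that owns chunk A,
                // while the worker that owns chunk B starts from the first object that begins within B.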
2609 
2610                 // Find the first object that begins in my range, if there is one.
2611                 p = start_of_range;
2612                 oop obj = cast_to_oop(p);
2613                 HeapWord* tams = ctx->top_at_mark_start(r);
2614                 if (p >= tams) {
2615                   // We cannot use ctx->is_marked(obj) to test whether an object begins at this address.  Instead,
2616                   // we need to use the remembered set crossing map to advance p to the first object that starts
2617                   // within the enclosing card.
2618 
2619                   while (true) {
2620                     HeapWord* first_object = scanner->first_object_in_card(card_index);
2621                     if (first_object != nullptr) {
2622                       p = first_object;
2623                       break;
2624                     } else if (scanner->addr_for_card_index(card_index + 1) < end_of_range) {
2625                       card_index++;
2626                     } else {
2627                       // Force the loop that follows to immediately terminate.
2628                       p = end_of_range;
2629                       break;
2630                     }
2631                   }
2632                   obj = cast_to_oop(p);
2633                   // Note: p may be >= end_of_range
2634                 } else if (!ctx->is_marked(obj)) {
2635                   p = ctx->get_next_marked_addr(p, tams);
2636                   obj = cast_to_oop(p);
2637                   // If there are no more marked objects before tams, this returns tams.
2638                   // Note that tams is either >= end_of_range, or tams is the start of an object that is marked.
2639                 }
2640                 while (p < end_of_range) {
2641                   // p is known to point to the beginning of marked object obj
2642                   objs.do_object(obj);
2643                   HeapWord* prev_p = p;
2644                   p += obj->size();
2645                   if (p < tams) {
2646                     p = ctx->get_next_marked_addr(p, tams);
2647                     // If there are no more marked objects before tams, this returns tams.  Note that tams is
2648                     // either >= end_of_range, or tams is the start of an object that is marked.
2649                   }
2650                   assert(p != prev_p, "Lack of forward progress");
2651                   obj = cast_to_oop(p);
2652                 }
2653               }
2654             }
2655           } else {
            // This is a young evacuation.
2657             if (start_of_range < end_of_range) {
2658               size_t cluster_size =
2659                 CardTable::card_size_in_words() * ShenandoahCardCluster<ShenandoahDirectCardMarkRememberedSet>::CardsPerCluster;
2660               size_t clusters = assignment._chunk_size / cluster_size;
2661               assert(clusters * cluster_size == assignment._chunk_size, "Chunk assignment must align on cluster boundaries");
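              // Worked example with illustrative numbers only: assuming 512-byte cards (64 HeapWords on a 64-bit VM)
              // and 64 cards per cluster, each cluster spans 4096 words, so a 65536-word chunk comprises 16 clusters.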
2662               scanner->process_region_slice(r, assignment._chunk_offset, clusters, end_of_range, &cl, true, CONCURRENT);
2663             }
2664           }
2665           if (ShenandoahPacing && (start_of_range < end_of_range)) {
2666             _heap->pacer()->report_updaterefs(pointer_delta(end_of_range, start_of_range));
2667           }
2668         }
2669         // Otherwise, this work chunk had nothing for me to do, so do not report pacer progress.
2670 
2671         // Before we take responsibility for another chunk of work, see if cancellation is requested.
2672         if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
2673           return;
2674         }
2675         have_work = _work_chunks->next(&assignment);
2676       }
2677     }
2678   }
2679 };
2680 
2681 void ShenandoahHeap::update_heap_references(bool concurrent) {
2682   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2683   ShenandoahRegionChunkIterator work_list(workers()->active_workers());
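  // A single region-chunk iterator is shared by all workers in the task; see ShenandoahUpdateHeapRefsTask above
  // for how the chunks are claimed and consumed during remembered-set scanning.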
2684 
2685   if (concurrent) {
2686     ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator, &work_list);
2687     workers()->run_task(&task);
2688   } else {
2689     ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator, &work_list);
2690     workers()->run_task(&task);
2691   }
2692 }
2693 
2694 class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
2695 private:
2696   ShenandoahMarkingContext* _ctx;
2697   ShenandoahHeapLock* const _lock;
2698   bool _is_generational;
2699 
2700 public:
2701   ShenandoahFinalUpdateRefsUpdateRegionStateClosure(
2702     ShenandoahMarkingContext* ctx) : _ctx(ctx), _lock(ShenandoahHeap::heap()->lock()),
2703                                      _is_generational(ShenandoahHeap::heap()->mode()->is_generational()) { }
2704 
2705   void heap_region_do(ShenandoahHeapRegion* r) {
2706 
2707     // Maintenance of region age must follow evacuation in order to account for evacuation allocations within survivor
2708     // regions.  We consult region age during the subsequent evacuation to determine whether certain objects need to
2709     // be promoted.
2710     if (_is_generational && r->is_young()) {
2711       HeapWord *tams = _ctx->top_at_mark_start(r);
2712       HeapWord *top = r->top();
2713 
      // Allocations move the watermark when top moves.  However, compacting
      // objects will sometimes lower top beneath the watermark, after which
      // attempts to read the watermark will assert out (the watermark should not be
      // higher than top).
      if (top > tams) {
        // There have been allocations in this region since the start of the cycle.
        // Objects newly allocated in this region must not inherit the region's elevated age.
2721         r->reset_age();
2722       } else if (ShenandoahHeap::heap()->is_aging_cycle()) {
2723         r->increment_age();
2724       }
2725     }
2726 
2727     // Drop unnecessary "pinned" state from regions that does not have CP marks
2728     // anymore, as this would allow trashing them.
2729     if (r->is_active()) {
2730       if (r->is_pinned()) {
2731         if (r->pin_count() == 0) {
2732           ShenandoahHeapLocker locker(_lock);
2733           r->make_unpinned();
2734         }
2735       } else {
2736         if (r->pin_count() > 0) {
2737           ShenandoahHeapLocker locker(_lock);
2738           r->make_pinned();
2739         }
2740       }
2741     }
2742   }
2743 
2744   bool is_thread_safe() { return true; }
2745 };
2746 
2747 void ShenandoahHeap::update_heap_region_states(bool concurrent) {
2748   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2749   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2750 
2751   {
2752     ShenandoahGCPhase phase(concurrent ?
2753                             ShenandoahPhaseTimings::final_update_refs_update_region_states :
2754                             ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
    ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl(active_generation()->complete_marking_context());
2756     parallel_heap_region_iterate(&cl);
2757 
2758     assert_pinned_region_status();
2759   }
2760 
2761   {
2762     ShenandoahGCPhase phase(concurrent ?
2763                             ShenandoahPhaseTimings::final_update_refs_trash_cset :
2764                             ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
2765     trash_cset_regions();
2766   }
2767 }
2768 
2769 void ShenandoahHeap::rebuild_free_set(bool concurrent) {
2770   {
2771     ShenandoahGCPhase phase(concurrent ?
2772                             ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
2773                             ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
2774     ShenandoahHeapLocker locker(lock());
2775     _free_set->rebuild();
2776   }
2777 }
2778 
2779 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2780   print_on(st);
2781   print_heap_regions_on(st);
2782 }
2783 
2784 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2785   size_t slice = r->index() / _bitmap_regions_per_slice;
2786 
2787   size_t regions_from = _bitmap_regions_per_slice * slice;
2788   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2789   for (size_t g = regions_from; g < regions_to; g++) {
2790     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2791     if (skip_self && g == r->index()) continue;
2792     if (get_region(g)->is_committed()) {
2793       return true;
2794     }
2795   }
2796   return false;
2797 }
2798 
2799 bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2800   shenandoah_assert_heaplocked();
2801 
2802   // Bitmaps in special regions do not need commits
2803   if (_bitmap_region_special) {
2804     return true;
2805   }
2806 
2807   if (is_bitmap_slice_committed(r, true)) {
    // Some other region from the group is already committed, meaning the bitmap
    // slice is already committed, so we exit right away.
2810     return true;
2811   }
2812 
2813   // Commit the bitmap slice:
2814   size_t slice = r->index() / _bitmap_regions_per_slice;
2815   size_t off = _bitmap_bytes_per_slice * slice;
2816   size_t len = _bitmap_bytes_per_slice;
2817   char* start = (char*) _bitmap_region.start() + off;
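  // For illustration (hypothetical numbers): if each bitmap slice covers 8 regions, then region index 19 maps to
  // slice 2, whose backing memory begins at byte offset 2 * _bitmap_bytes_per_slice within _bitmap_region.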
2818 
2819   if (!os::commit_memory(start, len, false)) {
2820     return false;
2821   }
2822 
2823   if (AlwaysPreTouch) {
2824     os::pretouch_memory(start, start + len, _pretouch_bitmap_page_size);
2825   }
2826 
2827   return true;
2828 }
2829 
2830 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2831   shenandoah_assert_heaplocked();
2832 
2833   // Bitmaps in special regions do not need uncommits
2834   if (_bitmap_region_special) {
2835     return true;
2836   }
2837 
2838   if (is_bitmap_slice_committed(r, true)) {
    // Some other region from the group is still committed, meaning the bitmap
    // slice should stay committed; exit right away.
2841     return true;
2842   }
2843 
2844   // Uncommit the bitmap slice:
2845   size_t slice = r->index() / _bitmap_regions_per_slice;
2846   size_t off = _bitmap_bytes_per_slice * slice;
2847   size_t len = _bitmap_bytes_per_slice;
2848   if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
2849     return false;
2850   }
2851   return true;
2852 }
2853 
2854 void ShenandoahHeap::safepoint_synchronize_begin() {
2855   if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
2856     SuspendibleThreadSet::synchronize();
2857   }
2858 }
2859 
2860 void ShenandoahHeap::safepoint_synchronize_end() {
2861   if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
2862     SuspendibleThreadSet::desynchronize();
2863   }
2864 }
2865 
2866 void ShenandoahHeap::entry_uncommit(double shrink_before, size_t shrink_until) {
2867   static const char *msg = "Concurrent uncommit";
2868   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_uncommit, true /* log_heap_usage */);
2869   EventMark em("%s", msg);
2870 
2871   op_uncommit(shrink_before, shrink_until);
2872 }
2873 
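// With ShenandoahAllocFailureALot enabled, roughly 5% of calls (random draws of 951..999 out of 0..999) set the
// injection flag, sleep briefly, and then log whether the injected allocation failure cancelled the GC.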
2874 void ShenandoahHeap::try_inject_alloc_failure() {
2875   if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2876     _inject_alloc_failure.set();
2877     os::naked_short_sleep(1);
2878     if (cancelled_gc()) {
2879       log_info(gc)("Allocation failure was successfully injected");
2880     }
2881   }
2882 }
2883 
2884 bool ShenandoahHeap::should_inject_alloc_failure() {
2885   return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2886 }
2887 
2888 void ShenandoahHeap::initialize_serviceability() {
2889   if (mode()->is_generational()) {
2890     _young_gen_memory_pool = new ShenandoahYoungGenMemoryPool(this);
2891     _old_gen_memory_pool = new ShenandoahOldGenMemoryPool(this);
2892     _cycle_memory_manager.add_pool(_young_gen_memory_pool);
2893     _cycle_memory_manager.add_pool(_old_gen_memory_pool);
2894     _stw_memory_manager.add_pool(_young_gen_memory_pool);
2895     _stw_memory_manager.add_pool(_old_gen_memory_pool);
2896   } else {
2897     _memory_pool = new ShenandoahMemoryPool(this);
2898     _cycle_memory_manager.add_pool(_memory_pool);
2899     _stw_memory_manager.add_pool(_memory_pool);
2900   }
2901 }
2902 
2903 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2904   GrowableArray<GCMemoryManager*> memory_managers(2);
2905   memory_managers.append(&_cycle_memory_manager);
2906   memory_managers.append(&_stw_memory_manager);
2907   return memory_managers;
2908 }
2909 
2910 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2911   GrowableArray<MemoryPool*> memory_pools(1);
2912   if (mode()->is_generational()) {
2913     memory_pools.append(_young_gen_memory_pool);
2914     memory_pools.append(_old_gen_memory_pool);
2915   } else {
2916     memory_pools.append(_memory_pool);
2917   }
2918   return memory_pools;
2919 }
2920 
2921 MemoryUsage ShenandoahHeap::memory_usage() {
2922   return MemoryUsage(_initial_size, used(), committed(), max_capacity());
2923 }
2924 
2925 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2926   _heap(ShenandoahHeap::heap()),
2927   _index(0) {}
2928 
2929 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2930   _heap(heap),
2931   _index(0) {}
2932 
2933 void ShenandoahRegionIterator::reset() {
2934   _index = 0;
2935 }
2936 
2937 bool ShenandoahRegionIterator::has_next() const {
2938   return _index < _heap->num_regions();
2939 }
2940 
2941 char ShenandoahHeap::gc_state() const {
2942   return _gc_state.raw_value();
2943 }
2944 
2945 ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
2946 #ifdef ASSERT
2947   assert(_liveness_cache != NULL, "sanity");
2948   assert(worker_id < _max_workers, "sanity");
2949   for (uint i = 0; i < num_regions(); i++) {
2950     assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
2951   }
2952 #endif
2953   return _liveness_cache[worker_id];
2954 }
2955 
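// Fold this worker's per-region liveness counts into the regions' global live-data counters and zero the cache,
// re-establishing the "cache is empty" invariant asserted in get_liveness_cache().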
2956 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
2957   assert(worker_id < _max_workers, "sanity");
2958   assert(_liveness_cache != NULL, "sanity");
2959   ShenandoahLiveData* ld = _liveness_cache[worker_id];
2960 
2961   for (uint i = 0; i < num_regions(); i++) {
2962     ShenandoahLiveData live = ld[i];
2963     if (live > 0) {
2964       ShenandoahHeapRegion* r = get_region(i);
2965       r->increase_live_data_gc_words(live);
2966       ld[i] = 0;
2967     }
2968   }
2969 }
2970 
2971 bool ShenandoahHeap::requires_barriers(stackChunkOop obj) const {
2972   if (is_idle()) return false;
2973 
  // Objects allocated after the start of marking are implicitly alive and do not need any barriers during
  // the marking phase.
2976   if (is_concurrent_mark_in_progress() &&
2977      !marking_context()->allocated_after_mark_start(obj)) {
2978     return true;
2979   }
2980 
  // Cannot guarantee that obj is deeply good: the heap still holds forwarded objects, so the chunk may contain
  // stale (from-space) references.
2982   if (has_forwarded_objects()) {
2983     return true;
2984   }
2985 
2986   return false;
2987 }
2988 
2989 void ShenandoahHeap::transfer_old_pointers_from_satb() {
2990   _old_generation->transfer_pointers_from_satb();
2991 }
2992 
2993 template<>
2994 void ShenandoahGenerationRegionClosure<YOUNG>::heap_region_do(ShenandoahHeapRegion* region) {
2995   // Visit young and free regions
2996   if (region->affiliation() != OLD_GENERATION) {
2997     _cl->heap_region_do(region);
2998   }
2999 }
3000 
3001 template<>
3002 void ShenandoahGenerationRegionClosure<OLD>::heap_region_do(ShenandoahHeapRegion* region) {
3003   // Visit old and free regions
3004   if (region->affiliation() != YOUNG_GENERATION) {
3005     _cl->heap_region_do(region);
3006   }
3007 }
3008 
3009 template<>
3010 void ShenandoahGenerationRegionClosure<GLOBAL>::heap_region_do(ShenandoahHeapRegion* region) {
3011   _cl->heap_region_do(region);
3012 }
3013 
// Assure that the remembered set has a dirty card everywhere there is an interesting pointer.
// This examines the read_card_table between bottom() and top() since all PLABs are retired
// before the safepoint for init_mark.  Actually, we retire them before update-references and don't
// restore them until the start of evacuation.
3018 void ShenandoahHeap::verify_rem_set_at_mark() {
3019   shenandoah_assert_safepoint();
3020   assert(mode()->is_generational(), "Only verify remembered set for generational operational modes");
3021 
3022   ShenandoahRegionIterator iterator;
3023   RememberedScanner* scanner = card_scan();
3024   ShenandoahVerifyRemSetClosure check_interesting_pointers(true);
3025   ShenandoahMarkingContext* ctx;
3026 
3027   log_debug(gc)("Verifying remembered set at %s mark", doing_mixed_evacuations()? "mixed": "young");
3028 
3029   if (is_old_bitmap_stable() || active_generation()->generation_mode() == GLOBAL) {
3030     ctx = complete_marking_context();
3031   } else {
3032     ctx = nullptr;
3033   }
3034 
3035   while (iterator.has_next()) {
3036     ShenandoahHeapRegion* r = iterator.next();
3037     if (r == nullptr)
3038       break;
3039     if (r->is_old() && r->is_active()) {
3040       HeapWord* obj_addr = r->bottom();
3041       if (r->is_humongous_start()) {
3042         oop obj = cast_to_oop(obj_addr);
3043         if (!ctx || ctx->is_marked(obj)) {
3044           // For humongous objects, the typical object is an array, so the following checks may be overkill
3045           // For regular objects (not object arrays), if the card holding the start of the object is dirty,
3046           // we do not need to verify that cards spanning interesting pointers within this object are dirty.
3047           if (!scanner->is_card_dirty(obj_addr) || obj->is_objArray()) {
3048             obj->oop_iterate(&check_interesting_pointers);
3049           }
3050           // else, object's start is marked dirty and obj is not an objArray, so any interesting pointers are covered
3051         }
3052         // else, this humongous object is not marked so no need to verify its internal pointers
3053         if (!scanner->verify_registration(obj_addr, ctx)) {
3054           ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, obj_addr, NULL,
3055                                           "Verify init-mark remembered set violation", "object not properly registered", __FILE__, __LINE__);
3056         }
3057       } else if (!r->is_humongous()) {
3058         HeapWord* top = r->top();
3059         while (obj_addr < top) {
3060           oop obj = cast_to_oop(obj_addr);
3061           // ctx->is_marked() returns true if mark bit set (TAMS not relevant during init mark)
3062           if (!ctx || ctx->is_marked(obj)) {
3063             // For regular objects (not object arrays), if the card holding the start of the object is dirty,
3064             // we do not need to verify that cards spanning interesting pointers within this object are dirty.
3065             if (!scanner->is_card_dirty(obj_addr) || obj->is_objArray()) {
3066               obj->oop_iterate(&check_interesting_pointers);
3067             }
3068             // else, object's start is marked dirty and obj is not an objArray, so any interesting pointers are covered
3069             if (!scanner->verify_registration(obj_addr, ctx)) {
3070               ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, obj_addr, NULL,
3071                                             "Verify init-mark remembered set violation", "object not properly registered", __FILE__, __LINE__);
3072             }
3073             obj_addr += obj->size();
3074           } else {
3075             // This object is not live so we don't verify dirty cards contained therein
3076             assert(ctx->top_at_mark_start(r) == top, "Expect tams == top at start of mark.");
3077             obj_addr = ctx->get_next_marked_addr(obj_addr, top);
3078           }
3079         }
3080       } // else, we ignore humongous continuation region
3081     } // else, this is not an OLD region so we ignore it
3082   } // all regions have been processed
3083 }
3084 
3085 void ShenandoahHeap::help_verify_region_rem_set(ShenandoahHeapRegion* r, ShenandoahMarkingContext* ctx, HeapWord* from,
3086                                                 HeapWord* top, HeapWord* registration_watermark, const char* message) {
3087   RememberedScanner* scanner = card_scan();
3088   ShenandoahVerifyRemSetClosure check_interesting_pointers(false);
3089 
3090   HeapWord* obj_addr = from;
3091   if (r->is_humongous_start()) {
3092     oop obj = cast_to_oop(obj_addr);
3093     if (!ctx || ctx->is_marked(obj)) {
3094       size_t card_index = scanner->card_index_for_addr(obj_addr);
3095       // For humongous objects, the typical object is an array, so the following checks may be overkill
3096       // For regular objects (not object arrays), if the card holding the start of the object is dirty,
3097       // we do not need to verify that cards spanning interesting pointers within this object are dirty.
3098       if (!scanner->is_write_card_dirty(card_index) || obj->is_objArray()) {
3099         obj->oop_iterate(&check_interesting_pointers);
3100       }
3101       // else, object's start is marked dirty and obj is not an objArray, so any interesting pointers are covered
3102     }
3103     // else, this humongous object is not live so no need to verify its internal pointers
3104 
3105     if ((obj_addr < registration_watermark) && !scanner->verify_registration(obj_addr, ctx)) {
3106       ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, obj_addr, NULL, message,
3107                                        "object not properly registered", __FILE__, __LINE__);
3108     }
3109   } else if (!r->is_humongous()) {
3110     while (obj_addr < top) {
3111       oop obj = cast_to_oop(obj_addr);
3112       // ctx->is_marked() returns true if mark bit set or if obj above TAMS.
3113       if (!ctx || ctx->is_marked(obj)) {
3114         size_t card_index = scanner->card_index_for_addr(obj_addr);
3115         // For regular objects (not object arrays), if the card holding the start of the object is dirty,
3116         // we do not need to verify that cards spanning interesting pointers within this object are dirty.
3117         if (!scanner->is_write_card_dirty(card_index) || obj->is_objArray()) {
3118           obj->oop_iterate(&check_interesting_pointers);
3119         }
3120         // else, object's start is marked dirty and obj is not an objArray, so any interesting pointers are covered
3121 
3122         if ((obj_addr < registration_watermark) && !scanner->verify_registration(obj_addr, ctx)) {
3123           ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, obj_addr, NULL, message,
3124                                            "object not properly registered", __FILE__, __LINE__);
3125         }
3126         obj_addr += obj->size();
3127       } else {
3128         // This object is not live so we don't verify dirty cards contained therein
3129         HeapWord* tams = ctx->top_at_mark_start(r);
3130         obj_addr = ctx->get_next_marked_addr(obj_addr, tams);
3131       }
3132     }
3133   }
3134 }
3135 
3136 void ShenandoahHeap::verify_rem_set_after_full_gc() {
3137   shenandoah_assert_safepoint();
3138   assert(mode()->is_generational(), "Only verify remembered set for generational operational modes");
3139 
3140   ShenandoahRegionIterator iterator;
3141 
3142   while (iterator.has_next()) {
3143     ShenandoahHeapRegion* r = iterator.next();
3144     if (r == nullptr)
3145       break;
3146     if (r->is_old() && !r->is_cset()) {
3147       help_verify_region_rem_set(r, nullptr, r->bottom(), r->top(), r->top(), "Remembered set violation at end of Full GC");
3148     }
3149   }
3150 }
3151 
// Assure that the remembered set has a dirty card everywhere there is an interesting pointer.  Even though
// the update-references scan of the remembered set only examines cards up to update_watermark, the remembered
// set should be valid through top.  This examines the write_card_table between bottom() and top() because
// all PLABs are retired immediately before the start of update refs.
3156 void ShenandoahHeap::verify_rem_set_at_update_ref() {
3157   shenandoah_assert_safepoint();
3158   assert(mode()->is_generational(), "Only verify remembered set for generational operational modes");
3159 
3160   ShenandoahRegionIterator iterator;
3161   ShenandoahMarkingContext* ctx;
3162 
3163   if (is_old_bitmap_stable() || active_generation()->generation_mode() == GLOBAL) {
3164     ctx = complete_marking_context();
3165   } else {
3166     ctx = nullptr;
3167   }
3168 
3169   while (iterator.has_next()) {
3170     ShenandoahHeapRegion* r = iterator.next();
3171     if (r == nullptr)
3172       break;
3173     if (r->is_old() && !r->is_cset()) {
3174       help_verify_region_rem_set(r, ctx, r->bottom(), r->top(), r->get_update_watermark(),
3175                                  "Remembered set violation at init-update-references");
3176     }
3177   }
3178 }
3179 
3180 ShenandoahGeneration* ShenandoahHeap::generation_for(ShenandoahRegionAffiliation affiliation) const {
3181   if (!mode()->is_generational()) {
3182     return global_generation();
3183   } else if (affiliation == YOUNG_GENERATION) {
3184     return young_generation();
3185   } else if (affiliation == OLD_GENERATION) {
3186     return old_generation();
3187   }
3188 
3189   ShouldNotReachHere();
3190   return nullptr;
3191 }