1 /*
   2  * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.
   4  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
   5  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   6  *
   7  * This code is free software; you can redistribute it and/or modify it
   8  * under the terms of the GNU General Public License version 2 only, as
   9  * published by the Free Software Foundation.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 
  28 #include "cds/archiveHeapWriter.hpp"
  29 #include "classfile/systemDictionary.hpp"
  30 
  31 #include "gc/shared/classUnloadingContext.hpp"
  32 #include "gc/shared/fullGCForwarding.hpp"
  33 #include "gc/shared/gcArguments.hpp"
  34 #include "gc/shared/gcTimer.hpp"
  35 #include "gc/shared/gcTraceTime.inline.hpp"
  36 #include "gc/shared/locationPrinter.inline.hpp"
  37 #include "gc/shared/memAllocator.hpp"
  38 #include "gc/shared/plab.hpp"
  39 #include "gc/shared/tlab_globals.hpp"
  40 
  41 #include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
  42 #include "gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp"
  43 #include "gc/shenandoah/shenandoahAllocRequest.hpp"
  44 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  45 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  46 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  47 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  48 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
  49 #include "gc/shenandoah/shenandoahControlThread.hpp"
  50 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  51 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  52 #include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
  53 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
  54 #include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
  55 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  56 #include "gc/shenandoah/shenandoahHeapRegionClosures.hpp"
  57 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  58 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  59 #include "gc/shenandoah/shenandoahInitLogger.hpp"
  60 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  61 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
  62 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  63 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  64 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
  65 #include "gc/shenandoah/shenandoahPadding.hpp"
  66 #include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
  67 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  68 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  69 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  70 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
  71 #include "gc/shenandoah/shenandoahSTWMark.hpp"
  72 #include "gc/shenandoah/shenandoahUncommitThread.hpp"
  73 #include "gc/shenandoah/shenandoahUtils.hpp"
  74 #include "gc/shenandoah/shenandoahVerifier.hpp"
  75 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  76 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  77 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  78 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  79 #include "gc/shenandoah/mode/shenandoahGenerationalMode.hpp"
  80 #include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
  81 #include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
  82 
  83 #if INCLUDE_JFR
  84 #include "gc/shenandoah/shenandoahJfrSupport.hpp"
  85 #endif
  86 
  87 #include "memory/allocation.hpp"
  89 #include "memory/classLoaderMetaspace.hpp"
  90 #include "memory/memoryReserver.hpp"
  91 #include "memory/metaspaceUtils.hpp"
  92 #include "memory/universe.hpp"
  93 #include "nmt/mallocTracker.hpp"
  94 #include "nmt/memTracker.hpp"
  95 #include "oops/compressedOops.inline.hpp"
  96 #include "prims/jvmtiTagMap.hpp"
  97 #include "runtime/atomic.hpp"
  98 #include "runtime/globals.hpp"
  99 #include "runtime/interfaceSupport.inline.hpp"
 100 #include "runtime/java.hpp"
 101 #include "runtime/orderAccess.hpp"
 102 #include "runtime/safepointMechanism.hpp"
 103 #include "runtime/stackWatermarkSet.hpp"
 104 #include "runtime/threads.hpp"
 105 #include "runtime/vmThread.hpp"
 106 #include "utilities/globalDefinitions.hpp"
 107 #include "utilities/events.hpp"
 108 #include "utilities/powerOfTwo.hpp"
 109 
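// Worker task that walks all heap regions and pre-touches the memory of the
// committed ones, so the OS backs them with pages before the heap is used.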
 110 class ShenandoahPretouchHeapTask : public WorkerTask {
 111 private:
 112   ShenandoahRegionIterator _regions;
 113   const size_t _page_size;
 114 public:
 115   ShenandoahPretouchHeapTask(size_t page_size) :
 116     WorkerTask("Shenandoah Pretouch Heap"),
 117     _page_size(page_size) {}
 118 
 119   virtual void work(uint worker_id) {
 120     ShenandoahHeapRegion* r = _regions.next();
 121     while (r != nullptr) {
 122       if (r->is_committed()) {
 123         os::pretouch_memory(r->bottom(), r->end(), _page_size);
 124       }
 125       r = _regions.next();
 126     }
 127   }
 128 };
 129 
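// Worker task that pre-touches the portions of the mark bitmap that correspond
// to committed heap regions, keeping bitmap and heap pre-touch work aligned.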
 130 class ShenandoahPretouchBitmapTask : public WorkerTask {
 131 private:
 132   ShenandoahRegionIterator _regions;
 133   char* _bitmap_base;
 134   const size_t _bitmap_size;
 135   const size_t _page_size;
 136 public:
 137   ShenandoahPretouchBitmapTask(char* bitmap_base, size_t bitmap_size, size_t page_size) :
 138     WorkerTask("Shenandoah Pretouch Bitmap"),
 139     _bitmap_base(bitmap_base),
 140     _bitmap_size(bitmap_size),
 141     _page_size(page_size) {}
 142 
 143   virtual void work(uint worker_id) {
 144     ShenandoahHeapRegion* r = _regions.next();
 145     while (r != nullptr) {
 146       size_t start = r->index()       * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 147       size_t end   = (r->index() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      assert (end <= _bitmap_size, "end is sane: %zu <= %zu", end, _bitmap_size);
 149 
 150       if (r->is_committed()) {
 151         os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);
 152       }
 153 
 154       r = _regions.next();
 155     }
 156   }
 157 };
 158 
 159 static ReservedSpace reserve(size_t size, size_t preferred_page_size) {
  // When a page size is given, we don't want to mix large
  // and normal pages. If the size is not a multiple of the
  // page size, it is aligned up to achieve this.
 163   size_t alignment = os::vm_allocation_granularity();
 164   if (preferred_page_size != os::vm_page_size()) {
 165     alignment = MAX2(preferred_page_size, alignment);
 166     size = align_up(size, alignment);
 167   }
 168 
 169   const ReservedSpace reserved = MemoryReserver::reserve(size, alignment, preferred_page_size);
 170   if (!reserved.is_reserved()) {
 171     vm_exit_during_initialization("Could not reserve space");
 172   }
 173   return reserved;
 174 }
 175 
 176 jint ShenandoahHeap::initialize() {
 177   //
 178   // Figure out heap sizing
 179   //
 180 
 181   size_t init_byte_size = InitialHeapSize;
 182   size_t min_byte_size  = MinHeapSize;
 183   size_t max_byte_size  = MaxHeapSize;
 184   size_t heap_alignment = HeapAlignment;
 185 
 186   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 187 
 188   Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
 189   Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
 190 
 191   _num_regions = ShenandoahHeapRegion::region_count();
 192   assert(_num_regions == (max_byte_size / reg_size_bytes),
 193          "Regions should cover entire heap exactly: %zu != %zu/%zu",
 194          _num_regions, max_byte_size, reg_size_bytes);
 195 
 196   size_t num_committed_regions = init_byte_size / reg_size_bytes;
 197   num_committed_regions = MIN2(num_committed_regions, _num_regions);
 198   assert(num_committed_regions <= _num_regions, "sanity");
 199   _initial_size = num_committed_regions * reg_size_bytes;
 200 
 201   size_t num_min_regions = min_byte_size / reg_size_bytes;
 202   num_min_regions = MIN2(num_min_regions, _num_regions);
 203   assert(num_min_regions <= _num_regions, "sanity");
 204   _minimum_size = num_min_regions * reg_size_bytes;
 205 
 206   // Default to max heap size.
 207   _soft_max_size = _num_regions * reg_size_bytes;
 208 
 209   _committed = _initial_size;
 210 
 211   size_t heap_page_size   = UseLargePages ? os::large_page_size() : os::vm_page_size();
 212   size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 213   size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 214 
 215   //
 216   // Reserve and commit memory for heap
 217   //
 218 
 219   ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
 220   initialize_reserved_region(heap_rs);
 221   _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
 222   _heap_region_special = heap_rs.special();
 223 
 224   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
 225          "Misaligned heap: " PTR_FORMAT, p2i(base()));
 226   os::trace_page_sizes_for_requested_size("Heap",
 227                                           max_byte_size, heap_alignment,
 228                                           heap_rs.base(),
 229                                           heap_rs.size(), heap_rs.page_size());
 230 
 231 #if SHENANDOAH_OPTIMIZED_MARKTASK
 232   // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
 233   // Fail if we ever attempt to address more than we can.
 234   if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
 235     FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
 236                           "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
 237                           "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
 238                 p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
 239     vm_exit_during_initialization("Fatal Error", buf);
 240   }
 241 #endif
 242 
 243   ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
 244   if (!_heap_region_special) {
 245     os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
 246                               "Cannot commit heap memory");
 247   }
 248 
 249   BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this, _heap_region));
 250 
  // Now that we know the number of regions and the heap sizes, initialize the heuristics.
 252   initialize_heuristics();
 253 
 254   assert(_heap_region.byte_size() == heap_rs.size(), "Need to know reserved size for card table");
 255 
 256   //
 257   // Worker threads must be initialized after the barrier is configured
 258   //
 259   _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
 260   if (_workers == nullptr) {
 261     vm_exit_during_initialization("Failed necessary allocation.");
 262   } else {
 263     _workers->initialize_workers();
 264   }
 265 
 266   if (ParallelGCThreads > 1) {
 267     _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread", ParallelGCThreads);
 268     _safepoint_workers->initialize_workers();
 269   }
 270 
 271   //
 272   // Reserve and commit memory for bitmap(s)
 273   //
 274 
 275   size_t bitmap_size_orig = ShenandoahMarkBitMap::compute_size(heap_rs.size());
 276   _bitmap_size = align_up(bitmap_size_orig, bitmap_page_size);
 277 
 278   size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();
 279 
 280   guarantee(bitmap_bytes_per_region != 0,
 281             "Bitmap bytes per region should not be zero");
 282   guarantee(is_power_of_2(bitmap_bytes_per_region),
 283             "Bitmap bytes per region should be power of two: %zu", bitmap_bytes_per_region);
 284 
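  // Choose the commit granularity ("slice") for the mark bitmap: if one bitmap page spans
  // the bitmaps of several regions, a slice is a whole page; otherwise a slice covers a
  // single region's worth of bitmap.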
 285   if (bitmap_page_size > bitmap_bytes_per_region) {
 286     _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
 287     _bitmap_bytes_per_slice = bitmap_page_size;
 288   } else {
 289     _bitmap_regions_per_slice = 1;
 290     _bitmap_bytes_per_slice = bitmap_bytes_per_region;
 291   }
 292 
 293   guarantee(_bitmap_regions_per_slice >= 1,
 294             "Should have at least one region per slice: %zu",
 295             _bitmap_regions_per_slice);
 296 
 297   guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
 298             "Bitmap slices should be page-granular: bps = %zu, page size = %zu",
 299             _bitmap_bytes_per_slice, bitmap_page_size);
 300 
 301   ReservedSpace bitmap = reserve(_bitmap_size, bitmap_page_size);
 302   os::trace_page_sizes_for_requested_size("Mark Bitmap",
 303                                           bitmap_size_orig, bitmap_page_size,
 304                                           bitmap.base(),
 305                                           bitmap.size(), bitmap.page_size());
 306   MemTracker::record_virtual_memory_tag(bitmap.base(), mtGC);
 307   _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
 308   _bitmap_region_special = bitmap.special();
 309 
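  // Commit only the bitmap slices that cover the initially committed regions, rounding the
  // region count up to a whole number of slices and clamping at the full bitmap size.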
 310   size_t bitmap_init_commit = _bitmap_bytes_per_slice *
 311     align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
 312   bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
 313   if (!_bitmap_region_special) {
 314     os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
 315                               "Cannot commit bitmap memory");
 316   }
 317 
 318   _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);
 319 
 320   if (ShenandoahVerify) {
 321     ReservedSpace verify_bitmap = reserve(_bitmap_size, bitmap_page_size);
 322     os::trace_page_sizes_for_requested_size("Verify Bitmap",
 323                                             bitmap_size_orig, bitmap_page_size,
 324                                             verify_bitmap.base(),
 325                                             verify_bitmap.size(), verify_bitmap.page_size());
 326     if (!verify_bitmap.special()) {
 327       os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
 328                                 "Cannot commit verification bitmap memory");
 329     }
 330     MemTracker::record_virtual_memory_tag(verify_bitmap.base(), mtGC);
 331     MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
 332     _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
 333     _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
 334   }
 335 
 336   // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
 337   size_t aux_bitmap_page_size = bitmap_page_size;
 338 
 339   ReservedSpace aux_bitmap = reserve(_bitmap_size, aux_bitmap_page_size);
 340   os::trace_page_sizes_for_requested_size("Aux Bitmap",
 341                                           bitmap_size_orig, aux_bitmap_page_size,
 342                                           aux_bitmap.base(),
 343                                           aux_bitmap.size(), aux_bitmap.page_size());
 344   MemTracker::record_virtual_memory_tag(aux_bitmap.base(), mtGC);
 345   _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
 346   _aux_bitmap_region_special = aux_bitmap.special();
 347   _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
 348 
 349   //
 350   // Create regions and region sets
 351   //
 352   size_t region_align = align_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE);
 353   size_t region_storage_size_orig = region_align * _num_regions;
 354   size_t region_storage_size = align_up(region_storage_size_orig,
 355                                         MAX2(region_page_size, os::vm_allocation_granularity()));
 356 
 357   ReservedSpace region_storage = reserve(region_storage_size, region_page_size);
 358   os::trace_page_sizes_for_requested_size("Region Storage",
 359                                           region_storage_size_orig, region_page_size,
 360                                           region_storage.base(),
 361                                           region_storage.size(), region_storage.page_size());
 362   MemTracker::record_virtual_memory_tag(region_storage.base(), mtGC);
 363   if (!region_storage.special()) {
 364     os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false,
 365                               "Cannot commit region memory");
 366   }
 367 
 368   // Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks.
 369   // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there.
  // If that is not successful, bite the bullet and allocate at whatever address is available.
 371   {
 372     const size_t cset_align = MAX2<size_t>(os::vm_page_size(), os::vm_allocation_granularity());
 373     const size_t cset_size = align_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align);
 374     const size_t cset_page_size = os::vm_page_size();
 375 
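    // Probe power-of-two base addresses from the reservation alignment up to 1 GB; the first
    // successful reservation keeps the cset map at a low address, making cset checks cheaper.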
 376     uintptr_t min = round_up_power_of_2(cset_align);
 377     uintptr_t max = (1u << 30u);
 378     ReservedSpace cset_rs;
 379 
 380     for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
 381       char* req_addr = (char*)addr;
 382       assert(is_aligned(req_addr, cset_align), "Should be aligned");
 383       cset_rs = MemoryReserver::reserve(req_addr, cset_size, cset_align, cset_page_size);
 384       if (cset_rs.is_reserved()) {
 385         assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
 386         _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 387         break;
 388       }
 389     }
 390 
 391     if (_collection_set == nullptr) {
 392       cset_rs = MemoryReserver::reserve(cset_size, cset_align, os::vm_page_size());
 393       if (!cset_rs.is_reserved()) {
 394         vm_exit_during_initialization("Cannot reserve memory for collection set");
 395       }
 396 
 397       _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 398     }
 399     os::trace_page_sizes_for_requested_size("Collection Set",
 400                                             cset_size, cset_page_size,
 401                                             cset_rs.base(),
 402                                             cset_rs.size(), cset_rs.page_size());
 403   }
 404 
 405   _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
 406   _affiliations = NEW_C_HEAP_ARRAY(uint8_t, _num_regions, mtGC);
 407   _free_set = new ShenandoahFreeSet(this, _num_regions);
 408 
 409   {
 410     ShenandoahHeapLocker locker(lock());
 411 
 412     for (size_t i = 0; i < _num_regions; i++) {
 413       HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
 414       bool is_committed = i < num_committed_regions;
 415       void* loc = region_storage.base() + i * region_align;
 416 
 417       ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
 418       assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");
 419 
 420       _marking_context->initialize_top_at_mark_start(r);
 421       _regions[i] = r;
 422       assert(!collection_set()->is_in(i), "New region should not be in collection set");
 423 
 424       _affiliations[i] = ShenandoahAffiliation::FREE;
 425     }
 426 
 427     // Initialize to complete
 428     _marking_context->mark_complete();
 429     size_t young_cset_regions, old_cset_regions;
 430 
    // We are initializing the free set, so collection set region tallies are ignored.
 432     size_t first_old, last_old, num_old;
 433     _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
 434     _free_set->finish_rebuild(young_cset_regions, old_cset_regions, num_old);
 435   }
 436 
 437   if (AlwaysPreTouch) {
 438     // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before initialize() below zeroes it with the initializing thread. For any given region,
 440     // we touch the region and the corresponding bitmaps from the same thread.
 441     ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
 442 
 443     _pretouch_heap_page_size = heap_page_size;
 444     _pretouch_bitmap_page_size = bitmap_page_size;
 445 
 446     // OS memory managers may want to coalesce back-to-back pages. Make their jobs
    // simpler by pre-touching contiguous spaces (heap and bitmap) separately.
 448 
 449     ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, _pretouch_bitmap_page_size);
 450     _workers->run_task(&bcl);
 451 
 452     ShenandoahPretouchHeapTask hcl(_pretouch_heap_page_size);
 453     _workers->run_task(&hcl);
 454   }
 455 
 456   //
 457   // Initialize the rest of GC subsystems
 458   //
 459 
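  // Allocate per-worker liveness caches, one ShenandoahLiveData entry per region, zero-filled.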
 460   _liveness_cache = NEW_C_HEAP_ARRAY(ShenandoahLiveData*, _max_workers, mtGC);
 461   for (uint worker = 0; worker < _max_workers; worker++) {
 462     _liveness_cache[worker] = NEW_C_HEAP_ARRAY(ShenandoahLiveData, _num_regions, mtGC);
 463     Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
 464   }
 465 
 466   // There should probably be Shenandoah-specific options for these,
 467   // just as there are G1-specific options.
 468   {
 469     ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
 470     satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
 471     satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
 472   }
 473 
 474   _monitoring_support = new ShenandoahMonitoringSupport(this);
 475   _phase_timings = new ShenandoahPhaseTimings(max_workers());
 476   ShenandoahCodeRoots::initialize();
 477 
 478   if (ShenandoahPacing) {
 479     _pacer = new ShenandoahPacer(this);
 480     _pacer->setup_for_idle();
 481   }
 482 
 483   initialize_controller();
 484 
 485   if (ShenandoahUncommit) {
 486     _uncommit_thread = new ShenandoahUncommitThread(this);
 487   }
 488 
 489   print_init_logger();
 490 
 491   FullGCForwarding::initialize(_heap_region);
 492 
 493   return JNI_OK;
 494 }
 495 
 496 void ShenandoahHeap::initialize_controller() {
 497   _control_thread = new ShenandoahControlThread();
 498 }
 499 
 500 void ShenandoahHeap::print_init_logger() const {
 501   ShenandoahInitLogger::print();
 502 }
 503 
 504 void ShenandoahHeap::initialize_mode() {
 505   if (ShenandoahGCMode != nullptr) {
 506     if (strcmp(ShenandoahGCMode, "satb") == 0) {
 507       _gc_mode = new ShenandoahSATBMode();
 508     } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
 509       _gc_mode = new ShenandoahPassiveMode();
 510     } else if (strcmp(ShenandoahGCMode, "generational") == 0) {
 511       _gc_mode = new ShenandoahGenerationalMode();
 512     } else {
 513       vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
 514     }
 515   } else {
 516     vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
 517   }
 518   _gc_mode->initialize_flags();
 519   if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 520     vm_exit_during_initialization(
 521             err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 522                     _gc_mode->name()));
 523   }
 524   if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
 525     vm_exit_during_initialization(
 526             err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 527                     _gc_mode->name()));
 528   }
 529 }
 530 
 531 void ShenandoahHeap::initialize_heuristics() {
 532   _global_generation = new ShenandoahGlobalGeneration(mode()->is_generational(), max_workers(), max_capacity(), max_capacity());
 533   _global_generation->initialize_heuristics(mode());
 534 }
 535 
 536 #ifdef _MSC_VER
 537 #pragma warning( push )
 538 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 539 #endif
 540 
 541 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 542   CollectedHeap(),
 543   _gc_generation(nullptr),
 544   _active_generation(nullptr),
 545   _initial_size(0),
 546   _committed(0),
 547   _max_workers(MAX3(ConcGCThreads, ParallelGCThreads, 1U)),
 548   _workers(nullptr),
 549   _safepoint_workers(nullptr),
 550   _heap_region_special(false),
 551   _num_regions(0),
 552   _regions(nullptr),
 553   _affiliations(nullptr),
 554   _gc_state_changed(false),
 555   _gc_no_progress_count(0),
 556   _cancel_requested_time(0),
 557   _update_refs_iterator(this),
 558   _global_generation(nullptr),
 559   _control_thread(nullptr),
 560   _uncommit_thread(nullptr),
 561   _young_generation(nullptr),
 562   _old_generation(nullptr),
 563   _shenandoah_policy(policy),
 564   _gc_mode(nullptr),
 565   _free_set(nullptr),
 566   _pacer(nullptr),
 567   _verifier(nullptr),
 568   _phase_timings(nullptr),
 569   _monitoring_support(nullptr),
 570   _memory_pool(nullptr),
 571   _stw_memory_manager("Shenandoah Pauses"),
 572   _cycle_memory_manager("Shenandoah Cycles"),
 573   _gc_timer(new ConcurrentGCTimer()),
 574   _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
 575   _marking_context(nullptr),
 576   _bitmap_size(0),
 577   _bitmap_regions_per_slice(0),
 578   _bitmap_bytes_per_slice(0),
 579   _bitmap_region_special(false),
 580   _aux_bitmap_region_special(false),
 581   _liveness_cache(nullptr),
 582   _collection_set(nullptr)
 583 {
 584   // Initialize GC mode early, many subsequent initialization procedures depend on it
 585   initialize_mode();
 586 }
 587 
 588 #ifdef _MSC_VER
 589 #pragma warning( pop )
 590 #endif
 591 
 592 void ShenandoahHeap::print_on(outputStream* st) const {
 593   st->print_cr("Shenandoah Heap");
 594   st->print_cr(" %zu%s max, %zu%s soft max, %zu%s committed, %zu%s used",
 595                byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
 596                byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
 597                byte_size_in_proper_unit(committed()),    proper_unit_for_byte_size(committed()),
 598                byte_size_in_proper_unit(used()),         proper_unit_for_byte_size(used()));
 599   st->print_cr(" %zu x %zu %s regions",
 600                num_regions(),
 601                byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
 602                proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));
 603 
 604   st->print("Status: ");
 605   if (has_forwarded_objects())                 st->print("has forwarded objects, ");
 606   if (!mode()->is_generational()) {
    if (is_concurrent_mark_in_progress())      st->print("marking, ");
 608   } else {
 609     if (is_concurrent_old_mark_in_progress())    st->print("old marking, ");
 610     if (is_concurrent_young_mark_in_progress())  st->print("young marking, ");
 611   }
 612   if (is_evacuation_in_progress())             st->print("evacuating, ");
 613   if (is_update_refs_in_progress())            st->print("updating refs, ");
 614   if (is_degenerated_gc_in_progress())         st->print("degenerated gc, ");
 615   if (is_full_gc_in_progress())                st->print("full gc, ");
 616   if (is_full_gc_move_in_progress())           st->print("full gc move, ");
 617   if (is_concurrent_weak_root_in_progress())   st->print("concurrent weak roots, ");
 618   if (is_concurrent_strong_root_in_progress() &&
 619       !is_concurrent_weak_root_in_progress())  st->print("concurrent strong roots, ");
 620 
 621   if (cancelled_gc()) {
 622     st->print("cancelled");
 623   } else {
 624     st->print("not cancelled");
 625   }
 626   st->cr();
 627 
 628   st->print_cr("Reserved region:");
 629   st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
 630                p2i(reserved_region().start()),
 631                p2i(reserved_region().end()));
 632 
 633   ShenandoahCollectionSet* cset = collection_set();
 634   st->print_cr("Collection set:");
 635   if (cset != nullptr) {
 636     st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address()));
 637     st->print_cr(" - map (biased):  " PTR_FORMAT, p2i(cset->biased_map_address()));
 638   } else {
 639     st->print_cr(" (null)");
 640   }
 641 
 642   st->cr();
 643   MetaspaceUtils::print_on(st);
 644 
 645   if (Verbose) {
 646     st->cr();
 647     print_heap_regions_on(st);
 648   }
 649 }
 650 
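// Thread closure that sets up the GCLAB for each visited worker thread.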
 651 class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
 652 public:
 653   void do_thread(Thread* thread) {
 654     assert(thread != nullptr, "Sanity");
 655     ShenandoahThreadLocalData::initialize_gclab(thread);
 656   }
 657 };
 658 
 659 void ShenandoahHeap::post_initialize() {
 660   CollectedHeap::post_initialize();
 661 
 662   // Schedule periodic task to report on gc thread CPU utilization
 663   _mmu_tracker.initialize();
 664 
 665   MutexLocker ml(Threads_lock);
 666 
 667   ShenandoahInitWorkerGCLABClosure init_gclabs;
 668   _workers->threads_do(&init_gclabs);
 669 
  // GCLABs cannot be initialized early during VM startup, because their max_size cannot be determined yet.
  // Instead, let the WorkerThreads initialize the gclab when a new worker is created.
 672   _workers->set_initialize_gclab();
 673 
 674   // Note that the safepoint workers may require gclabs if the threads are used to create a heap dump
 675   // during a concurrent evacuation phase.
 676   if (_safepoint_workers != nullptr) {
 677     _safepoint_workers->threads_do(&init_gclabs);
 678     _safepoint_workers->set_initialize_gclab();
 679   }
 680 
 681   JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers();)
 682 }
 683 
 684 ShenandoahHeuristics* ShenandoahHeap::heuristics() {
 685   return _global_generation->heuristics();
 686 }
 687 
 688 size_t ShenandoahHeap::used() const {
 689   return global_generation()->used();
 690 }
 691 
 692 size_t ShenandoahHeap::committed() const {
 693   return Atomic::load(&_committed);
 694 }
 695 
 696 void ShenandoahHeap::increase_committed(size_t bytes) {
 697   shenandoah_assert_heaplocked_or_safepoint();
 698   _committed += bytes;
 699 }
 700 
 701 void ShenandoahHeap::decrease_committed(size_t bytes) {
 702   shenandoah_assert_heaplocked_or_safepoint();
 703   _committed -= bytes;
 704 }
 705 
 706 // For tracking usage based on allocations, it should be the case that:
 707 // * The sum of regions::used == heap::used
 708 // * The sum of a generation's regions::used == generation::used
 709 // * The sum of a generation's humongous regions::free == generation::humongous_waste
 710 // These invariants are checked by the verifier on GC safepoints.
 711 //
 712 // Additional notes:
 713 // * When a mutator's allocation request causes a region to be retired, the
 714 //   free memory left in that region is considered waste. It does not contribute
 715 //   to the usage, but it _does_ contribute to allocation rate.
 716 // * The bottom of a PLAB must be aligned on card size. In some cases this will
 717 //   require padding in front of the PLAB (a filler object). Because this padding
 718 //   is included in the region's used memory we include the padding in the usage
 719 //   accounting as waste.
// * Mutator allocations are used to compute an allocation rate. They are also
//   sent to the Pacer for the same purpose.
// * There are three sources of waste:
//  1. The padding used to align a PLAB on card size
//  2. The free memory in a region that is retired because it is smaller than the minimum TLAB size
//  3. The unused portion of memory in the last region of a humongous object
 726 void ShenandoahHeap::increase_used(const ShenandoahAllocRequest& req) {
 727   size_t actual_bytes = req.actual_size() * HeapWordSize;
 728   size_t wasted_bytes = req.waste() * HeapWordSize;
 729   ShenandoahGeneration* generation = generation_for(req.affiliation());
 730 
 731   if (req.is_gc_alloc()) {
 732     assert(wasted_bytes == 0 || req.type() == ShenandoahAllocRequest::_alloc_plab, "Only PLABs have waste");
 733     increase_used(generation, actual_bytes + wasted_bytes);
 734   } else {
 735     assert(req.is_mutator_alloc(), "Expected mutator alloc here");
 736     // padding and actual size both count towards allocation counter
 737     generation->increase_allocated(actual_bytes + wasted_bytes);
 738 
 739     // only actual size counts toward usage for mutator allocations
 740     increase_used(generation, actual_bytes);
 741 
 742     // notify pacer of both actual size and waste
 743     notify_mutator_alloc_words(req.actual_size(), req.waste());
 744 
 745     if (wasted_bytes > 0 && ShenandoahHeapRegion::requires_humongous(req.actual_size())) {
      increase_humongous_waste(generation, wasted_bytes);
 747     }
 748   }
 749 }
 750 
 751 void ShenandoahHeap::increase_humongous_waste(ShenandoahGeneration* generation, size_t bytes) {
 752   generation->increase_humongous_waste(bytes);
 753   if (!generation->is_global()) {
 754     global_generation()->increase_humongous_waste(bytes);
 755   }
 756 }
 757 
 758 void ShenandoahHeap::decrease_humongous_waste(ShenandoahGeneration* generation, size_t bytes) {
 759   generation->decrease_humongous_waste(bytes);
 760   if (!generation->is_global()) {
 761     global_generation()->decrease_humongous_waste(bytes);
 762   }
 763 }
 764 
 765 void ShenandoahHeap::increase_used(ShenandoahGeneration* generation, size_t bytes) {
 766   generation->increase_used(bytes);
 767   if (!generation->is_global()) {
 768     global_generation()->increase_used(bytes);
 769   }
 770 }
 771 
 772 void ShenandoahHeap::decrease_used(ShenandoahGeneration* generation, size_t bytes) {
 773   generation->decrease_used(bytes);
 774   if (!generation->is_global()) {
 775     global_generation()->decrease_used(bytes);
 776   }
 777 }
 778 
 779 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, size_t waste) {
 780   if (ShenandoahPacing) {
 781     control_thread()->pacing_notify_alloc(words);
 782     if (waste > 0) {
 783       pacer()->claim_for_alloc<true>(waste);
 784     }
 785   }
 786 }
 787 
 788 size_t ShenandoahHeap::capacity() const {
 789   return committed();
 790 }
 791 
 792 size_t ShenandoahHeap::max_capacity() const {
 793   return _num_regions * ShenandoahHeapRegion::region_size_bytes();
 794 }
 795 
 796 size_t ShenandoahHeap::soft_max_capacity() const {
 797   size_t v = Atomic::load(&_soft_max_size);
 798   assert(min_capacity() <= v && v <= max_capacity(),
 799          "Should be in bounds: %zu <= %zu <= %zu",
 800          min_capacity(), v, max_capacity());
 801   return v;
 802 }
 803 
 804 void ShenandoahHeap::set_soft_max_capacity(size_t v) {
 805   assert(min_capacity() <= v && v <= max_capacity(),
 806          "Should be in bounds: %zu <= %zu <= %zu",
 807          min_capacity(), v, max_capacity());
 808   Atomic::store(&_soft_max_size, v);
 809 }
 810 
 811 size_t ShenandoahHeap::min_capacity() const {
 812   return _minimum_size;
 813 }
 814 
 815 size_t ShenandoahHeap::initial_capacity() const {
 816   return _initial_size;
 817 }
 818 
 819 bool ShenandoahHeap::is_in(const void* p) const {
 820   if (is_in_reserved(p)) {
 821     if (is_full_gc_move_in_progress()) {
      // A Full GC move is running, so we do not have consistent region
      // information yet. But we know the pointer is in the heap.
 824       return true;
 825     }
 826     // Now check if we point to a live section in active region.
 827     ShenandoahHeapRegion* r = heap_region_containing(p);
 828     return (r->is_active() && p < r->top());
 829   } else {
 830     return false;
 831   }
 832 }
 833 
 834 void ShenandoahHeap::notify_soft_max_changed() {
 835   if (_uncommit_thread != nullptr) {
 836     _uncommit_thread->notify_soft_max_changed();
 837   }
 838 }
 839 
 840 void ShenandoahHeap::notify_explicit_gc_requested() {
 841   if (_uncommit_thread != nullptr) {
 842     _uncommit_thread->notify_explicit_gc_requested();
 843   }
 844 }
 845 
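// Reads the current SoftMaxHeapSize value, clamps it into [min_capacity, max_capacity],
// installs it as the new soft max, and returns true if the value actually changed.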
 846 bool ShenandoahHeap::check_soft_max_changed() {
 847   size_t new_soft_max = Atomic::load(&SoftMaxHeapSize);
 848   size_t old_soft_max = soft_max_capacity();
 849   if (new_soft_max != old_soft_max) {
 850     new_soft_max = MAX2(min_capacity(), new_soft_max);
 851     new_soft_max = MIN2(max_capacity(), new_soft_max);
 852     if (new_soft_max != old_soft_max) {
 853       log_info(gc)("Soft Max Heap Size: %zu%s -> %zu%s",
 854                    byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
 855                    byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
 856       );
 857       set_soft_max_capacity(new_soft_max);
 858       return true;
 859     }
 860   }
 861   return false;
 862 }
 863 
 864 void ShenandoahHeap::notify_heap_changed() {
  // Update monitoring counters when we take a new region. This amortizes the
  // update costs on the slow path.
 867   monitoring_support()->notify_heap_changed();
 868   _heap_changed.try_set();
 869 }
 870 
 871 void ShenandoahHeap::set_forced_counters_update(bool value) {
 872   monitoring_support()->set_forced_counters_update(value);
 873 }
 874 
 875 void ShenandoahHeap::handle_force_counters_update() {
 876   monitoring_support()->handle_force_counters_update();
 877 }
 878 
 879 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
 880   // New object should fit the GCLAB size
 881   size_t min_size = MAX2(size, PLAB::min_size());
 882 
 883   // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
 884   size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
 885 
 886   new_size = MIN2(new_size, PLAB::max_size());
 887   new_size = MAX2(new_size, PLAB::min_size());
 888 
  // Record the new heuristic value even if we end up taking a shortcut. This captures
  // the case when moderately-sized objects always take the shortcut. At some point,
  // the heuristics should catch up with them.
 892   log_debug(gc, free)("Set new GCLAB size: %zu", new_size);
 893   ShenandoahThreadLocalData::set_gclab_size(thread, new_size);
 894 
 895   if (new_size < size) {
 896     // New size still does not fit the object. Fall back to shared allocation.
 897     // This avoids retiring perfectly good GCLABs, when we encounter a large object.
 898     log_debug(gc, free)("New gclab size (%zu) is too small for %zu", new_size, size);
 899     return nullptr;
 900   }
 901 
 902   // Retire current GCLAB, and allocate a new one.
 903   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
 904   gclab->retire();
 905 
 906   size_t actual_size = 0;
 907   HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
 908   if (gclab_buf == nullptr) {
 909     return nullptr;
 910   }
 911 
 912   assert (size <= actual_size, "allocation should fit");
 913 
  // ...and clear or zap the just-allocated GCLAB, if needed.
 915   if (ZeroTLAB) {
 916     Copy::zero_to_words(gclab_buf, actual_size);
 917   } else if (ZapTLAB) {
 918     // Skip mangling the space corresponding to the object header to
 919     // ensure that the returned space is not considered parsable by
 920     // any concurrent GC thread.
 921     size_t hdr_size = oopDesc::header_size();
 922     Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
 923   }
 924   gclab->set_buf(gclab_buf, actual_size);
 925   return gclab->allocate(size);
 926 }
 927 
 928 // Called from stubs in JIT code or interpreter
 929 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
 930                                             size_t requested_size,
 931                                             size_t* actual_size) {
 932   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
 933   HeapWord* res = allocate_memory(req);
 934   if (res != nullptr) {
 935     *actual_size = req.actual_size();
 936   } else {
 937     *actual_size = 0;
 938   }
 939   return res;
 940 }
 941 
 942 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
 943                                              size_t word_size,
 944                                              size_t* actual_size) {
 945   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
 946   HeapWord* res = allocate_memory(req);
 947   if (res != nullptr) {
 948     *actual_size = req.actual_size();
 949   } else {
 950     *actual_size = 0;
 951   }
 952   return res;
 953 }
 954 
 955 HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
 956   intptr_t pacer_epoch = 0;
 957   bool in_new_region = false;
 958   HeapWord* result = nullptr;
 959 
 960   if (req.is_mutator_alloc()) {
 961     if (ShenandoahPacing) {
 962       pacer()->pace_for_alloc(req.size());
 963       pacer_epoch = pacer()->epoch();
 964     }
 965 
 966     if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
 967       result = allocate_memory_under_lock(req, in_new_region);
 968     }
 969 
 970     // Check that gc overhead is not exceeded.
 971     //
 972     // Shenandoah will grind along for quite a while allocating one
 973     // object at a time using shared (non-tlab) allocations. This check
 974     // is testing that the GC overhead limit has not been exceeded.
 975     // This will notify the collector to start a cycle, but will raise
 976     // an OOME to the mutator if the last Full GCs have not made progress.
 977     // gc_no_progress_count is incremented following each degen or full GC that fails to achieve is_good_progress().
 978     if (result == nullptr && !req.is_lab_alloc() && get_gc_no_progress_count() > ShenandoahNoProgressThreshold) {
 979       control_thread()->handle_alloc_failure(req, false);
 980       req.set_actual_size(0);
 981       return nullptr;
 982     }
 983 
 984     if (result == nullptr) {
 985       // Block until control thread reacted, then retry allocation.
 986       //
      // It might happen that a thread requesting allocation unblocks long after the GC
      // has happened, only to fail its second allocation because other threads have
      // already depleted the free storage. In this case, a better strategy is to try
      // again, until at least one full GC has completed.
      //
      // Stop retrying and return nullptr, causing an OOM error, if our allocation failed even after:
      //   a) We experienced a GC that had good progress, or
      //   b) We experienced at least one Full GC (whether or not it had good progress)
 995 
 996       size_t original_count = shenandoah_policy()->full_gc_count();
 997       while ((result == nullptr) && (original_count == shenandoah_policy()->full_gc_count())) {
 998         control_thread()->handle_alloc_failure(req, true);
 999         result = allocate_memory_under_lock(req, in_new_region);
1000       }
1001       if (result != nullptr) {
        // If our allocation request has been satisfied after it initially failed, we count this as good GC progress.
1003         notify_gc_progress();
1004       }
1005       if (log_develop_is_enabled(Debug, gc, alloc)) {
1006         ResourceMark rm;
1007         log_debug(gc, alloc)("Thread: %s, Result: " PTR_FORMAT ", Request: %s, Size: %zu"
1008                              ", Original: %zu, Latest: %zu",
1009                              Thread::current()->name(), p2i(result), req.type_string(), req.size(),
1010                              original_count, get_gc_no_progress_count());
1011       }
1012     }
1013   } else {
1014     assert(req.is_gc_alloc(), "Can only accept GC allocs here");
1015     result = allocate_memory_under_lock(req, in_new_region);
1016     // Do not call handle_alloc_failure() here, because we cannot block.
1017     // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
1018   }
1019 
1020   if (in_new_region) {
1021     notify_heap_changed();
1022   }
1023 
1024   if (result == nullptr) {
1025     req.set_actual_size(0);
1026   }
1027 
1028   // This is called regardless of the outcome of the allocation to account
1029   // for any waste created by retiring regions with this request.
1030   increase_used(req);
1031 
1032   if (result != nullptr) {
1033     size_t requested = req.size();
1034     size_t actual = req.actual_size();
1035 
1036     assert (req.is_lab_alloc() || (requested == actual),
1037             "Only LAB allocations are elastic: %s, requested = %zu, actual = %zu",
1038             ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);
1039 
1040     if (req.is_mutator_alloc()) {
1041       // If we requested more than we were granted, give the rest back to pacer.
1042       // This only matters if we are in the same pacing epoch: do not try to unpace
1043       // over the budget for the other phase.
1044       if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
1045         pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
1046       }
1047     }
1048   }
1049 
1050   return result;
1051 }
1052 
1053 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
1054   // If we are dealing with mutator allocation, then we may need to block for safepoint.
1055   // We cannot block for safepoint for GC allocations, because there is a high chance
1056   // we are already running at safepoint or from stack watermark machinery, and we cannot
1057   // block again.
1058   ShenandoahHeapLocker locker(lock(), req.is_mutator_alloc());
1059 
1060   // Make sure the old generation has room for either evacuations or promotions before trying to allocate.
1061   if (req.is_old() && !old_generation()->can_allocate(req)) {
1062     return nullptr;
1063   }
1064 
  // If the TLAB request size is greater than the memory available, allocate() will attempt to downsize
  // the request to fit within the available memory.
1067   HeapWord* result = _free_set->allocate(req, in_new_region);
1068 
1069   // Record the plab configuration for this result and register the object.
1070   if (result != nullptr && req.is_old()) {
1071     old_generation()->configure_plab_for_current_thread(req);
1072     if (req.type() == ShenandoahAllocRequest::_alloc_shared_gc) {
1073       // Register the newly allocated object while we're holding the global lock since there's no synchronization
1074       // built in to the implementation of register_object().  There are potential races when multiple independent
1075       // threads are allocating objects, some of which might span the same card region.  For example, consider
1076       // a card table's memory region within which three objects are being allocated by three different threads:
1077       //
1078       // objects being "concurrently" allocated:
1079       //    [-----a------][-----b-----][--------------c------------------]
1080       //            [---- card table memory range --------------]
1081       //
1082       // Before any objects are allocated, this card's memory range holds no objects.  Note that allocation of object a
1083       // wants to set the starts-object, first-start, and last-start attributes of the preceding card region.
1084       // Allocation of object b wants to set the starts-object, first-start, and last-start attributes of this card region.
1085       // Allocation of object c also wants to set the starts-object, first-start, and last-start attributes of this
1086       // card region.
1087       //
1088       // The thread allocating b and the thread allocating c can "race" in various ways, resulting in confusion, such as
1089       // last-start representing object b while first-start represents object c.  This is why we need to require all
1090       // register_object() invocations to be "mutually exclusive" with respect to each card's memory range.
1091       old_generation()->card_scan()->register_object(result);
1092     }
1093   }
1094 
1095   return result;
1096 }
1097 
1098 HeapWord* ShenandoahHeap::mem_allocate(size_t size,
1099                                         bool*  gc_overhead_limit_was_exceeded) {
1100   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
1101   return allocate_memory(req);
1102 }
1103 
1104 MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
1105                                                              size_t size,
1106                                                              Metaspace::MetadataType mdtype) {
1107   MetaWord* result;
1108 
1109   // Inform metaspace OOM to GC heuristics if class unloading is possible.
1110   ShenandoahHeuristics* h = global_generation()->heuristics();
1111   if (h->can_unload_classes()) {
1112     h->record_metaspace_oom();
1113   }
1114 
1115   // Expand and retry allocation
1116   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1117   if (result != nullptr) {
1118     return result;
1119   }
1120 
1121   // Start full GC
1122   collect(GCCause::_metadata_GC_clear_soft_refs);
1123 
1124   // Retry allocation
1125   result = loader_data->metaspace_non_null()->allocate(size, mdtype);
1126   if (result != nullptr) {
1127     return result;
1128   }
1129 
1130   // Expand and retry allocation
1131   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1132   if (result != nullptr) {
1133     return result;
1134   }
1135 
1136   // Out of memory
1137   return nullptr;
1138 }
1139 
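// Object closure that evacuates each live object it visits that has not already been forwarded.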
1140 class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
1141 private:
1142   ShenandoahHeap* const _heap;
1143   Thread* const _thread;
1144 public:
1145   ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
1146     _heap(heap), _thread(Thread::current()) {}
1147 
1148   void do_object(oop p) {
1149     shenandoah_assert_marked(nullptr, p);
1150     if (!p->is_forwarded()) {
1151       _heap->evacuate_object(p, _thread);
1152     }
1153   }
1154 };
1155 
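// Worker task that claims collection set regions and evacuates their live objects,
// reporting progress to the pacer and stopping early if the GC is cancelled.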
1156 class ShenandoahEvacuationTask : public WorkerTask {
1157 private:
1158   ShenandoahHeap* const _sh;
1159   ShenandoahCollectionSet* const _cs;
1160   bool _concurrent;
1161 public:
1162   ShenandoahEvacuationTask(ShenandoahHeap* sh,
1163                            ShenandoahCollectionSet* cs,
1164                            bool concurrent) :
1165     WorkerTask("Shenandoah Evacuation"),
1166     _sh(sh),
1167     _cs(cs),
1168     _concurrent(concurrent)
1169   {}
1170 
1171   void work(uint worker_id) {
1172     if (_concurrent) {
1173       ShenandoahConcurrentWorkerSession worker_session(worker_id);
1174       ShenandoahSuspendibleThreadSetJoiner stsj;
1175       ShenandoahEvacOOMScope oom_evac_scope;
1176       do_work();
1177     } else {
1178       ShenandoahParallelWorkerSession worker_session(worker_id);
1179       ShenandoahEvacOOMScope oom_evac_scope;
1180       do_work();
1181     }
1182   }
1183 
1184 private:
1185   void do_work() {
1186     ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
1187     ShenandoahHeapRegion* r;
    while ((r = _cs->claim_next()) != nullptr) {
1189       assert(r->has_live(), "Region %zu should have been reclaimed early", r->index());
1190       _sh->marked_object_iterate(r, &cl);
1191 
1192       if (ShenandoahPacing) {
1193         _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
1194       }
1195 
1196       if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
1197         break;
1198       }
1199     }
1200   }
1201 };
1202 
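// Thread closure that retires a thread's GCLAB (and PLAB in generational mode), optionally
// resetting the recorded LAB sizes to zero so they are re-sized on the next allocation.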
1203 class ShenandoahRetireGCLABClosure : public ThreadClosure {
1204 private:
1205   bool const _resize;
1206 public:
1207   explicit ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
1208   void do_thread(Thread* thread) override {
1209     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1210     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1211     gclab->retire();
1212     if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1213       ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1214     }
1215 
1216     if (ShenandoahHeap::heap()->mode()->is_generational()) {
1217       PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1218       assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
1219 
1220       // There are two reasons to retire all plabs between old-gen evacuation passes.
1221       //  1. We need to make the plab memory parsable by remembered-set scanning.
1222       //  2. We need to establish a trustworthy UpdateWaterMark value within each old-gen heap region
1223       ShenandoahGenerationalHeap::heap()->retire_plab(plab, thread);
1224       if (_resize && ShenandoahThreadLocalData::plab_size(thread) > 0) {
1225         ShenandoahThreadLocalData::set_plab_size(thread, 0);
1226       }
1227     }
1228   }
1229 };
1230 
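// Thread closure that copies the given gc state into a thread's thread-local gc state.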
1231 class ShenandoahGCStatePropagator : public ThreadClosure {
1232 public:
1233   explicit ShenandoahGCStatePropagator(char gc_state) : _gc_state(gc_state) {}
1234 
1235   void do_thread(Thread* thread) override {
1236     ShenandoahThreadLocalData::set_gc_state(thread, _gc_state);
1237   }
1238 private:
1239   char _gc_state;
1240 };
1241 
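// Handshake closure used before update-refs: propagates the new gc state to each thread
// and retires the thread's GCLAB (and PLAB in generational mode) if one was initialized.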
1242 class ShenandoahPrepareForUpdateRefs : public HandshakeClosure {
1243 public:
1244   explicit ShenandoahPrepareForUpdateRefs(char gc_state) :
1245     HandshakeClosure("Shenandoah Prepare for Update Refs"),
1246     _retire(ResizeTLAB), _propagator(gc_state) {}
1247 
1248   void do_thread(Thread* thread) override {
1249     _propagator.do_thread(thread);
1250     if (ShenandoahThreadLocalData::gclab(thread) != nullptr) {
1251       _retire.do_thread(thread);
1252     }
1253   }
1254 private:
1255   ShenandoahRetireGCLABClosure _retire;
1256   ShenandoahGCStatePropagator _propagator;
1257 };
1258 
1259 void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
1260   ShenandoahEvacuationTask task(this, _collection_set, concurrent);
1261   workers()->run_task(&task);
1262 }
1263 
1264 void ShenandoahHeap::concurrent_prepare_for_update_refs() {
1265   // It's possible that evacuation succeeded, but we could still be cancelled when we get here.
1266   // A cancellation at this point means the degenerated cycle must resume from update-refs.
1267   set_gc_state_concurrent(EVACUATION, false);
1268   set_gc_state_concurrent(WEAK_ROOTS, false);
1269   set_gc_state_concurrent(UPDATE_REFS, true);
1270 
1271   // This will propagate the gc state and retire gclabs and plabs for threads that require it.
1272   ShenandoahPrepareForUpdateRefs prepare_for_update_refs(_gc_state.raw_value());
1273 
1274   // The handshake won't touch worker threads (or control thread, or VM thread), so do those separately.
1275   Threads::non_java_threads_do(&prepare_for_update_refs);
1276 
1277   // Now retire gclabs and plabs and propagate gc_state for mutator threads
1278   Handshake::execute(&prepare_for_update_refs);
1279 
1280   _update_refs_iterator.reset();
1281 }
1282 
1283 oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
1284   assert(thread == Thread::current(), "Expected thread parameter to be current thread.");
1285   if (ShenandoahThreadLocalData::is_oom_during_evac(thread)) {
1286     // This thread went through the OOM during evac protocol. It is safe to return
1287     // the forward pointer. It must not attempt to evacuate any other objects.
1288     return ShenandoahBarrierSet::resolve_forwarded(p);
1289   }
1290 
1291   assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
1292 
1293   ShenandoahHeapRegion* r = heap_region_containing(p);
1294   assert(!r->is_humongous(), "never evacuate humongous objects");
1295 
1296   ShenandoahAffiliation target_gen = r->affiliation();
1297   return try_evacuate_object(p, thread, r, target_gen);
1298 }
1299 
1300 oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapRegion* from_region,
1301                                                ShenandoahAffiliation target_gen) {
1302   assert(target_gen == YOUNG_GENERATION, "Only expect evacuations to young in this mode");
1303   assert(from_region->is_young(), "Only expect evacuations from young in this mode");
1304   bool alloc_from_lab = true;
1305   HeapWord* copy = nullptr;
1306 
1307   markWord mark = p->mark();
1308   if (ShenandoahForwarding::is_forwarded(mark)) {
1309     return ShenandoahForwarding::get_forwardee(p);
1310   }
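  // The copy may need more words than the original, e.g. to make room for the identity hash
  // in the copied object (see initialize_hash_if_necessary() below).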
1311   size_t old_size = ShenandoahForwarding::size(p);
1312   size_t size = p->copy_size(old_size, mark);
1313 
1314 #ifdef ASSERT
1315   if (ShenandoahOOMDuringEvacALot &&
1316       (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
1317     copy = nullptr;
1318   } else {
1319 #endif
1320     if (UseTLAB) {
1321       copy = allocate_from_gclab(thread, size);
1322     }
1323     if (copy == nullptr) {
1324       // If we failed to allocate in LAB, we'll try a shared allocation.
1325       ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, target_gen);
1326       copy = allocate_memory(req);
1327       alloc_from_lab = false;
1328     }
1329 #ifdef ASSERT
1330   }
1331 #endif
1332 
1333   if (copy == nullptr) {
1334     control_thread()->handle_alloc_failure_evac(size);
1335 
1336     _oom_evac_handler.handle_out_of_memory_during_evacuation();
1337 
1338     return ShenandoahBarrierSet::resolve_forwarded(p);
1339   }
1340 
1341   // Copy the object:
1342   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, old_size);
1343 
1344   // Try to install the new forwarding pointer.
1345   oop copy_val = cast_to_oop(copy);
1346   oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
1347   if (result == copy_val) {
1348     // Successfully evacuated. Our copy is now the public one!
1349     copy_val->initialize_hash_if_necessary(p);
1350     ContinuationGCSupport::relativize_stack_chunk(copy_val);
1351     shenandoah_assert_correct(nullptr, copy_val);
1352     return copy_val;
1353   } else {
1354     // Failed to evacuate. We need to deal with the object that is left behind. Since this
1355     // new allocation is certainly after TAMS, it will be considered live in the next cycle.
1356     // But if it happens to contain references to evacuated regions, those references would
1357     // not get updated for this stale copy during this cycle, and we will crash while scanning
1358     // it the next cycle.
1359     if (alloc_from_lab) {
1360       // For LAB allocations, it is enough to rollback the allocation ptr. Either the next
1361       // object will overwrite this stale copy, or the filler object on LAB retirement will
1362       // do this.
1363       ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
1364     } else {
1365       // For non-LAB allocations, we have no way to retract the allocation, and
1366       // have to explicitly overwrite the copy with the filler object. With that overwrite,
1367       // we have to keep the fwdptr initialized and pointing to our (stale) copy.
1368       assert(size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
1369       fill_with_object(copy, size);
1370       shenandoah_assert_correct(nullptr, copy_val);
1371       // For non-LAB allocations, the object has already been registered
1372     }
1373     shenandoah_assert_correct(nullptr, result);
1374     return result;
1375   }
1376 }
1377 
1378 void ShenandoahHeap::trash_cset_regions() {
1379   ShenandoahHeapLocker locker(lock());
1380 
1381   ShenandoahCollectionSet* set = collection_set();
1382   ShenandoahHeapRegion* r;
1383   set->clear_current_index();
1384   while ((r = set->next()) != nullptr) {
1385     r->make_trash();
1386   }
1387   collection_set()->clear();
1388 }
1389 
1390 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
1391   st->print_cr("Heap Regions:");
1392   st->print_cr("Region state: EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HP=pinned humongous start");
1393   st->print_cr("              HC=humongous continuation, CS=collection set, TR=trash, P=pinned, CSP=pinned collection set");
1394   st->print_cr("BTE=bottom/top/end, TAMS=top-at-mark-start");
1395   st->print_cr("UWM=update watermark, U=used");
1396   st->print_cr("T=TLAB allocs, G=GCLAB allocs");
1397   st->print_cr("S=shared allocs, L=live data");
1398   st->print_cr("CP=critical pins");
1399 
1400   for (size_t i = 0; i < num_regions(); i++) {
1401     get_region(i)->print_on(st);
1402   }
1403 }
1404 
1405 size_t ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
1406   assert(start->is_humongous_start(), "reclaim regions starting with the first one");
1407 
1408   oop humongous_obj = cast_to_oop(start->bottom());
1409   size_t size = humongous_obj->size();
1410   size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
1411   size_t index = start->index() + required_regions - 1;
1412 
1413   assert(!start->has_live(), "liveness must be zero");
1414 
1415   for (size_t i = 0; i < required_regions; i++) {
1416     // Reclaim from the tail. Otherwise, an assertion fails when printing the region to the trace log,
1417     // because it expects every humongous continuation region to still follow a humongous start region.
1418     ShenandoahHeapRegion* region = get_region(index--);
1419 
1420     assert(region->is_humongous(), "expect correct humongous start or continuation");
1421     assert(!region->is_cset(), "Humongous region should not be in collection set");
1422 
1423     region->make_trash_immediate();
1424   }
1425   return required_regions;
1426 }
1427 
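// Verification closure: asserts that a thread's GCLAB (and PLAB, in generational mode) has already been retired.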
1428 class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
1429 public:
1430   ShenandoahCheckCleanGCLABClosure() {}
1431   void do_thread(Thread* thread) {
1432     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1433     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1434     assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
1435 
1436     if (ShenandoahHeap::heap()->mode()->is_generational()) {
1437       PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1438       assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
1439       assert(plab->words_remaining() == 0, "PLAB should not need retirement");
1440     }
1441   }
1442 };
1443 
1444 void ShenandoahHeap::labs_make_parsable() {
1445   assert(UseTLAB, "Only call with UseTLAB");
1446 
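  // Retire GCLABs without resizing; this pass only needs to make the LAB memory parsable.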
1447   ShenandoahRetireGCLABClosure cl(false);
1448 
1449   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1450     ThreadLocalAllocBuffer& tlab = t->tlab();
1451     tlab.make_parsable();
1452     cl.do_thread(t);
1453   }
1454 
1455   workers()->threads_do(&cl);
1456 
1457   if (safepoint_workers() != nullptr) {
1458     safepoint_workers()->threads_do(&cl);
1459   }
1460 }
1461 
1462 void ShenandoahHeap::tlabs_retire(bool resize) {
1463   assert(UseTLAB, "Only call with UseTLAB");
1464   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1465 
1466   ThreadLocalAllocStats stats;
1467 
1468   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1469     ThreadLocalAllocBuffer& tlab = t->tlab();
1470     tlab.retire(&stats);
1471     if (resize) {
1472       tlab.resize();
1473     }
1474   }
1475 
1476   stats.publish();
1477 
1478 #ifdef ASSERT
1479   ShenandoahCheckCleanGCLABClosure cl;
1480   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1481     cl.do_thread(t);
1482   }
1483   workers()->threads_do(&cl);
1484 #endif
1485 }
1486 
1487 void ShenandoahHeap::gclabs_retire(bool resize) {
1488   assert(UseTLAB, "Only call with UseTLAB");
1489   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1490 
1491   ShenandoahRetireGCLABClosure cl(resize);
1492   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1493     cl.do_thread(t);
1494   }
1495 
1496   workers()->threads_do(&cl);
1497 
1498   if (safepoint_workers() != nullptr) {
1499     safepoint_workers()->threads_do(&cl);
1500   }
1501 }
1502 
1503 // Returns size in bytes
1504 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1505   // Return the max allowed size, and let the allocation path
1506   // figure out the safe size for the current allocation.
1507   return ShenandoahHeapRegion::max_tlab_size_bytes();
1508 }
1509 
1510 size_t ShenandoahHeap::max_tlab_size() const {
1511   // Returns size in words
1512   return ShenandoahHeapRegion::max_tlab_size_words();
1513 }
1514 
1515 void ShenandoahHeap::collect_as_vm_thread(GCCause::Cause cause) {
1516   // These requests are ignored because we can't easily have Shenandoah jump into
1517   // a synchronous (degenerated or full) cycle while it is in the middle of a concurrent
1518   // cycle. We _could_ cancel the concurrent cycle and then try to run a cycle directly
1519   // on the VM thread, but this would confuse the control thread mightily and doesn't
1520   // seem worth the trouble. Instead, we will have the caller thread run (and wait for) a
1521   // concurrent cycle in the prologue of the heap inspect/dump operation. This is how
1522   // other concurrent collectors in the JVM handle this scenario as well.
1523   assert(Thread::current()->is_VM_thread(), "Should be the VM thread");
1524   guarantee(cause == GCCause::_heap_dump || cause == GCCause::_heap_inspection, "Invalid cause");
1525 }
1526 
1527 void ShenandoahHeap::collect(GCCause::Cause cause) {
1528   control_thread()->request_gc(cause);
1529 }
1530 
1531 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
1532   //assert(false, "Shouldn't need to do full collections");
1533 }
1534 
1535 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1536   ShenandoahHeapRegion* r = heap_region_containing(addr);
1537   if (r != nullptr) {
1538     return r->block_start(addr);
1539   }
1540   return nullptr;
1541 }
1542 
1543 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1544   ShenandoahHeapRegion* r = heap_region_containing(addr);
1545   return r->block_is_obj(addr);
1546 }
1547 
1548 bool ShenandoahHeap::print_location(outputStream* st, void* addr) const {
1549   return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr);
1550 }
1551 
1552 void ShenandoahHeap::prepare_for_verify() {
1553   if (SafepointSynchronize::is_at_safepoint() && UseTLAB) {
1554     labs_make_parsable();
1555   }
1556 }
1557 
1558 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1559   if (_shenandoah_policy->is_at_shutdown()) {
1560     return;
1561   }
1562 
1563   if (_control_thread != nullptr) {
1564     tcl->do_thread(_control_thread);
1565   }
1566 
1567   if (_uncommit_thread != nullptr) {
1568     tcl->do_thread(_uncommit_thread);
1569   }
1570 
1571   workers()->threads_do(tcl);
1572   if (_safepoint_workers != nullptr) {
1573     _safepoint_workers->threads_do(tcl);
1574   }
1575 }
1576 
1577 void ShenandoahHeap::print_tracing_info() const {
1578   LogTarget(Info, gc, stats) lt;
1579   if (lt.is_enabled()) {
1580     ResourceMark rm;
1581     LogStream ls(lt);
1582 
1583     phase_timings()->print_global_on(&ls);
1584 
1585     ls.cr();
1586     ls.cr();
1587 
1588     shenandoah_policy()->print_gc_stats(&ls);
1589 
1590     ls.cr();
1591     ls.cr();
1592   }
1593 }
1594 
1595 void ShenandoahHeap::set_gc_generation(ShenandoahGeneration* generation) {
1596   shenandoah_assert_control_or_vm_thread_at_safepoint();
1597   _gc_generation = generation;
1598 }
1599 
1600 // Active generation may only be set by the VM thread at a safepoint.
1601 void ShenandoahHeap::set_active_generation() {
1602   assert(Thread::current()->is_VM_thread(), "Only the VM Thread");
1603   assert(SafepointSynchronize::is_at_safepoint(), "Only at a safepoint!");
1604   assert(_gc_generation != nullptr, "Will set _active_generation to nullptr");
1605   _active_generation = _gc_generation;
1606 }
1607 
1608 void ShenandoahHeap::on_cycle_start(GCCause::Cause cause, ShenandoahGeneration* generation) {
1609   shenandoah_policy()->record_collection_cause(cause);
1610 
1611   const GCCause::Cause current = gc_cause();
1612   assert(current == GCCause::_no_gc, "Over-writing cause: %s, with: %s",
1613          GCCause::to_string(current), GCCause::to_string(cause));
1614   assert(_gc_generation == nullptr, "Over-writing _gc_generation");
1615 
1616   set_gc_cause(cause);
1617   set_gc_generation(generation);
1618 
1619   generation->heuristics()->record_cycle_start();
1620 }
1621 
1622 void ShenandoahHeap::on_cycle_end(ShenandoahGeneration* generation) {
1623   assert(gc_cause() != GCCause::_no_gc, "cause wasn't set");
1624   assert(_gc_generation != nullptr, "_gc_generation wasn't set");
1625 
1626   generation->heuristics()->record_cycle_end();
1627   if (mode()->is_generational() && generation->is_global()) {
1628     // If we just completed a GLOBAL GC, claim credit for completion of young-gen and old-gen GC as well
1629     young_generation()->heuristics()->record_cycle_end();
1630     old_generation()->heuristics()->record_cycle_end();
1631   }
1632 
1633   set_gc_generation(nullptr);
1634   set_gc_cause(GCCause::_no_gc);
1635 }
1636 
1637 void ShenandoahHeap::verify(VerifyOption vo) {
1638   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1639     if (ShenandoahVerify) {
1640       verifier()->verify_generic(vo);
1641     } else {
1642       // TODO: Consider allocating verification bitmaps on demand,
1643       // and turn this on unconditionally.
1644     }
1645   }
1646 }
1647 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1648   return _free_set->capacity();
1649 }
1650 
1651 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1652 private:
1653   MarkBitMap* _bitmap;
1654   ShenandoahScanObjectStack* _oop_stack;
1655   ShenandoahHeap* const _heap;
1656   ShenandoahMarkingContext* const _marking_context;
1657 
1658   template <class T>
1659   void do_oop_work(T* p) {
1660     T o = RawAccess<>::oop_load(p);
1661     if (!CompressedOops::is_null(o)) {
1662       oop obj = CompressedOops::decode_not_null(o);
1663       if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
1664         // There may be dead oops in weak roots in concurrent root phase, do not touch them.
1665         return;
1666       }
1667       obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
1668 
1669       assert(oopDesc::is_oop(obj), "must be a valid oop");
1670       if (!_bitmap->is_marked(obj)) {
1671         _bitmap->mark(obj);
1672         _oop_stack->push(obj);
1673       }
1674     }
1675   }
1676 public:
1677   ObjectIterateScanRootClosure(MarkBitMap* bitmap, ShenandoahScanObjectStack* oop_stack) :
1678     _bitmap(bitmap), _oop_stack(oop_stack), _heap(ShenandoahHeap::heap()),
1679     _marking_context(_heap->marking_context()) {}
1680   void do_oop(oop* p)       { do_oop_work(p); }
1681   void do_oop(narrowOop* p) { do_oop_work(p); }
1682 };
1683 
1684 /*
1685  * This is public API, used in preparation for object_iterate().
1686  * Since we don't do a linear scan of the heap in object_iterate() (see comment below), we don't
1687  * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can
1688  * control, we call SH::tlabs_retire, SH::gclabs_retire.
1689  */
1690 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
1691   // No-op.
1692 }
1693 
1694 /*
1695  * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
1696  *
1697  * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
1698  * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
1699  * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
1700  * scanning therefore depends on having a valid marking bitmap to support it. However, we only
1701  * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
1702  * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
1703  * wiped the bitmap in preparation for next marking).
1704  *
1705  * For all those reasons, we implement object iteration as a single marking traversal, reporting
1706  * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1707  * is allowed to report dead objects, but is not required to do so.
1708  */
1709 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1710   // Reset bitmap
1711   if (!prepare_aux_bitmap_for_iteration())
1712     return;
1713 
1714   ShenandoahScanObjectStack oop_stack;
1715   ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1716   // Seed the stack with root scan
1717   scan_roots_for_iteration(&oop_stack, &oops);
1718 
1719   // Work through the oop stack to traverse heap
1720   while (! oop_stack.is_empty()) {
1721     oop obj = oop_stack.pop();
1722     assert(oopDesc::is_oop(obj), "must be a valid oop");
1723     cl->do_object(obj);
1724     obj->oop_iterate(&oops);
1725   }
1726 
1727   assert(oop_stack.is_empty(), "should be empty");
1728   // Reclaim bitmap
1729   reclaim_aux_bitmap_for_iteration();
1730 }
1731 
1732 bool ShenandoahHeap::prepare_aux_bitmap_for_iteration() {
1733   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1734 
1735   if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1736     log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1737     return false;
1738   }
1739   // Reset bitmap
1740   _aux_bit_map.clear();
1741   return true;
1742 }
1743 
1744 void ShenandoahHeap::scan_roots_for_iteration(ShenandoahScanObjectStack* oop_stack, ObjectIterateScanRootClosure* oops) {
1745   // Process GC roots according to the current GC cycle.
1746   // This populates the work stack with the initial objects.
1747   // It is important to relinquish the associated locks before diving
1748   // into the heap dumper.
1749   uint n_workers = safepoint_workers() != nullptr ? safepoint_workers()->active_workers() : 1;
1750   ShenandoahHeapIterationRootScanner rp(n_workers);
1751   rp.roots_do(oops);
1752 }
1753 
1754 void ShenandoahHeap::reclaim_aux_bitmap_for_iteration() {
1755   if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
1756     log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
1757   }
1758 }
1759 
1760 // Closure for iterating objects in parallel
1761 class ShenandoahObjectIterateParScanClosure : public BasicOopIterateClosure {
1762 private:
1763   MarkBitMap* _bitmap;
1764   ShenandoahObjToScanQueue* _queue;
1765   ShenandoahHeap* const _heap;
1766   ShenandoahMarkingContext* const _marking_context;
1767 
1768   template <class T>
1769   void do_oop_work(T* p) {
1770     T o = RawAccess<>::oop_load(p);
1771     if (!CompressedOops::is_null(o)) {
1772       oop obj = CompressedOops::decode_not_null(o);
1773       if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
1774         // There may be dead oops in weak roots in concurrent root phase, do not touch them.
1775         return;
1776       }
1777       obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
1778 
1779       assert(oopDesc::is_oop(obj), "Must be a valid oop");
1780       if (_bitmap->par_mark(obj)) {
1781         _queue->push(ShenandoahMarkTask(obj));
1782       }
1783     }
1784   }
1785 public:
1786   ShenandoahObjectIterateParScanClosure(MarkBitMap* bitmap, ShenandoahObjToScanQueue* q) :
1787     _bitmap(bitmap), _queue(q), _heap(ShenandoahHeap::heap()),
1788     _marking_context(_heap->marking_context()) {}
1789   void do_oop(oop* p)       { do_oop_work(p); }
1790   void do_oop(narrowOop* p) { do_oop_work(p); }
1791 };
1792 
1793 // Object iterator for parallel heap iteration.
1794 // The root scanning phase happens during construction, preparing the
1795 // parallel marking queues.
1796 // Every worker processes its own marking queue. Work-stealing is used
1797 // to balance the workload.
1798 class ShenandoahParallelObjectIterator : public ParallelObjectIteratorImpl {
1799 private:
1800   uint                         _num_workers;
1801   bool                         _init_ready;
1802   MarkBitMap*                  _aux_bit_map;
1803   ShenandoahHeap*              _heap;
1804   ShenandoahScanObjectStack    _roots_stack; // global roots stack
1805   ShenandoahObjToScanQueueSet* _task_queues;
1806 public:
1807   ShenandoahParallelObjectIterator(uint num_workers, MarkBitMap* bitmap) :
1808         _num_workers(num_workers),
1809         _init_ready(false),
1810         _aux_bit_map(bitmap),
1811         _heap(ShenandoahHeap::heap()) {
1812     // Initialize bitmap
1813     _init_ready = _heap->prepare_aux_bitmap_for_iteration();
1814     if (!_init_ready) {
1815       return;
1816     }
1817 
1818     ObjectIterateScanRootClosure oops(_aux_bit_map, &_roots_stack);
1819     _heap->scan_roots_for_iteration(&_roots_stack, &oops);
1820 
1821     _init_ready = prepare_worker_queues();
1822   }
1823 
1824   ~ShenandoahParallelObjectIterator() {
1825     // Reclaim bitmap
1826     _heap->reclaim_aux_bitmap_for_iteration();
1827     // Reclaim queue for workers
1828     if (_task_queues != nullptr) {
1829       for (uint i = 0; i < _num_workers; ++i) {
1830         ShenandoahObjToScanQueue* q = _task_queues->queue(i);
1831         if (q != nullptr) {
1832           delete q;
1833           _task_queues->register_queue(i, nullptr);
1834         }
1835       }
1836       delete _task_queues;
1837       _task_queues = nullptr;
1838     }
1839   }
1840 
1841   virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
1842     if (_init_ready) {
1843       object_iterate_parallel(cl, worker_id, _task_queues);
1844     }
1845   }
1846 
1847 private:
1848   // Divide the global roots stack into worker queues
1849   bool prepare_worker_queues() {
1850     _task_queues = new ShenandoahObjToScanQueueSet((int) _num_workers);
1851     // Initialize a queue for every worker
1852     for (uint i = 0; i < _num_workers; ++i) {
1853       ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
1854       _task_queues->register_queue(i, task_queue);
1855     }
1856     // Divide roots among the workers. Assume that the distribution of object references
1857     // is related to the root kind, and use round-robin so that every worker has the same chance
1858     // to process every kind of root.
1859     size_t roots_num = _roots_stack.size();
1860     if (roots_num == 0) {
1861       // No work to do
1862       return false;
1863     }
1864 
1865     for (uint j = 0; j < roots_num; j++) {
1866       uint stack_id = j % _num_workers;
1867       oop obj = _roots_stack.pop();
1868       _task_queues->queue(stack_id)->push(ShenandoahMarkTask(obj));
1869     }
1870     return true;
1871   }
1872 
1873   void object_iterate_parallel(ObjectClosure* cl,
1874                                uint worker_id,
1875                                ShenandoahObjToScanQueueSet* queue_set) {
1876     assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1877     assert(queue_set != nullptr, "task queue must not be null");
1878 
1879     ShenandoahObjToScanQueue* q = queue_set->queue(worker_id);
1880     assert(q != nullptr, "object iterate queue must not be null");
1881 
1882     ShenandoahMarkTask t;
1883     ShenandoahObjectIterateParScanClosure oops(_aux_bit_map, q);
1884 
1885     // Work through the queue to traverse heap.
1886     // Steal when there is no task in queue.
1887     while (q->pop(t) || queue_set->steal(worker_id, t)) {
1888       oop obj = t.obj();
1889       assert(oopDesc::is_oop(obj), "must be a valid oop");
1890       cl->do_object(obj);
1891       obj->oop_iterate(&oops);
1892     }
1893     assert(q->is_empty(), "should be empty");
1894   }
1895 };
1896 
1897 ParallelObjectIteratorImpl* ShenandoahHeap::parallel_object_iterator(uint workers) {
1898   return new ShenandoahParallelObjectIterator(workers, &_aux_bit_map);
1899 }
1900 
1901 // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
1902 void ShenandoahHeap::keep_alive(oop obj) {
1903   if (is_concurrent_mark_in_progress() && (obj != nullptr)) {
1904     ShenandoahBarrierSet::barrier_set()->enqueue(obj);
1905   }
1906 }
1907 
1908 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1909   for (size_t i = 0; i < num_regions(); i++) {
1910     ShenandoahHeapRegion* current = get_region(i);
1911     blk->heap_region_do(current);
1912   }
1913 }
1914 
1915 class ShenandoahParallelHeapRegionTask : public WorkerTask {
1916 private:
1917   ShenandoahHeap* const _heap;
1918   ShenandoahHeapRegionClosure* const _blk;
1919   size_t const _stride;
1920 
1921   shenandoah_padding(0);
1922   volatile size_t _index;
1923   shenandoah_padding(1);
1924 
1925 public:
1926   ShenandoahParallelHeapRegionTask(ShenandoahHeapRegionClosure* blk, size_t stride) :
1927           WorkerTask("Shenandoah Parallel Region Operation"),
1928           _heap(ShenandoahHeap::heap()), _blk(blk), _stride(stride), _index(0) {}
1929 
1930   void work(uint worker_id) {
1931     ShenandoahParallelWorkerSession worker_session(worker_id);
1932     size_t stride = _stride;
1933 
1934     size_t max = _heap->num_regions();
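    // Workers claim disjoint batches of regions (stride at a time) by atomically advancing the shared index.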
1935     while (Atomic::load(&_index) < max) {
1936       size_t cur = Atomic::fetch_then_add(&_index, stride, memory_order_relaxed);
1937       size_t start = cur;
1938       size_t end = MIN2(cur + stride, max);
1939       if (start >= max) break;
1940 
1941       for (size_t i = cur; i < end; i++) {
1942         ShenandoahHeapRegion* current = _heap->get_region(i);
1943         _blk->heap_region_do(current);
1944       }
1945     }
1946   }
1947 };
1948 
1949 void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1950   assert(blk->is_thread_safe(), "Only thread-safe closures here");
1951   const uint active_workers = workers()->active_workers();
1952   const size_t n_regions = num_regions();
1953   size_t stride = ShenandoahParallelRegionStride;
1954   if (stride == 0 && active_workers > 1) {
1955     // Automatically derive the stride to balance the work between threads
1956     // evenly. Do not try to split work if below the reasonable threshold.
1957     constexpr size_t threshold = 4096;
1958     stride = n_regions <= threshold ?
1959             threshold :
1960             (n_regions + active_workers - 1) / active_workers;
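    // For example, with 32768 regions and 8 active workers this claims 4096 regions per batch.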
1961   }
1962 
1963   if (n_regions > stride && active_workers > 1) {
1964     ShenandoahParallelHeapRegionTask task(blk, stride);
1965     workers()->run_task(&task);
1966   } else {
1967     heap_region_iterate(blk);
1968   }
1969 }
1970 
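// Empty handshake closure: executing it simply forces a rendezvous with every Java thread.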
1971 class ShenandoahRendezvousClosure : public HandshakeClosure {
1972 public:
1973   inline ShenandoahRendezvousClosure(const char* name) : HandshakeClosure(name) {}
1974   inline void do_thread(Thread* thread) {}
1975 };
1976 
1977 void ShenandoahHeap::rendezvous_threads(const char* name) {
1978   ShenandoahRendezvousClosure cl(name);
1979   Handshake::execute(&cl);
1980 }
1981 
1982 void ShenandoahHeap::recycle_trash() {
1983   free_set()->recycle_trash();
1984 }
1985 
1986 void ShenandoahHeap::do_class_unloading() {
1987   _unloader.unload();
1988   if (mode()->is_generational()) {
1989     old_generation()->set_parsable(false);
1990   }
1991 }
1992 
1993 void ShenandoahHeap::stw_weak_refs(bool full_gc) {
1994   // Weak refs processing
1995   ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
1996                                                 : ShenandoahPhaseTimings::degen_gc_weakrefs;
1997   ShenandoahTimingsTracker t(phase);
1998   ShenandoahGCWorkerPhase worker_phase(phase);
1999   shenandoah_assert_generations_reconciled();
2000   gc_generation()->ref_processor()->process_references(phase, workers(), false /* concurrent */);
2001 }
2002 
2003 void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
2004   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2005 
2006   // Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to
2007   // make them parsable for update code to work correctly. Plus, we can compute new sizes
2008   // for future GCLABs here.
2009   if (UseTLAB) {
2010     ShenandoahGCPhase phase(concurrent ?
2011                             ShenandoahPhaseTimings::init_update_refs_manage_gclabs :
2012                             ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
2013     gclabs_retire(ResizeTLAB);
2014   }
2015 
2016   _update_refs_iterator.reset();
2017 }
2018 
2019 void ShenandoahHeap::propagate_gc_state_to_all_threads() {
2020   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
2021   if (_gc_state_changed) {
2022     ShenandoahGCStatePropagator propagator(_gc_state.raw_value());
2023     Threads::threads_do(&propagator);
2024     _gc_state_changed = false;
2025   }
2026 }
2027 
2028 void ShenandoahHeap::set_gc_state_at_safepoint(uint mask, bool value) {
2029   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
2030   _gc_state.set_cond(mask, value);
2031   _gc_state_changed = true;
2032 }
2033 
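// This variant is used outside of safepoints; the caller is responsible for propagating the new state
// to threads, e.g. via a handshake (see concurrent_prepare_for_update_refs()).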
2034 void ShenandoahHeap::set_gc_state_concurrent(uint mask, bool value) {
2035   _gc_state.set_cond(mask, value);
2036 }
2037 
2038 void ShenandoahHeap::set_concurrent_young_mark_in_progress(bool in_progress) {
2039   uint mask;
2040   assert(!has_forwarded_objects(), "Young marking is not concurrent with evacuation");
2041   if (!in_progress && is_concurrent_old_mark_in_progress()) {
2042     assert(mode()->is_generational(), "Only generational GC has old marking");
2043     assert(_gc_state.is_set(MARKING), "concurrent_old_marking_in_progress implies MARKING");
2044     // If old-marking is in progress when we turn off YOUNG_MARKING, leave MARKING (and OLD_MARKING) on
2045     mask = YOUNG_MARKING;
2046   } else {
2047     mask = MARKING | YOUNG_MARKING;
2048   }
2049   set_gc_state_at_safepoint(mask, in_progress);
2050   manage_satb_barrier(in_progress);
2051 }
2052 
2053 void ShenandoahHeap::set_concurrent_old_mark_in_progress(bool in_progress) {
2054 #ifdef ASSERT
2055   // has_forwarded_objects() iff UPDATE_REFS or EVACUATION
2056   bool has_forwarded = has_forwarded_objects();
2057   bool updating_or_evacuating = _gc_state.is_set(UPDATE_REFS | EVACUATION);
2058   bool evacuating = _gc_state.is_set(EVACUATION);
2059   assert ((has_forwarded == updating_or_evacuating) || (evacuating && !has_forwarded && collection_set()->is_empty()),
2060           "Updating or evacuating iff has forwarded objects, or if evacuation phase is promoting in place without forwarding");
2061 #endif
2062   if (!in_progress && is_concurrent_young_mark_in_progress()) {
2063     // If young-marking is in progress when we turn off OLD_MARKING, leave MARKING (and YOUNG_MARKING) on
2064     assert(_gc_state.is_set(MARKING), "concurrent_young_marking_in_progress implies MARKING");
2065     set_gc_state_at_safepoint(OLD_MARKING, in_progress);
2066   } else {
2067     set_gc_state_at_safepoint(MARKING | OLD_MARKING, in_progress);
2068   }
2069   manage_satb_barrier(in_progress);
2070 }
2071 
2072 bool ShenandoahHeap::is_prepare_for_old_mark_in_progress() const {
2073   return old_generation()->is_preparing_for_mark();
2074 }
2075 
2076 void ShenandoahHeap::manage_satb_barrier(bool active) {
2077   if (is_concurrent_mark_in_progress()) {
2078     // Ignore request to deactivate barrier while concurrent mark is in progress.
2079     // Do not attempt to re-activate the barrier if it is already active.
2080     if (active && !ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
2081       ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
2082     }
2083   } else {
2084     // No concurrent marking is in progress so honor request to deactivate,
2085     // but only if the barrier is already active.
2086     if (!active && ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
2087       ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
2088     }
2089   }
2090 }
2091 
2092 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
2093   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
2094   set_gc_state_at_safepoint(EVACUATION, in_progress);
2095 }
2096 
2097 void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
2098   if (in_progress) {
2099     _concurrent_strong_root_in_progress.set();
2100   } else {
2101     _concurrent_strong_root_in_progress.unset();
2102   }
2103 }
2104 
2105 void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
2106   set_gc_state_at_safepoint(WEAK_ROOTS, cond);
2107 }
2108 
2109 GCTracer* ShenandoahHeap::tracer() {
2110   return shenandoah_policy()->tracer();
2111 }
2112 
2113 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
2114   return _free_set->used();
2115 }
2116 
2117 bool ShenandoahHeap::try_cancel_gc() {
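  // Single atomic transition: only the first thread to move the state from CANCELLABLE to CANCELLED succeeds.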
2118   jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
2119   return prev == CANCELLABLE;
2120 }
2121 
2122 void ShenandoahHeap::cancel_concurrent_mark() {
2123   if (mode()->is_generational()) {
2124     young_generation()->cancel_marking();
2125     old_generation()->cancel_marking();
2126   }
2127 
2128   global_generation()->cancel_marking();
2129 
2130   ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
2131 }
2132 
2133 void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
2134   if (try_cancel_gc()) {
2135     FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
2136     log_info(gc)("%s", msg.buffer());
2137     Events::log(Thread::current(), "%s", msg.buffer());
2138     _cancel_requested_time = os::elapsedTime();
2139   }
2140 }
2141 
2142 uint ShenandoahHeap::max_workers() {
2143   return _max_workers;
2144 }
2145 
2146 void ShenandoahHeap::stop() {
2147   // The shutdown sequence should be able to terminate when GC is running.
2148 
2149   // Step 0. Notify policy to disable event recording and prevent visiting gc threads during shutdown
2150   _shenandoah_policy->record_shutdown();
2151 
2152   // Step 0a. Stop reporting on gc thread cpu utilization
2153   mmu_tracker()->stop();
2154 
2155   // Step 1. Notify control thread that we are in shutdown.
2156   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
2157   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
2158   control_thread()->prepare_for_graceful_shutdown();
2159 
2160   // Step 2. Notify GC workers that we are cancelling GC.
2161   cancel_gc(GCCause::_shenandoah_stop_vm);
2162 
2163   // Step 3. Wait until GC worker exits normally.
2164   control_thread()->stop();
2165 
2166   // Step 4. Shut down the uncommit thread.
2167   if (_uncommit_thread != nullptr) {
2168     _uncommit_thread->stop();
2169   }
2170 }
2171 
2172 void ShenandoahHeap::stw_unload_classes(bool full_gc) {
2173   if (!unload_classes()) return;
2174   ClassUnloadingContext ctx(_workers->active_workers(),
2175                             true /* unregister_nmethods_during_purge */,
2176                             false /* lock_nmethod_free_separately */);
2177 
2178   // Unload classes and purge SystemDictionary.
2179   {
2180     ShenandoahPhaseTimings::Phase phase = full_gc ?
2181                                           ShenandoahPhaseTimings::full_gc_purge_class_unload :
2182                                           ShenandoahPhaseTimings::degen_gc_purge_class_unload;
2183     ShenandoahIsAliveSelector is_alive;
2184     {
2185       CodeCache::UnlinkingScope scope(is_alive.is_alive_closure());
2186       ShenandoahGCPhase gc_phase(phase);
2187       ShenandoahGCWorkerPhase worker_phase(phase);
2188       bool unloading_occurred = SystemDictionary::do_unloading(gc_timer());
2189 
2190       uint num_workers = _workers->active_workers();
2191       ShenandoahClassUnloadingTask unlink_task(phase, num_workers, unloading_occurred);
2192       _workers->run_task(&unlink_task);
2193     }
2194     // Release the memory of unloaded nmethods.
2195     ClassUnloadingContext::context()->purge_and_free_nmethods();
2196   }
2197 
2198   {
2199     ShenandoahGCPhase phase(full_gc ?
2200                             ShenandoahPhaseTimings::full_gc_purge_cldg :
2201                             ShenandoahPhaseTimings::degen_gc_purge_cldg);
2202     ClassLoaderDataGraph::purge(true /* at_safepoint */);
2203   }
2204   // Resize and verify metaspace
2205   MetaspaceGC::compute_new_size();
2206   DEBUG_ONLY(MetaspaceUtils::verify();)
2207 }
2208 
2209 // Weak roots are either pre-evacuated (final mark) or updated (final update refs),
2210 // so they should not have forwarded oops.
2211 // However, we do need to "null" dead oops in the roots when that could not be done
2212 // in concurrent cycles.
2213 void ShenandoahHeap::stw_process_weak_roots(bool full_gc) {
2214   uint num_workers = _workers->active_workers();
2215   ShenandoahPhaseTimings::Phase timing_phase = full_gc ?
2216                                                ShenandoahPhaseTimings::full_gc_purge_weak_par :
2217                                                ShenandoahPhaseTimings::degen_gc_purge_weak_par;
2218   ShenandoahGCPhase phase(timing_phase);
2219   ShenandoahGCWorkerPhase worker_phase(timing_phase);
2220   // Cleanup weak roots
2221   if (has_forwarded_objects()) {
2222     ShenandoahForwardedIsAliveClosure is_alive;
2223     ShenandoahNonConcUpdateRefsClosure keep_alive;
2224     ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveClosure, ShenandoahNonConcUpdateRefsClosure>
2225       cleaning_task(timing_phase, &is_alive, &keep_alive, num_workers);
2226     _workers->run_task(&cleaning_task);
2227   } else {
2228     ShenandoahIsAliveClosure is_alive;
2229 #ifdef ASSERT
2230     ShenandoahAssertNotForwardedClosure verify_cl;
2231     ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, ShenandoahAssertNotForwardedClosure>
2232       cleaning_task(timing_phase, &is_alive, &verify_cl, num_workers);
2233 #else
2234     ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, DoNothingClosure>
2235       cleaning_task(timing_phase, &is_alive, &do_nothing_cl, num_workers);
2236 #endif
2237     _workers->run_task(&cleaning_task);
2238   }
2239 }
2240 
2241 void ShenandoahHeap::parallel_cleaning(bool full_gc) {
2242   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2243   assert(is_stw_gc_in_progress(), "Only for Degenerated and Full GC");
2244   ShenandoahGCPhase phase(full_gc ?
2245                           ShenandoahPhaseTimings::full_gc_purge :
2246                           ShenandoahPhaseTimings::degen_gc_purge);
2247   stw_weak_refs(full_gc);
2248   stw_process_weak_roots(full_gc);
2249   stw_unload_classes(full_gc);
2250 }
2251 
2252 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
2253   set_gc_state_at_safepoint(HAS_FORWARDED, cond);
2254 }
2255 
2256 void ShenandoahHeap::set_unload_classes(bool uc) {
2257   _unload_classes.set_cond(uc);
2258 }
2259 
2260 bool ShenandoahHeap::unload_classes() const {
2261   return _unload_classes.is_set();
2262 }
2263 
2264 address ShenandoahHeap::in_cset_fast_test_addr() {
2265   ShenandoahHeap* heap = ShenandoahHeap::heap();
2266   assert(heap->collection_set() != nullptr, "Sanity");
2267   return (address) heap->collection_set()->biased_map_address();
2268 }
2269 
2270 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
2271   if (mode()->is_generational()) {
2272     young_generation()->reset_bytes_allocated_since_gc_start();
2273     old_generation()->reset_bytes_allocated_since_gc_start();
2274   }
2275 
2276   global_generation()->reset_bytes_allocated_since_gc_start();
2277 }
2278 
2279 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
2280   _degenerated_gc_in_progress.set_cond(in_progress);
2281 }
2282 
2283 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2284   _full_gc_in_progress.set_cond(in_progress);
2285 }
2286 
2287 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2288   assert (is_full_gc_in_progress(), "should be");
2289   _full_gc_move_in_progress.set_cond(in_progress);
2290 }
2291 
2292 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2293   set_gc_state_at_safepoint(UPDATE_REFS, in_progress);
2294 }
2295 
2296 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2297   ShenandoahCodeRoots::register_nmethod(nm);
2298 }
2299 
2300 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
2301   ShenandoahCodeRoots::unregister_nmethod(nm);
2302 }
2303 
2304 void ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
2305   heap_region_containing(o)->record_pin();
2306 }
2307 
2308 void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
2309   ShenandoahHeapRegion* r = heap_region_containing(o);
2310   assert(r != nullptr, "Sanity");
2311   assert(r->pin_count() > 0, "Region %zu should have non-zero pins", r->index());
2312   r->record_unpin();
2313 }
2314 
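// Reconcile each region's pinned state with the pin counts recorded by pin_object() / unpin_object().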
2315 void ShenandoahHeap::sync_pinned_region_status() {
2316   ShenandoahHeapLocker locker(lock());
2317 
2318   for (size_t i = 0; i < num_regions(); i++) {
2319     ShenandoahHeapRegion *r = get_region(i);
2320     if (r->is_active()) {
2321       if (r->is_pinned()) {
2322         if (r->pin_count() == 0) {
2323           r->make_unpinned();
2324         }
2325       } else {
2326         if (r->pin_count() > 0) {
2327           r->make_pinned();
2328         }
2329       }
2330     }
2331   }
2332 
2333   assert_pinned_region_status();
2334 }
2335 
2336 #ifdef ASSERT
2337 void ShenandoahHeap::assert_pinned_region_status() {
2338   for (size_t i = 0; i < num_regions(); i++) {
2339     ShenandoahHeapRegion* r = get_region(i);
2340     shenandoah_assert_generations_reconciled();
2341     if (gc_generation()->contains(r)) {
2342       assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
2343              "Region %zu pinning status is inconsistent", i);
2344     }
2345   }
2346 }
2347 #endif
2348 
2349 ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
2350   return _gc_timer;
2351 }
2352 
2353 void ShenandoahHeap::prepare_concurrent_roots() {
2354   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2355   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2356   set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
2357   set_concurrent_weak_root_in_progress(true);
2358   if (unload_classes()) {
2359     _unloader.prepare();
2360   }
2361 }
2362 
2363 void ShenandoahHeap::finish_concurrent_roots() {
2364   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2365   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2366   if (unload_classes()) {
2367     _unloader.finish();
2368   }
2369 }
2370 
2371 #ifdef ASSERT
2372 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
2373   assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
2374 
2375   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
2376     // Use ParallelGCThreads inside safepoints
2377     assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads (%u) within safepoint, not %u",
2378            ParallelGCThreads, nworkers);
2379   } else {
2380     // Use ConcGCThreads outside safepoints
2381     assert(nworkers == ConcGCThreads, "Use ConcGCThreads (%u) outside safepoints, not %u",
2382            ConcGCThreads, nworkers);
2383   }
2384 }
2385 #endif
2386 
2387 ShenandoahVerifier* ShenandoahHeap::verifier() {
2388   guarantee(ShenandoahVerify, "Should be enabled");
2389   assert (_verifier != nullptr, "sanity");
2390   return _verifier;
2391 }
2392 
2393 template<bool CONCURRENT>
2394 class ShenandoahUpdateHeapRefsTask : public WorkerTask {
2395 private:
2396   ShenandoahHeap* _heap;
2397   ShenandoahRegionIterator* _regions;
2398 public:
2399   explicit ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions) :
2400     WorkerTask("Shenandoah Update References"),
2401     _heap(ShenandoahHeap::heap()),
2402     _regions(regions) {
2403   }
2404 
2405   void work(uint worker_id) {
2406     if (CONCURRENT) {
2407       ShenandoahConcurrentWorkerSession worker_session(worker_id);
2408       ShenandoahSuspendibleThreadSetJoiner stsj;
2409       do_work<ShenandoahConcUpdateRefsClosure>(worker_id);
2410     } else {
2411       ShenandoahParallelWorkerSession worker_session(worker_id);
2412       do_work<ShenandoahNonConcUpdateRefsClosure>(worker_id);
2413     }
2414   }
2415 
2416 private:
2417   template<class T>
2418   void do_work(uint worker_id) {
2419     if (CONCURRENT && (worker_id == 0)) {
2420       // We ask the first worker to replenish the Mutator free set by moving regions previously reserved to hold the
2421       // results of evacuation.  These reserves are no longer necessary because evacuation has completed.
2422       size_t cset_regions = _heap->collection_set()->count();
2423 
2424       // Now that evacuation is done, we can reassign any regions that had been reserved to hold the results of evacuation
2425       // to the mutator free set.  At the end of GC, we will have cset_regions newly evacuated fully empty regions from
2426       // which we will be able to replenish the Collector free set and the OldCollector free set in preparation for the
2427       // next GC cycle.
2428       _heap->free_set()->move_regions_from_collector_to_mutator(cset_regions);
2429     }
2430     // If !CONCURRENT, there's no value in expanding Mutator free set
2431     T cl;
2432     ShenandoahHeapRegion* r = _regions->next();
2433     while (r != nullptr) {
2434       HeapWord* update_watermark = r->get_update_watermark();
2435       assert (update_watermark >= r->bottom(), "sanity");
2436       if (r->is_active() && !r->is_cset()) {
2437         _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2438         if (ShenandoahPacing) {
2439           _heap->pacer()->report_update_refs(pointer_delta(update_watermark, r->bottom()));
2440         }
2441       }
2442       if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
2443         return;
2444       }
2445       r = _regions->next();
2446     }
2447   }
2448 };
2449 
2450 void ShenandoahHeap::update_heap_references(bool concurrent) {
2451   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2452 
2453   if (concurrent) {
2454     ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator);
2455     workers()->run_task(&task);
2456   } else {
2457     ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator);
2458     workers()->run_task(&task);
2459   }
2460 }
2461 
2462 void ShenandoahHeap::update_heap_region_states(bool concurrent) {
2463   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2464   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2465 
2466   {
2467     ShenandoahGCPhase phase(concurrent ?
2468                             ShenandoahPhaseTimings::final_update_refs_update_region_states :
2469                             ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
2470 
2471     final_update_refs_update_region_states();
2472 
2473     assert_pinned_region_status();
2474   }
2475 
2476   {
2477     ShenandoahGCPhase phase(concurrent ?
2478                             ShenandoahPhaseTimings::final_update_refs_trash_cset :
2479                             ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
2480     trash_cset_regions();
2481   }
2482 }
2483 
2484 void ShenandoahHeap::final_update_refs_update_region_states() {
2485   ShenandoahSynchronizePinnedRegionStates cl;
2486   parallel_heap_region_iterate(&cl);
2487 }
2488 
2489 void ShenandoahHeap::rebuild_free_set(bool concurrent) {
2490   ShenandoahGCPhase phase(concurrent ?
2491                           ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
2492                           ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
2493   ShenandoahHeapLocker locker(lock());
2494   size_t young_cset_regions, old_cset_regions;
2495   size_t first_old_region, last_old_region, old_region_count;
2496   _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count);
2497   // If there are no old regions, first_old_region will be greater than last_old_region
2498   assert((first_old_region > last_old_region) ||
2499          ((last_old_region + 1 - first_old_region >= old_region_count) &&
2500           get_region(first_old_region)->is_old() && get_region(last_old_region)->is_old()),
2501          "sanity: old_region_count: %zu, first_old_region: %zu, last_old_region: %zu",
2502          old_region_count, first_old_region, last_old_region);
2503 
2504   if (mode()->is_generational()) {
2505 #ifdef ASSERT
2506     if (ShenandoahVerify) {
2507       verifier()->verify_before_rebuilding_free_set();
2508     }
2509 #endif
2510 
2511     // The computation of bytes_of_allocation_runway_before_gc_trigger is quite conservative, so consider all of this
2512     // available for transfer to old. Note that transfer of humongous regions does not impact available.
2513     ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
2514     size_t allocation_runway = gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions);
2515     gen_heap->compute_old_generation_balance(allocation_runway, old_cset_regions);
2516 
2517     // Total old_available may have been expanded to hold anticipated promotions.  We trigger if the fragmented available
2518     // memory represents more than 16 regions worth of data.  Note that fragmentation may increase when we promote regular
2519     // regions in place when many of these regular regions have an abundant amount of available memory within them.  Fragmentation
2520     // will decrease as promote-by-copy consumes the available memory within these partially consumed regions.
2521     //
2522     // We consider old-gen to have excessive fragmentation if more than 12.5% of old-gen is free memory that resides
2523     // within partially consumed regions of memory.
2524   }
2525   // Rebuild free set based on adjusted generation sizes.
2526   _free_set->finish_rebuild(young_cset_regions, old_cset_regions, old_region_count);
2527 
2528   if (mode()->is_generational()) {
2529     ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
2530     ShenandoahOldGeneration* old_gen = gen_heap->old_generation();
2531     old_gen->heuristics()->evaluate_triggers(first_old_region, last_old_region, old_region_count, num_regions());
2532   }
2533 }
2534 
2535 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2536   print_on(st);
2537   st->cr();
2538   print_heap_regions_on(st);
2539 }
2540 
2541 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
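  // Each bitmap slice backs _bitmap_regions_per_slice heap regions; the slice counts as committed
  // while any region in its group is committed.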
2542   size_t slice = r->index() / _bitmap_regions_per_slice;
2543 
2544   size_t regions_from = _bitmap_regions_per_slice * slice;
2545   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2546   for (size_t g = regions_from; g < regions_to; g++) {
2547     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2548     if (skip_self && g == r->index()) continue;
2549     if (get_region(g)->is_committed()) {
2550       return true;
2551     }
2552   }
2553   return false;
2554 }
2555 
2556 bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2557   shenandoah_assert_heaplocked();
2558 
2559   // Bitmaps in special regions do not need commits
2560   if (_bitmap_region_special) {
2561     return true;
2562   }
2563 
2564   if (is_bitmap_slice_committed(r, true)) {
2565     // Some other region from the group is already committed, meaning the bitmap
2566     // slice is already committed, so we exit right away.
2567     return true;
2568   }
2569 
2570   // Commit the bitmap slice:
2571   size_t slice = r->index() / _bitmap_regions_per_slice;
2572   size_t off = _bitmap_bytes_per_slice * slice;
2573   size_t len = _bitmap_bytes_per_slice;
2574   char* start = (char*) _bitmap_region.start() + off;
2575 
2576   if (!os::commit_memory(start, len, false)) {
2577     return false;
2578   }
2579 
2580   if (AlwaysPreTouch) {
2581     os::pretouch_memory(start, start + len, _pretouch_bitmap_page_size);
2582   }
2583 
2584   return true;
2585 }
2586 
2587 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2588   shenandoah_assert_heaplocked();
2589 
2590   // Bitmaps in special regions do not need uncommits
2591   if (_bitmap_region_special) {
2592     return true;
2593   }
2594 
2595   if (is_bitmap_slice_committed(r, true)) {
2596     // Some other region from the group is still committed, meaning the bitmap
2597     // slice should stay committed, so we exit right away.
2598     return true;
2599   }
2600 
2601   // Uncommit the bitmap slice:
2602   size_t slice = r->index() / _bitmap_regions_per_slice;
2603   size_t off = _bitmap_bytes_per_slice * slice;
2604   size_t len = _bitmap_bytes_per_slice;
2605   if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
2606     return false;
2607   }
2608   return true;
2609 }
2610 
2611 void ShenandoahHeap::forbid_uncommit() {
2612   if (_uncommit_thread != nullptr) {
2613     _uncommit_thread->forbid_uncommit();
2614   }
2615 }
2616 
2617 void ShenandoahHeap::allow_uncommit() {
2618   if (_uncommit_thread != nullptr) {
2619     _uncommit_thread->allow_uncommit();
2620   }
2621 }
2622 
2623 #ifdef ASSERT
2624 bool ShenandoahHeap::is_uncommit_in_progress() {
2625   if (_uncommit_thread != nullptr) {
2626     return _uncommit_thread->is_uncommit_in_progress();
2627   }
2628   return false;
2629 }
2630 #endif
2631 
2632 void ShenandoahHeap::safepoint_synchronize_begin() {
2633   StackWatermarkSet::safepoint_synchronize_begin();
2634   SuspendibleThreadSet::synchronize();
2635 }
2636 
2637 void ShenandoahHeap::safepoint_synchronize_end() {
2638   SuspendibleThreadSet::desynchronize();
2639 }
2640 
2641 void ShenandoahHeap::try_inject_alloc_failure() {
2642   if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2643     _inject_alloc_failure.set();
2644     os::naked_short_sleep(1);
2645     if (cancelled_gc()) {
2646       log_info(gc)("Allocation failure was successfully injected");
2647     }
2648   }
2649 }
2650 
2651 bool ShenandoahHeap::should_inject_alloc_failure() {
2652   return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2653 }
2654 
2655 void ShenandoahHeap::initialize_serviceability() {
2656   _memory_pool = new ShenandoahMemoryPool(this);
2657   _cycle_memory_manager.add_pool(_memory_pool);
2658   _stw_memory_manager.add_pool(_memory_pool);
2659 }
2660 
2661 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2662   GrowableArray<GCMemoryManager*> memory_managers(2);
2663   memory_managers.append(&_cycle_memory_manager);
2664   memory_managers.append(&_stw_memory_manager);
2665   return memory_managers;
2666 }
2667 
2668 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2669   GrowableArray<MemoryPool*> memory_pools(1);
2670   memory_pools.append(_memory_pool);
2671   return memory_pools;
2672 }
2673 
2674 MemoryUsage ShenandoahHeap::memory_usage() {
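       // Reported through the memory management interfaces: initial size, used, committed and maximum capacity.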
2675   return MemoryUsage(_initial_size, used(), committed(), max_capacity());
2676 }
2677 
2678 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2679   _heap(ShenandoahHeap::heap()),
2680   _index(0) {}
2681 
2682 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2683   _heap(heap),
2684   _index(0) {}
2685 
2686 void ShenandoahRegionIterator::reset() {
2687   _index = 0;
2688 }
2689 
2690 bool ShenandoahRegionIterator::has_next() const {
2691   return _index < _heap->num_regions();
2692 }
2693 
2694 char ShenandoahHeap::gc_state() const {
2695   return _gc_state.raw_value();
2696 }
2697 
2698 bool ShenandoahHeap::is_gc_state(GCState state) const {
2699   // If the global gc state has been changed, but has not yet been propagated to all threads, then
2700   // the global gc state is the correct value. Once the gc state has been synchronized with all threads,
2701   // _gc_state_changed will be toggled to false and we need to use the thread-local state.
2702   return _gc_state_changed ? _gc_state.is_set(state) : ShenandoahThreadLocalData::is_gc_state(state);
2703 }
2704 
2705 
2706 ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
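       // Hand out this worker's liveness cache; it must be empty, which the asserts below verify.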
2707 #ifdef ASSERT
2708   assert(_liveness_cache != nullptr, "sanity");
2709   assert(worker_id < _max_workers, "sanity");
2710   for (uint i = 0; i < num_regions(); i++) {
2711     assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
2712   }
2713 #endif
2714   return _liveness_cache[worker_id];
2715 }
2716 
2717 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
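       // Fold the worker's per-region live data back into the regions and reset the cache entries to zero.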
2718   assert(worker_id < _max_workers, "sanity");
2719   assert(_liveness_cache != nullptr, "sanity");
2720   ShenandoahLiveData* ld = _liveness_cache[worker_id];
2721   for (uint i = 0; i < num_regions(); i++) {
2722     ShenandoahLiveData live = ld[i];
2723     if (live > 0) {
2724       ShenandoahHeapRegion* r = get_region(i);
2725       r->increase_live_data_gc_words(live);
2726       ld[i] = 0;
2727     }
2728   }
2729 }
2730 
2731 bool ShenandoahHeap::requires_barriers(stackChunkOop obj) const {
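       // No barriers are needed while the heap is idle (no marking, evacuation or reference updating in progress).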
2732   if (is_idle()) return false;
2733 
2734   // Objects allocated after the start of marking are implicitly alive and do not need any barriers
2735   // during the marking phase.
2736   if (is_concurrent_mark_in_progress() &&
2737      !marking_context()->allocated_after_mark_start(obj)) {
2738     return true;
2739   }
2740 
2741   // Cannot guarantee obj is deeply good: its references may still point to forwarded objects.
2742   if (has_forwarded_objects()) {
2743     return true;
2744   }
2745 
2746   return false;
2747 }
2748 
2749 HeapWord* ShenandoahHeap::allocate_loaded_archive_space(size_t size) {
2750 #if INCLUDE_CDS_JAVA_HEAP
2751   // CDS wants a contiguous memory range to load a bunch of objects.
2752   // This effectively bypasses normal allocation paths, and requires
2753   // a bit of massaging to restore GC invariants afterwards.
2754 
2755   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
2756 
2757   // Easy case: a single regular region, no further adjustments needed.
2758   if (!ShenandoahHeapRegion::requires_humongous(size)) {
2759     return allocate_memory(req);
2760   }
2761 
2762   // Hard case: the requested size would cause a humongous allocation.
2763   // We need to make sure it looks like a regular allocation to the rest of GC.
2764 
2765   // CDS code guarantees that no object straddles multiple regions, as long as regions
2766   // are at least as large as MIN_GC_REGION_ALIGNMENT. It is impractical at this point to
2767   // deal with the case when Shenandoah runs with smaller regions.
2768   // TODO: This check can be dropped once MIN_GC_REGION_ALIGNMENT agrees more with Shenandoah.
2769   if (ShenandoahHeapRegion::region_size_bytes() < ArchiveHeapWriter::MIN_GC_REGION_ALIGNMENT) {
2770     return nullptr;
2771   }
2772 
2773   HeapWord* mem = allocate_memory(req);
2774   size_t start_idx = heap_region_index_containing(mem);
2775   size_t num_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
2776 
2777   // Flip humongous -> regular.
2778   {
2779     ShenandoahHeapLocker locker(lock(), false);
2780     for (size_t c = start_idx; c < start_idx + num_regions; c++) {
2781       get_region(c)->make_regular_bypass();
2782     }
2783   }
2784 
2785   return mem;
2786 #else
2787   assert(false, "Archive heap loader should not be available, should not be here");
2788   return nullptr;
2789 #endif // INCLUDE_CDS_JAVA_HEAP
2790 }
2791 
2792 void ShenandoahHeap::complete_loaded_archive_space(MemRegion archive_space) {
2793   // Nothing to do here, except checking that the heap looks fine.
2794 #ifdef ASSERT
2795   HeapWord* start = archive_space.start();
2796   HeapWord* end = archive_space.end();
2797 
2798   // No unclaimed space between the objects.
2799   // Objects are properly allocated in the correct regions.
2800   HeapWord* cur = start;
2801   while (cur < end) {
2802     oop oop = cast_to_oop(cur);
2803     shenandoah_assert_in_correct_region(nullptr, oop);
2804     cur += oop->size();
2805   }
2806 
2807   // No unclaimed tail at the end of archive space.
2808   assert(cur == end,
2809          "Archive space should be fully used: " PTR_FORMAT " " PTR_FORMAT,
2810          p2i(cur), p2i(end));
2811 
2812   // Region bounds are good.
2813   ShenandoahHeapRegion* begin_reg = heap_region_containing(start);
2814   ShenandoahHeapRegion* end_reg = heap_region_containing(end);
2815   assert(begin_reg->is_regular(), "Must be");
2816   assert(end_reg->is_regular(), "Must be");
2817   assert(begin_reg->bottom() == start,
2818          "Must agree: archive-space-start: " PTR_FORMAT ", begin-region-bottom: " PTR_FORMAT,
2819          p2i(start), p2i(begin_reg->bottom()));
2820   assert(end_reg->top() == end,
2821          "Must agree: archive-space-end: " PTR_FORMAT ", end-region-top: " PTR_FORMAT,
2822          p2i(end), p2i(end_reg->top()));
2823 #endif
2824 }
2825 
2826 ShenandoahGeneration* ShenandoahHeap::generation_for(ShenandoahAffiliation affiliation) const {
2827   if (!mode()->is_generational()) {
2828     return global_generation();
2829   } else if (affiliation == YOUNG_GENERATION) {
2830     return young_generation();
2831   } else if (affiliation == OLD_GENERATION) {
2832     return old_generation();
2833   }
2834 
2835   ShouldNotReachHere();
2836   return nullptr;
2837 }
2838 
2839 void ShenandoahHeap::log_heap_status(const char* msg) const {
2840   if (mode()->is_generational()) {
2841     young_generation()->log_status(msg);
2842     old_generation()->log_status(msg);
2843   } else {
2844     global_generation()->log_status(msg);
2845   }
2846 }