1 /*
   2  * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.
   4  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
   5  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   6  *
   7  * This code is free software; you can redistribute it and/or modify it
   8  * under the terms of the GNU General Public License version 2 only, as
   9  * published by the Free Software Foundation.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 
  28 #include "cds/aotMappedHeapWriter.hpp"
  29 #include "classfile/systemDictionary.hpp"
  30 #include "gc/shared/classUnloadingContext.hpp"
  31 #include "gc/shared/fullGCForwarding.inline.hpp"
  32 #include "gc/shared/gc_globals.hpp"
  33 #include "gc/shared/gcArguments.hpp"
  34 #include "gc/shared/gcTimer.hpp"
  35 #include "gc/shared/gcTraceTime.inline.hpp"
  36 #include "gc/shared/locationPrinter.inline.hpp"
  37 #include "gc/shared/memAllocator.hpp"
  38 #include "gc/shared/plab.hpp"
  39 #include "gc/shared/tlab_globals.hpp"
  40 #include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
  41 #include "gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp"
  42 #include "gc/shenandoah/mode/shenandoahGenerationalMode.hpp"
  43 #include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
  44 #include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
  45 #include "gc/shenandoah/shenandoahAllocRequest.hpp"
  46 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  47 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  48 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  49 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  50 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  51 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
  52 #include "gc/shenandoah/shenandoahControlThread.hpp"
  53 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  54 #include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
  55 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
  56 #include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
  57 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  58 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  59 #include "gc/shenandoah/shenandoahHeapRegionClosures.hpp"
  60 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  61 #include "gc/shenandoah/shenandoahInitLogger.hpp"
  62 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  63 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
  64 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  65 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  66 #include "gc/shenandoah/shenandoahPadding.hpp"
  67 #include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
  68 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  69 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  70 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  71 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
  72 #include "gc/shenandoah/shenandoahSTWMark.hpp"
  73 #include "gc/shenandoah/shenandoahUncommitThread.hpp"
  74 #include "gc/shenandoah/shenandoahUtils.hpp"
  75 #include "gc/shenandoah/shenandoahVerifier.hpp"
  76 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  77 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  78 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  79 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  80 #include "memory/allocation.hpp"
  81 #include "memory/classLoaderMetaspace.hpp"
  82 #include "memory/memoryReserver.hpp"
  83 #include "memory/metaspaceUtils.hpp"
  84 #include "memory/universe.hpp"
  85 #include "nmt/mallocTracker.hpp"
  86 #include "nmt/memTracker.hpp"
  87 #include "oops/compressedOops.inline.hpp"
  88 #include "prims/jvmtiTagMap.hpp"
  89 #include "runtime/atomicAccess.hpp"
  90 #include "runtime/globals.hpp"
  91 #include "runtime/interfaceSupport.inline.hpp"
  92 #include "runtime/java.hpp"
  93 #include "runtime/orderAccess.hpp"
  94 #include "runtime/safepointMechanism.hpp"
  95 #include "runtime/stackWatermarkSet.hpp"
  96 #include "runtime/threads.hpp"
  97 #include "runtime/vmThread.hpp"
  98 #include "utilities/events.hpp"
  99 #include "utilities/globalDefinitions.hpp"
 100 #include "utilities/powerOfTwo.hpp"
 101 #if INCLUDE_JVMCI
 102 #include "jvmci/jvmci.hpp"
 103 #endif
 104 #if INCLUDE_JFR
 105 #include "gc/shenandoah/shenandoahJfrSupport.hpp"
 106 #endif
 107 
 108 class ShenandoahPretouchHeapTask : public WorkerTask {
 109 private:
 110   ShenandoahRegionIterator _regions;
 111   const size_t _page_size;
 112 public:
 113   ShenandoahPretouchHeapTask(size_t page_size) :
 114     WorkerTask("Shenandoah Pretouch Heap"),
 115     _page_size(page_size) {}
 116 
 117   virtual void work(uint worker_id) {
 118     ShenandoahHeapRegion* r = _regions.next();
 119     while (r != nullptr) {
 120       if (r->is_committed()) {
 121         os::pretouch_memory(r->bottom(), r->end(), _page_size);
 122       }
 123       r = _regions.next();
 124     }
 125   }
 126 };
 127 
 128 class ShenandoahPretouchBitmapTask : public WorkerTask {
 129 private:
 130   ShenandoahRegionIterator _regions;
 131   char* _bitmap_base;
 132   const size_t _bitmap_size;
 133   const size_t _page_size;
 134 public:
 135   ShenandoahPretouchBitmapTask(char* bitmap_base, size_t bitmap_size, size_t page_size) :
 136     WorkerTask("Shenandoah Pretouch Bitmap"),
 137     _bitmap_base(bitmap_base),
 138     _bitmap_size(bitmap_size),
 139     _page_size(page_size) {}
 140 
 141   virtual void work(uint worker_id) {
 142     ShenandoahHeapRegion* r = _regions.next();
 143     while (r != nullptr) {
 144       size_t start = r->index()       * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 145       size_t end   = (r->index() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 146       assert (end <= _bitmap_size, "end is sane: %zu < %zu", end, _bitmap_size);
 147 
 148       if (r->is_committed()) {
 149         os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);
 150       }
 151 
 152       r = _regions.next();
 153     }
 154   }
 155 };
 156 
 157 static ReservedSpace reserve(size_t size, size_t preferred_page_size) {
 158   // When a page size is given we don't want to mix large
 159   // and normal pages. If the size is not a multiple of the
 160   // page size it will be aligned up to achieve this.
 161   size_t alignment = os::vm_allocation_granularity();
 162   if (preferred_page_size != os::vm_page_size()) {
 163     alignment = MAX2(preferred_page_size, alignment);
 164     size = align_up(size, alignment);
 165   }
 166 
 167   const ReservedSpace reserved = MemoryReserver::reserve(size, alignment, preferred_page_size, mtGC);
 168   if (!reserved.is_reserved()) {
 169     vm_exit_during_initialization("Could not reserve space");
 170   }
 171   return reserved;
 172 }
 173 
 174 jint ShenandoahHeap::initialize() {
 175   //
 176   // Figure out heap sizing
 177   //
 178 
 179   size_t init_byte_size = InitialHeapSize;
 180   size_t min_byte_size  = MinHeapSize;
 181   size_t max_byte_size  = MaxHeapSize;
 182   size_t heap_alignment = HeapAlignment;
 183 
 184   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 185 
 186   Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
 187   Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
 188 
 189   _num_regions = ShenandoahHeapRegion::region_count();
 190   assert(_num_regions == (max_byte_size / reg_size_bytes),
 191          "Regions should cover entire heap exactly: %zu != %zu/%zu",
 192          _num_regions, max_byte_size, reg_size_bytes);
 193 
 194   size_t num_committed_regions = init_byte_size / reg_size_bytes;
 195   num_committed_regions = MIN2(num_committed_regions, _num_regions);
 196   assert(num_committed_regions <= _num_regions, "sanity");
 197   _initial_size = num_committed_regions * reg_size_bytes;
 198 
 199   size_t num_min_regions = min_byte_size / reg_size_bytes;
 200   num_min_regions = MIN2(num_min_regions, _num_regions);
 201   assert(num_min_regions <= _num_regions, "sanity");
 202   _minimum_size = num_min_regions * reg_size_bytes;
 203 
 204   _soft_max_size = clamp(SoftMaxHeapSize, min_capacity(), max_capacity());
 205 
 206   _committed = _initial_size;
 207 
 208   size_t heap_page_size   = UseLargePages ? os::large_page_size() : os::vm_page_size();
 209   size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 210   size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 211 
 212   //
 213   // Reserve and commit memory for heap
 214   //
 215 
 216   ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
 217   initialize_reserved_region(heap_rs);
 218   _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
 219   _heap_region_special = heap_rs.special();
 220 
 221   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
 222          "Misaligned heap: " PTR_FORMAT, p2i(base()));
 223   os::trace_page_sizes_for_requested_size("Heap",
 224                                           max_byte_size, heap_alignment,
 225                                           heap_rs.base(),
 226                                           heap_rs.size(), heap_rs.page_size());
 227 
 228 #if SHENANDOAH_OPTIMIZED_MARKTASK
 229   // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
 230   // Fail if we ever attempt to address more than we can.
 231   if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
 232     FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
 233                           "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
 234                           "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
 235                 p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
 236     vm_exit_during_initialization("Fatal Error", buf);
 237   }
 238 #endif
 239 
 240   ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
 241   if (!_heap_region_special) {
 242     os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
 243                               "Cannot commit heap memory");
 244   }
 245 
 246   BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this, _heap_region));
 247 
 248   // Now we know the number of regions and heap sizes, initialize the heuristics.
 249   initialize_heuristics();
 250 
 251   // If ShenandoahCardBarrier is enabled but it's not generational mode
 252   // it means we're under passive mode and we have to initialize old gen
 253   // for the purpose of having card table.
 254   if (ShenandoahCardBarrier && !(mode()->is_generational())) {
 255     _old_generation = new ShenandoahOldGeneration(max_workers());
 256   }
 257 
 258   assert(_heap_region.byte_size() == heap_rs.size(), "Need to know reserved size for card table");
 259 
 260   //
 261   // Worker threads must be initialized after the barrier is configured
 262   //
 263   _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
 264   if (_workers == nullptr) {
 265     vm_exit_during_initialization("Failed necessary allocation.");
 266   } else {
 267     _workers->initialize_workers();
 268   }
 269 
 270   if (ParallelGCThreads > 1) {
 271     _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread", ParallelGCThreads);
 272     _safepoint_workers->initialize_workers();
 273   }
 274 
 275   //
 276   // Reserve and commit memory for bitmap(s)
 277   //
 278 
 279   size_t bitmap_size_orig = ShenandoahMarkBitMap::compute_size(heap_rs.size());
 280   _bitmap_size = align_up(bitmap_size_orig, bitmap_page_size);
 281 
 282   size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();
 283 
 284   guarantee(bitmap_bytes_per_region != 0,
 285             "Bitmap bytes per region should not be zero");
 286   guarantee(is_power_of_2(bitmap_bytes_per_region),
 287             "Bitmap bytes per region should be power of two: %zu", bitmap_bytes_per_region);
 288 
 289   if (bitmap_page_size > bitmap_bytes_per_region) {
 290     _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
 291     _bitmap_bytes_per_slice = bitmap_page_size;
 292   } else {
 293     _bitmap_regions_per_slice = 1;
 294     _bitmap_bytes_per_slice = bitmap_bytes_per_region;
 295   }
 296 
 297   guarantee(_bitmap_regions_per_slice >= 1,
 298             "Should have at least one region per slice: %zu",
 299             _bitmap_regions_per_slice);
 300 
 301   guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
 302             "Bitmap slices should be page-granular: bps = %zu, page size = %zu",
 303             _bitmap_bytes_per_slice, bitmap_page_size);
 304 
 305   ReservedSpace bitmap = reserve(_bitmap_size, bitmap_page_size);
 306   os::trace_page_sizes_for_requested_size("Mark Bitmap",
 307                                           bitmap_size_orig, bitmap_page_size,
 308                                           bitmap.base(),
 309                                           bitmap.size(), bitmap.page_size());
 310   MemTracker::record_virtual_memory_tag(bitmap, mtGC);
 311   _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
 312   _bitmap_region_special = bitmap.special();
 313 
 314   size_t bitmap_init_commit = _bitmap_bytes_per_slice *
 315     align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
 316   bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
 317   if (!_bitmap_region_special) {
 318     os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
 319                               "Cannot commit bitmap memory");
 320   }
 321 
 322   _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);
 323 
 324   if (ShenandoahVerify) {
 325     ReservedSpace verify_bitmap = reserve(_bitmap_size, bitmap_page_size);
 326     os::trace_page_sizes_for_requested_size("Verify Bitmap",
 327                                             bitmap_size_orig, bitmap_page_size,
 328                                             verify_bitmap.base(),
 329                                             verify_bitmap.size(), verify_bitmap.page_size());
 330     if (!verify_bitmap.special()) {
 331       os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
 332                                 "Cannot commit verification bitmap memory");
 333     }
 334     MemTracker::record_virtual_memory_tag(verify_bitmap, mtGC);
 335     MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
 336     _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
 337     _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
 338   }
 339 
 340   // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
 341   size_t aux_bitmap_page_size = bitmap_page_size;
 342 
 343   ReservedSpace aux_bitmap = reserve(_bitmap_size, aux_bitmap_page_size);
 344   os::trace_page_sizes_for_requested_size("Aux Bitmap",
 345                                           bitmap_size_orig, aux_bitmap_page_size,
 346                                           aux_bitmap.base(),
 347                                           aux_bitmap.size(), aux_bitmap.page_size());
 348   MemTracker::record_virtual_memory_tag(aux_bitmap, mtGC);
 349   _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
 350   _aux_bitmap_region_special = aux_bitmap.special();
 351   _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
 352 
 353   //
 354   // Create regions and region sets
 355   //
 356   size_t region_align = align_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE);
 357   size_t region_storage_size_orig = region_align * _num_regions;
 358   size_t region_storage_size = align_up(region_storage_size_orig,
 359                                         MAX2(region_page_size, os::vm_allocation_granularity()));
 360 
 361   ReservedSpace region_storage = reserve(region_storage_size, region_page_size);
 362   os::trace_page_sizes_for_requested_size("Region Storage",
 363                                           region_storage_size_orig, region_page_size,
 364                                           region_storage.base(),
 365                                           region_storage.size(), region_storage.page_size());
 366   MemTracker::record_virtual_memory_tag(region_storage, mtGC);
 367   if (!region_storage.special()) {
 368     os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false,
 369                               "Cannot commit region memory");
 370   }
 371 
 372   // Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks.
 373   // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there.
 374   // If not successful, bite a bullet and allocate at whatever address.
 375   {
 376     const size_t cset_align = MAX2<size_t>(os::vm_page_size(), os::vm_allocation_granularity());
 377     const size_t cset_size = align_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align);
 378     const size_t cset_page_size = os::vm_page_size();
 379 
 380     uintptr_t min = round_up_power_of_2(cset_align);
 381     uintptr_t max = (1u << 30u);
 382     ReservedSpace cset_rs;
 383 
 384     for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
 385       char* req_addr = (char*)addr;
 386       assert(is_aligned(req_addr, cset_align), "Should be aligned");
 387       cset_rs = MemoryReserver::reserve(req_addr, cset_size, cset_align, cset_page_size, mtGC);
 388       if (cset_rs.is_reserved()) {
 389         assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
 390         _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 391         break;
 392       }
 393     }
 394 
 395     if (_collection_set == nullptr) {
 396       cset_rs = MemoryReserver::reserve(cset_size, cset_align, os::vm_page_size(), mtGC);
 397       if (!cset_rs.is_reserved()) {
 398         vm_exit_during_initialization("Cannot reserve memory for collection set");
 399       }
 400 
 401       _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 402     }
 403     os::trace_page_sizes_for_requested_size("Collection Set",
 404                                             cset_size, cset_page_size,
 405                                             cset_rs.base(),
 406                                             cset_rs.size(), cset_rs.page_size());
 407   }
 408 
 409   _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
 410   _affiliations = NEW_C_HEAP_ARRAY(uint8_t, _num_regions, mtGC);
 411 
 412   {
 413     ShenandoahHeapLocker locker(lock());
 414     for (size_t i = 0; i < _num_regions; i++) {
 415       HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
 416       bool is_committed = i < num_committed_regions;
 417       void* loc = region_storage.base() + i * region_align;
 418 
 419       ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
 420       assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");
 421 
 422       _marking_context->initialize_top_at_mark_start(r);
 423       _regions[i] = r;
 424       assert(!collection_set()->is_in(i), "New region should not be in collection set");
 425 
 426       _affiliations[i] = ShenandoahAffiliation::FREE;
 427     }
 428     _free_set = new ShenandoahFreeSet(this, _num_regions);
 429 
 430 
 431     post_initialize_heuristics();
 432     // We are initializing free set.  We ignore cset region tallies.
 433     size_t young_cset_regions, old_cset_regions, first_old, last_old, num_old;
 434     _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
 435     if (mode()->is_generational()) {
 436       ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
 437       // We cannot call
 438       //  gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions)
 439       // until after the heap is fully initialized.  So we make up a safe value here.
 440       size_t allocation_runway = InitialHeapSize / 2;
 441       gen_heap->compute_old_generation_balance(allocation_runway, old_cset_regions);
 442     }
 443     _free_set->finish_rebuild(young_cset_regions, old_cset_regions, num_old);
 444   }
 445 
 446   if (AlwaysPreTouch) {
 447     // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
 448     // before initialize() below zeroes it with initializing thread. For any given region,
 449     // we touch the region and the corresponding bitmaps from the same thread.
 450     ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
 451 
 452     _pretouch_heap_page_size = heap_page_size;
 453     _pretouch_bitmap_page_size = bitmap_page_size;
 454 
 455     // OS memory managers may want to coalesce back-to-back pages. Make their jobs
 456     // simpler by pre-touching continuous spaces (heap and bitmap) separately.
 457 
 458     ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, _pretouch_bitmap_page_size);
 459     _workers->run_task(&bcl);
 460 
 461     ShenandoahPretouchHeapTask hcl(_pretouch_heap_page_size);
 462     _workers->run_task(&hcl);
 463   }
 464 
 465   //
 466   // Initialize the rest of GC subsystems
 467   //
 468 
 469   _liveness_cache = NEW_C_HEAP_ARRAY(ShenandoahLiveData*, _max_workers, mtGC);
 470   for (uint worker = 0; worker < _max_workers; worker++) {
 471     _liveness_cache[worker] = NEW_C_HEAP_ARRAY(ShenandoahLiveData, _num_regions, mtGC);
 472     Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
 473   }
 474 
 475   // There should probably be Shenandoah-specific options for these,
 476   // just as there are G1-specific options.
 477   {
 478     ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
 479     satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
 480     satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
 481   }
 482 
 483   _monitoring_support = new ShenandoahMonitoringSupport(this);
 484   _phase_timings = new ShenandoahPhaseTimings(max_workers());
 485   ShenandoahCodeRoots::initialize();
 486 
 487   initialize_controller();
 488 
 489   if (ShenandoahUncommit) {
 490     _uncommit_thread = new ShenandoahUncommitThread(this);
 491   }
 492 
 493   print_init_logger();
 494 
 495   FullGCForwarding::initialize(_heap_region);
 496 
 497   return JNI_OK;
 498 }
 499 
 500 void ShenandoahHeap::initialize_controller() {
 501   _control_thread = new ShenandoahControlThread();
 502 }
 503 
 504 void ShenandoahHeap::print_init_logger() const {
 505   ShenandoahInitLogger::print();
 506 }
 507 
 508 void ShenandoahHeap::initialize_mode() {
 509   if (ShenandoahGCMode != nullptr) {
 510     if (strcmp(ShenandoahGCMode, "satb") == 0) {
 511       _gc_mode = new ShenandoahSATBMode();
 512     } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
 513       _gc_mode = new ShenandoahPassiveMode();
 514     } else if (strcmp(ShenandoahGCMode, "generational") == 0) {
 515       _gc_mode = new ShenandoahGenerationalMode();
 516     } else {
 517       vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
 518     }
 519   } else {
 520     vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
 521   }
 522   _gc_mode->initialize_flags();
 523   if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 524     vm_exit_during_initialization(
 525             err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 526                     _gc_mode->name()));
 527   }
 528   if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
 529     vm_exit_during_initialization(
 530             err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 531                     _gc_mode->name()));
 532   }
 533 }
 534 
 535 void ShenandoahHeap::initialize_heuristics() {
 536   _global_generation = new ShenandoahGlobalGeneration(mode()->is_generational(), max_workers());
 537   _global_generation->initialize_heuristics(mode());
 538 }
 539 
 540 void ShenandoahHeap::post_initialize_heuristics() {
 541   _global_generation->post_initialize(this);
 542 }
 543 
 544 #ifdef _MSC_VER
 545 #pragma warning( push )
 546 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 547 #endif
 548 
 549 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 550   CollectedHeap(),
 551   _active_generation(nullptr),
 552   _initial_size(0),
 553   _committed(0),
 554   _max_workers(MAX3(ConcGCThreads, ParallelGCThreads, 1U)),
 555   _workers(nullptr),
 556   _safepoint_workers(nullptr),
 557   _heap_region_special(false),
 558   _num_regions(0),
 559   _regions(nullptr),
 560   _affiliations(nullptr),
 561   _gc_state_changed(false),
 562   _gc_no_progress_count(0),
 563   _cancel_requested_time(0),
 564   _update_refs_iterator(this),
 565   _global_generation(nullptr),
 566   _control_thread(nullptr),
 567   _uncommit_thread(nullptr),
 568   _young_generation(nullptr),
 569   _old_generation(nullptr),
 570   _shenandoah_policy(policy),
 571   _gc_mode(nullptr),
 572   _free_set(nullptr),
 573   _verifier(nullptr),
 574   _phase_timings(nullptr),
 575   _monitoring_support(nullptr),
 576   _memory_pool(nullptr),
 577   _stw_memory_manager("Shenandoah Pauses"),
 578   _cycle_memory_manager("Shenandoah Cycles"),
 579   _gc_timer(new ConcurrentGCTimer()),
 580   _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
 581   _marking_context(nullptr),
 582   _bitmap_size(0),
 583   _bitmap_regions_per_slice(0),
 584   _bitmap_bytes_per_slice(0),
 585   _bitmap_region_special(false),
 586   _aux_bitmap_region_special(false),
 587   _liveness_cache(nullptr),
 588   _collection_set(nullptr),
 589   _evac_tracker(new ShenandoahEvacuationTracker())
 590 {
 591   // Initialize GC mode early, many subsequent initialization procedures depend on it
 592   initialize_mode();
 593   _cancelled_gc.set(GCCause::_no_gc);
 594 }
 595 
 596 #ifdef _MSC_VER
 597 #pragma warning( pop )
 598 #endif
 599 
 600 void ShenandoahHeap::print_heap_on(outputStream* st) const {
 601   const bool is_generational = mode()->is_generational();
 602   const char* front_spacing = "";
 603   if (is_generational) {
 604     st->print_cr("Generational Shenandoah Heap");
 605     st->print_cr(" Young:");
 606     st->print_cr("  " PROPERFMT " max, " PROPERFMT " used", PROPERFMTARGS(young_generation()->max_capacity()), PROPERFMTARGS(young_generation()->used()));
 607     st->print_cr(" Old:");
 608     st->print_cr("  " PROPERFMT " max, " PROPERFMT " used", PROPERFMTARGS(old_generation()->max_capacity()), PROPERFMTARGS(old_generation()->used()));
 609     st->print_cr(" Entire heap:");
 610     st->print_cr("  " PROPERFMT " soft max, " PROPERFMT " committed",
 611                 PROPERFMTARGS(soft_max_capacity()), PROPERFMTARGS(committed()));
 612     front_spacing = " ";
 613   } else {
 614     st->print_cr("Shenandoah Heap");
 615     st->print_cr("  " PROPERFMT " max, " PROPERFMT " soft max, " PROPERFMT " committed, " PROPERFMT " used",
 616       PROPERFMTARGS(max_capacity()),
 617       PROPERFMTARGS(soft_max_capacity()),
 618       PROPERFMTARGS(committed()),
 619       PROPERFMTARGS(used())
 620     );
 621   }
 622   st->print_cr("%s %zu x " PROPERFMT " regions",
 623           front_spacing,
 624           num_regions(),
 625           PROPERFMTARGS(ShenandoahHeapRegion::region_size_bytes()));
 626 
 627   st->print("Status: ");
 628   if (has_forwarded_objects())                 st->print("has forwarded objects, ");
 629   if (!is_generational) {
 630     if (is_concurrent_mark_in_progress())      st->print("marking,");
 631   } else {
 632     if (is_concurrent_old_mark_in_progress())    st->print("old marking, ");
 633     if (is_concurrent_young_mark_in_progress())  st->print("young marking, ");
 634   }
 635   if (is_evacuation_in_progress())             st->print("evacuating, ");
 636   if (is_update_refs_in_progress())            st->print("updating refs, ");
 637   if (is_degenerated_gc_in_progress())         st->print("degenerated gc, ");
 638   if (is_full_gc_in_progress())                st->print("full gc, ");
 639   if (is_full_gc_move_in_progress())           st->print("full gc move, ");
 640   if (is_concurrent_weak_root_in_progress())   st->print("concurrent weak roots, ");
 641   if (is_concurrent_strong_root_in_progress() &&
 642       !is_concurrent_weak_root_in_progress())  st->print("concurrent strong roots, ");
 643 
 644   if (cancelled_gc()) {
 645     st->print("cancelled");
 646   } else {
 647     st->print("not cancelled");
 648   }
 649   st->cr();
 650 
 651   st->print_cr("Reserved region:");
 652   st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
 653                p2i(reserved_region().start()),
 654                p2i(reserved_region().end()));
 655 
 656   ShenandoahCollectionSet* cset = collection_set();
 657   st->print_cr("Collection set:");
 658   if (cset != nullptr) {
 659     st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address()));
 660     st->print_cr(" - map (biased):  " PTR_FORMAT, p2i(cset->biased_map_address()));
 661   } else {
 662     st->print_cr(" (null)");
 663   }
 664 
 665   st->cr();
 666 
 667   if (Verbose) {
 668     st->cr();
 669     print_heap_regions_on(st);
 670   }
 671 }
 672 
 673 void ShenandoahHeap::print_gc_on(outputStream* st) const {
 674   print_heap_regions_on(st);
 675 }
 676 
 677 class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
 678 public:
 679   void do_thread(Thread* thread) {
 680     assert(thread != nullptr, "Sanity");
 681     ShenandoahThreadLocalData::initialize_gclab(thread);
 682   }
 683 };
 684 
 685 void ShenandoahHeap::post_initialize() {
 686   CollectedHeap::post_initialize();
 687 
 688   check_soft_max_changed();
 689 
 690   // Schedule periodic task to report on gc thread CPU utilization
 691   _mmu_tracker.initialize();
 692 
 693   MutexLocker ml(Threads_lock);
 694 
 695   ShenandoahInitWorkerGCLABClosure init_gclabs;
 696   _workers->threads_do(&init_gclabs);
 697 
 698   // gclab can not be initialized early during VM startup, as it can not determinate its max_size.
 699   // Now, we will let WorkerThreads to initialize gclab when new worker is created.
 700   _workers->set_initialize_gclab();
 701 
 702   // Note that the safepoint workers may require gclabs if the threads are used to create a heap dump
 703   // during a concurrent evacuation phase.
 704   if (_safepoint_workers != nullptr) {
 705     _safepoint_workers->threads_do(&init_gclabs);
 706     _safepoint_workers->set_initialize_gclab();
 707   }
 708 
 709   JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers();)
 710 }
 711 
 712 ShenandoahHeuristics* ShenandoahHeap::heuristics() {
 713   return _global_generation->heuristics();
 714 }
 715 
 716 size_t ShenandoahHeap::used() const {
 717   return global_generation()->used();
 718 }
 719 
 720 size_t ShenandoahHeap::committed() const {
 721   return AtomicAccess::load(&_committed);
 722 }
 723 
 724 void ShenandoahHeap::increase_committed(size_t bytes) {
 725   shenandoah_assert_heaplocked_or_safepoint();
 726   _committed += bytes;
 727 }
 728 
 729 void ShenandoahHeap::decrease_committed(size_t bytes) {
 730   shenandoah_assert_heaplocked_or_safepoint();
 731   _committed -= bytes;
 732 }
 733 
 734 size_t ShenandoahHeap::capacity() const {
 735   return committed();
 736 }
 737 
 738 size_t ShenandoahHeap::max_capacity() const {
 739   return _num_regions * ShenandoahHeapRegion::region_size_bytes();
 740 }
 741 
 742 size_t ShenandoahHeap::soft_max_capacity() const {
 743   size_t v = AtomicAccess::load(&_soft_max_size);
 744   assert(min_capacity() <= v && v <= max_capacity(),
 745          "Should be in bounds: %zu <= %zu <= %zu",
 746          min_capacity(), v, max_capacity());
 747   return v;
 748 }
 749 
 750 void ShenandoahHeap::set_soft_max_capacity(size_t v) {
 751   assert(min_capacity() <= v && v <= max_capacity(),
 752          "Should be in bounds: %zu <= %zu <= %zu",
 753          min_capacity(), v, max_capacity());
 754   AtomicAccess::store(&_soft_max_size, v);
 755 }
 756 
 757 size_t ShenandoahHeap::min_capacity() const {
 758   return _minimum_size;
 759 }
 760 
 761 size_t ShenandoahHeap::initial_capacity() const {
 762   return _initial_size;
 763 }
 764 
 765 bool ShenandoahHeap::is_in(const void* p) const {
 766   if (!is_in_reserved(p)) {
 767     return false;
 768   }
 769 
 770   if (is_full_gc_move_in_progress()) {
 771     // Full GC move is running, we do not have a consistent region
 772     // information yet. But we know the pointer is in heap.
 773     return true;
 774   }
 775 
 776   // Now check if we point to a live section in active region.
 777   const ShenandoahHeapRegion* r = heap_region_containing(p);
 778   if (p >= r->top()) {
 779     return false;
 780   }
 781 
 782   if (r->is_active()) {
 783     return true;
 784   }
 785 
 786   // The region is trash, but won't be recycled until after concurrent weak
 787   // roots. We also don't allow mutators to allocate from trash regions
 788   // during weak roots. Concurrent class unloading may access unmarked oops
 789   // in trash regions.
 790   return r->is_trash() && is_concurrent_weak_root_in_progress();
 791 }
 792 
 793 void ShenandoahHeap::notify_soft_max_changed() {
 794   if (_uncommit_thread != nullptr) {
 795     _uncommit_thread->notify_soft_max_changed();
 796   }
 797 }
 798 
 799 void ShenandoahHeap::notify_explicit_gc_requested() {
 800   if (_uncommit_thread != nullptr) {
 801     _uncommit_thread->notify_explicit_gc_requested();
 802   }
 803 }
 804 
 805 bool ShenandoahHeap::check_soft_max_changed() {
 806   size_t new_soft_max = AtomicAccess::load(&SoftMaxHeapSize);
 807   size_t old_soft_max = soft_max_capacity();
 808   if (new_soft_max != old_soft_max) {
 809     new_soft_max = MAX2(min_capacity(), new_soft_max);
 810     new_soft_max = MIN2(max_capacity(), new_soft_max);
 811     if (new_soft_max != old_soft_max) {
 812       log_info(gc)("Soft Max Heap Size: %zu%s -> %zu%s",
 813                    byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
 814                    byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
 815       );
 816       set_soft_max_capacity(new_soft_max);
 817       return true;
 818     }
 819   }
 820   return false;
 821 }
 822 
 823 void ShenandoahHeap::notify_heap_changed() {
 824   // Update monitoring counters when we took a new region. This amortizes the
 825   // update costs on slow path.
 826   monitoring_support()->notify_heap_changed();
 827   _heap_changed.try_set();
 828 }
 829 
 830 void ShenandoahHeap::set_forced_counters_update(bool value) {
 831   monitoring_support()->set_forced_counters_update(value);
 832 }
 833 
 834 void ShenandoahHeap::handle_force_counters_update() {
 835   monitoring_support()->handle_force_counters_update();
 836 }
 837 
 838 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
 839   // New object should fit the GCLAB size
 840   size_t min_size = MAX2(size, PLAB::min_size());
 841 
 842   // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
 843   size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
 844 
 845   new_size = MIN2(new_size, PLAB::max_size());
 846   new_size = MAX2(new_size, PLAB::min_size());
 847 
 848   // Record new heuristic value even if we take any shortcut. This captures
 849   // the case when moderately-sized objects always take a shortcut. At some point,
 850   // heuristics should catch up with them.
 851   log_debug(gc, free)("Set new GCLAB size: %zu", new_size);
 852   ShenandoahThreadLocalData::set_gclab_size(thread, new_size);
 853 
 854   if (new_size < size) {
 855     // New size still does not fit the object. Fall back to shared allocation.
 856     // This avoids retiring perfectly good GCLABs, when we encounter a large object.
 857     log_debug(gc, free)("New gclab size (%zu) is too small for %zu", new_size, size);
 858     return nullptr;
 859   }
 860 
 861   // Retire current GCLAB, and allocate a new one.
 862   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
 863   gclab->retire();
 864 
 865   size_t actual_size = 0;
 866   HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
 867   if (gclab_buf == nullptr) {
 868     return nullptr;
 869   }
 870 
 871   assert (size <= actual_size, "allocation should fit");
 872 
 873   // ...and clear or zap just allocated TLAB, if needed.
 874   if (ZeroTLAB) {
 875     Copy::zero_to_words(gclab_buf, actual_size);
 876   } else if (ZapTLAB) {
 877     // Skip mangling the space corresponding to the object header to
 878     // ensure that the returned space is not considered parsable by
 879     // any concurrent GC thread.
 880     size_t hdr_size = oopDesc::header_size();
 881     Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
 882   }
 883   gclab->set_buf(gclab_buf, actual_size);
 884   return gclab->allocate(size);
 885 }
 886 
 887 // Called from stubs in JIT code or interpreter
 888 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
 889                                             size_t requested_size,
 890                                             size_t* actual_size) {
 891   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
 892   HeapWord* res = allocate_memory(req);
 893   if (res != nullptr) {
 894     *actual_size = req.actual_size();
 895   } else {
 896     *actual_size = 0;
 897   }
 898   return res;
 899 }
 900 
 901 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
 902                                              size_t word_size,
 903                                              size_t* actual_size) {
 904   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
 905   HeapWord* res = allocate_memory(req);
 906   if (res != nullptr) {
 907     *actual_size = req.actual_size();
 908   } else {
 909     *actual_size = 0;
 910   }
 911   return res;
 912 }
 913 
 914 HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
 915   bool in_new_region = false;
 916   HeapWord* result = nullptr;
 917 
 918   if (req.is_mutator_alloc()) {
 919 
 920     if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
 921       result = allocate_memory_under_lock(req, in_new_region);
 922     }
 923 
 924     // Check that gc overhead is not exceeded.
 925     //
 926     // Shenandoah will grind along for quite a while allocating one
 927     // object at a time using shared (non-tlab) allocations. This check
 928     // is testing that the GC overhead limit has not been exceeded.
 929     // This will notify the collector to start a cycle, but will raise
 930     // an OOME to the mutator if the last Full GCs have not made progress.
 931     // gc_no_progress_count is incremented following each degen or full GC that fails to achieve is_good_progress().
 932     if (result == nullptr && !req.is_lab_alloc() && get_gc_no_progress_count() > ShenandoahNoProgressThreshold) {
 933       control_thread()->handle_alloc_failure(req, false);
 934       req.set_actual_size(0);
 935       return nullptr;
 936     }
 937 
 938     if (result == nullptr) {
 939       // Block until control thread reacted, then retry allocation.
 940       //
 941       // It might happen that one of the threads requesting allocation would unblock
 942       // way later after GC happened, only to fail the second allocation, because
 943       // other threads have already depleted the free storage. In this case, a better
 944       // strategy is to try again, until at least one full GC has completed.
 945       //
 946       // Stop retrying and return nullptr to cause OOMError exception if our allocation failed even after:
 947       //   a) We experienced a GC that had good progress, or
 948       //   b) We experienced at least one Full GC (whether or not it had good progress)
 949 
 950       const size_t original_count = shenandoah_policy()->full_gc_count();
 951       while (result == nullptr && should_retry_allocation(original_count)) {
 952         control_thread()->handle_alloc_failure(req, true);
 953         result = allocate_memory_under_lock(req, in_new_region);
 954       }
 955       if (result != nullptr) {
 956         // If our allocation request has been satisfied after it initially failed, we count this as good gc progress
 957         notify_gc_progress();
 958       }
 959       if (log_develop_is_enabled(Debug, gc, alloc)) {
 960         ResourceMark rm;
 961         log_debug(gc, alloc)("Thread: %s, Result: " PTR_FORMAT ", Request: %s, Size: %zu"
 962                              ", Original: %zu, Latest: %zu",
 963                              Thread::current()->name(), p2i(result), req.type_string(), req.size(),
 964                              original_count, get_gc_no_progress_count());
 965       }
 966     }
 967   } else {
 968     assert(req.is_gc_alloc(), "Can only accept GC allocs here");
 969     result = allocate_memory_under_lock(req, in_new_region);
 970     // Do not call handle_alloc_failure() here, because we cannot block.
 971     // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
 972   }
 973 
 974   if (in_new_region) {
 975     notify_heap_changed();
 976   }
 977 
 978   if (result == nullptr) {
 979     req.set_actual_size(0);
 980   }
 981 
 982   if (result != nullptr) {
 983     size_t requested = req.size();
 984     size_t actual = req.actual_size();
 985 
 986     assert (req.is_lab_alloc() || (requested == actual),
 987             "Only LAB allocations are elastic: %s, requested = %zu, actual = %zu",
 988             ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);
 989   }
 990 
 991   return result;
 992 }
 993 
 994 inline bool ShenandoahHeap::should_retry_allocation(size_t original_full_gc_count) const {
 995   return shenandoah_policy()->full_gc_count() == original_full_gc_count
 996       && !shenandoah_policy()->is_at_shutdown();
 997 }
 998 
 999 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
1000   // If we are dealing with mutator allocation, then we may need to block for safepoint.
1001   // We cannot block for safepoint for GC allocations, because there is a high chance
1002   // we are already running at safepoint or from stack watermark machinery, and we cannot
1003   // block again.
1004   ShenandoahHeapLocker locker(lock(), req.is_mutator_alloc());
1005 
1006   // Make sure the old generation has room for either evacuations or promotions before trying to allocate.
1007   if (req.is_old() && !old_generation()->can_allocate(req)) {
1008     return nullptr;
1009   }
1010 
1011   // If TLAB request size is greater than available, allocate() will attempt to downsize request to fit within available
1012   // memory.
1013   HeapWord* result = _free_set->allocate(req, in_new_region);
1014 
1015   // Record the plab configuration for this result and register the object.
1016   if (result != nullptr && req.is_old()) {
1017     old_generation()->configure_plab_for_current_thread(req);
1018     if (req.type() == ShenandoahAllocRequest::_alloc_shared_gc) {
1019       // Register the newly allocated object while we're holding the global lock since there's no synchronization
1020       // built in to the implementation of register_object().  There are potential races when multiple independent
1021       // threads are allocating objects, some of which might span the same card region.  For example, consider
1022       // a card table's memory region within which three objects are being allocated by three different threads:
1023       //
1024       // objects being "concurrently" allocated:
1025       //    [-----a------][-----b-----][--------------c------------------]
1026       //            [---- card table memory range --------------]
1027       //
1028       // Before any objects are allocated, this card's memory range holds no objects.  Note that allocation of object a
1029       // wants to set the starts-object, first-start, and last-start attributes of the preceding card region.
1030       // Allocation of object b wants to set the starts-object, first-start, and last-start attributes of this card region.
1031       // Allocation of object c also wants to set the starts-object, first-start, and last-start attributes of this
1032       // card region.
1033       //
1034       // The thread allocating b and the thread allocating c can "race" in various ways, resulting in confusion, such as
1035       // last-start representing object b while first-start represents object c.  This is why we need to require all
1036       // register_object() invocations to be "mutually exclusive" with respect to each card's memory range.
1037       old_generation()->card_scan()->register_object(result);
1038     }
1039   }
1040 
1041   return result;
1042 }
1043 
1044 HeapWord* ShenandoahHeap::mem_allocate(size_t size) {
1045   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
1046   return allocate_memory(req);
1047 }
1048 
1049 MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
1050                                                              size_t size,
1051                                                              Metaspace::MetadataType mdtype) {
1052   MetaWord* result;
1053 
1054   // Inform metaspace OOM to GC heuristics if class unloading is possible.
1055   ShenandoahHeuristics* h = global_generation()->heuristics();
1056   if (h->can_unload_classes()) {
1057     h->record_metaspace_oom();
1058   }
1059 
1060   // Expand and retry allocation
1061   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1062   if (result != nullptr) {
1063     return result;
1064   }
1065 
1066   // Start full GC
1067   collect(GCCause::_metadata_GC_clear_soft_refs);
1068 
1069   // Retry allocation
1070   result = loader_data->metaspace_non_null()->allocate(size, mdtype);
1071   if (result != nullptr) {
1072     return result;
1073   }
1074 
1075   // Expand and retry allocation
1076   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1077   if (result != nullptr) {
1078     return result;
1079   }
1080 
1081   // Out of memory
1082   return nullptr;
1083 }
1084 
1085 class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
1086 private:
1087   ShenandoahHeap* const _heap;
1088   Thread* const _thread;
1089 public:
1090   ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
1091     _heap(heap), _thread(Thread::current()) {}
1092 
1093   void do_object(oop p) {
1094     shenandoah_assert_marked(nullptr, p);
1095     if (!p->is_forwarded()) {
1096       _heap->evacuate_object(p, _thread);
1097     }
1098   }
1099 };
1100 
1101 class ShenandoahEvacuationTask : public WorkerTask {
1102 private:
1103   ShenandoahHeap* const _sh;
1104   ShenandoahCollectionSet* const _cs;
1105   bool _concurrent;
1106 public:
1107   ShenandoahEvacuationTask(ShenandoahHeap* sh,
1108                            ShenandoahCollectionSet* cs,
1109                            bool concurrent) :
1110     WorkerTask("Shenandoah Evacuation"),
1111     _sh(sh),
1112     _cs(cs),
1113     _concurrent(concurrent)
1114   {}
1115 
1116   void work(uint worker_id) {
1117     if (_concurrent) {
1118       ShenandoahConcurrentWorkerSession worker_session(worker_id);
1119       ShenandoahSuspendibleThreadSetJoiner stsj;
1120       ShenandoahEvacOOMScope oom_evac_scope;
1121       do_work();
1122     } else {
1123       ShenandoahParallelWorkerSession worker_session(worker_id);
1124       ShenandoahEvacOOMScope oom_evac_scope;
1125       do_work();
1126     }
1127   }
1128 
1129 private:
1130   void do_work() {
1131     ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
1132     ShenandoahHeapRegion* r;
1133     while ((r =_cs->claim_next()) != nullptr) {
1134       assert(r->has_live(), "Region %zu should have been reclaimed early", r->index());
1135       _sh->marked_object_iterate(r, &cl);
1136 
1137       if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
1138         break;
1139       }
1140     }
1141   }
1142 };
1143 
1144 class ShenandoahRetireGCLABClosure : public ThreadClosure {
1145 private:
1146   bool const _resize;
1147 public:
1148   explicit ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
1149   void do_thread(Thread* thread) override {
1150     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1151     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1152     gclab->retire();
1153     if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1154       ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1155     }
1156 
1157     if (ShenandoahHeap::heap()->mode()->is_generational()) {
1158       PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1159       assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
1160 
1161       // There are two reasons to retire all plabs between old-gen evacuation passes.
1162       //  1. We need to make the plab memory parsable by remembered-set scanning.
1163       //  2. We need to establish a trustworthy UpdateWaterMark value within each old-gen heap region
1164       ShenandoahGenerationalHeap::heap()->retire_plab(plab, thread);
1165 
1166       // Re-enable promotions for the next evacuation phase.
1167       ShenandoahThreadLocalData::enable_plab_promotions(thread);
1168 
1169       // Reset the fill size for next evacuation phase.
1170       if (_resize && ShenandoahThreadLocalData::plab_size(thread) > 0) {
1171         ShenandoahThreadLocalData::set_plab_size(thread, 0);
1172       }
1173     }
1174   }
1175 };
1176 
1177 class ShenandoahGCStatePropagatorHandshakeClosure : public HandshakeClosure {
1178 public:
1179   explicit ShenandoahGCStatePropagatorHandshakeClosure(char gc_state) :
1180     HandshakeClosure("Shenandoah GC State Change"),
1181     _gc_state(gc_state) {}
1182 
1183   void do_thread(Thread* thread) override {
1184     ShenandoahThreadLocalData::set_gc_state(thread, _gc_state);
1185   }
1186 private:
1187   char _gc_state;
1188 };
1189 
1190 class ShenandoahPrepareForUpdateRefsHandshakeClosure : public HandshakeClosure {
1191 public:
1192   explicit ShenandoahPrepareForUpdateRefsHandshakeClosure(char gc_state) :
1193     HandshakeClosure("Shenandoah Prepare for Update Refs"),
1194     _retire(ResizeTLAB), _propagator(gc_state) {}
1195 
1196   void do_thread(Thread* thread) override {
1197     _propagator.do_thread(thread);
1198     if (ShenandoahThreadLocalData::gclab(thread) != nullptr) {
1199       _retire.do_thread(thread);
1200     }
1201   }
1202 private:
1203   ShenandoahRetireGCLABClosure _retire;
1204   ShenandoahGCStatePropagatorHandshakeClosure _propagator;
1205 };
1206 
1207 void ShenandoahHeap::evacuate_collection_set(ShenandoahGeneration* generation, bool concurrent) {
1208   assert(generation->is_global(), "Only global generation expected here");
1209   ShenandoahEvacuationTask task(this, _collection_set, concurrent);
1210   workers()->run_task(&task);
1211 }
1212 
1213 void ShenandoahHeap::concurrent_prepare_for_update_refs() {
1214   {
1215     // Java threads take this lock while they are being attached and added to the list of threads.
1216     // If another thread holds this lock before we update the gc state, it will receive a stale
1217     // gc state, but they will have been added to the list of java threads and so will be corrected
1218     // by the following handshake.
1219     MutexLocker lock(Threads_lock);
1220 
1221     // A cancellation at this point means the degenerated cycle must resume from update-refs.
1222     set_gc_state_concurrent(EVACUATION, false);
1223     set_gc_state_concurrent(WEAK_ROOTS, false);
1224     set_gc_state_concurrent(UPDATE_REFS, true);
1225   }
1226 
1227   // This will propagate the gc state and retire gclabs and plabs for threads that require it.
1228   ShenandoahPrepareForUpdateRefsHandshakeClosure prepare_for_update_refs(_gc_state.raw_value());
1229 
1230   // The handshake won't touch worker threads (or control thread, or VM thread), so do those separately.
1231   Threads::non_java_threads_do(&prepare_for_update_refs);
1232 
1233   // Now retire gclabs and plabs and propagate gc_state for mutator threads
1234   Handshake::execute(&prepare_for_update_refs);
1235 
1236   _update_refs_iterator.reset();
1237 }
1238 
1239 class ShenandoahCompositeHandshakeClosure : public HandshakeClosure {
1240   HandshakeClosure* _handshake_1;
1241   HandshakeClosure* _handshake_2;
1242   public:
1243     ShenandoahCompositeHandshakeClosure(HandshakeClosure* handshake_1, HandshakeClosure* handshake_2) :
1244       HandshakeClosure(handshake_2->name()),
1245       _handshake_1(handshake_1), _handshake_2(handshake_2) {}
1246 
1247   void do_thread(Thread* thread) override {
1248       _handshake_1->do_thread(thread);
1249       _handshake_2->do_thread(thread);
1250     }
1251 };
1252 
1253 void ShenandoahHeap::concurrent_final_roots(HandshakeClosure* handshake_closure) {
1254   {
1255     assert(!is_evacuation_in_progress(), "Should not evacuate for abbreviated or old cycles");
1256     MutexLocker lock(Threads_lock);
1257     set_gc_state_concurrent(WEAK_ROOTS, false);
1258   }
1259 
1260   ShenandoahGCStatePropagatorHandshakeClosure propagator(_gc_state.raw_value());
1261   Threads::non_java_threads_do(&propagator);
1262   if (handshake_closure == nullptr) {
1263     Handshake::execute(&propagator);
1264   } else {
1265     ShenandoahCompositeHandshakeClosure composite(&propagator, handshake_closure);
1266     Handshake::execute(&composite);
1267   }
1268 }
1269 
1270 oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
1271   assert(thread == Thread::current(), "Expected thread parameter to be current thread.");
1272   if (ShenandoahThreadLocalData::is_oom_during_evac(thread)) {
1273     // This thread went through the OOM during evac protocol. It is safe to return
1274     // the forward pointer. It must not attempt to evacuate any other objects.
1275     return ShenandoahBarrierSet::resolve_forwarded(p);
1276   }
1277 
1278   assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
1279 
1280   ShenandoahHeapRegion* r = heap_region_containing(p);
1281   assert(!r->is_humongous(), "never evacuate humongous objects");
1282 
1283   ShenandoahAffiliation target_gen = r->affiliation();
1284   return try_evacuate_object(p, thread, r, target_gen);
1285 }
1286 
1287 oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapRegion* from_region,
1288                                                ShenandoahAffiliation target_gen) {
1289   assert(target_gen == YOUNG_GENERATION, "Only expect evacuations to young in this mode");
1290   assert(from_region->is_young(), "Only expect evacuations from young in this mode");
1291   bool alloc_from_lab = true;
1292   HeapWord* copy = nullptr;
1293 
1294   markWord mark = p->mark();
1295   if (ShenandoahForwarding::is_forwarded(mark)) {
1296     return ShenandoahForwarding::get_forwardee(p);
1297   }
1298   size_t old_size = ShenandoahForwarding::size(p);
1299   size_t size = p->copy_size(old_size, mark);
1300 
1301 #ifdef ASSERT
1302   if (ShenandoahOOMDuringEvacALot &&
1303       (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
1304     copy = nullptr;
1305   } else {
1306 #endif
1307     if (UseTLAB) {
1308       copy = allocate_from_gclab(thread, size);
1309     }
1310     if (copy == nullptr) {
1311       // If we failed to allocate in LAB, we'll try a shared allocation.
1312       ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, target_gen);
1313       copy = allocate_memory(req);
1314       alloc_from_lab = false;
1315     }
1316 #ifdef ASSERT
1317   }
1318 #endif
1319 
1320   if (copy == nullptr) {
1321     control_thread()->handle_alloc_failure_evac(size);
1322 
1323     _oom_evac_handler.handle_out_of_memory_during_evacuation();
1324 
1325     return ShenandoahBarrierSet::resolve_forwarded(p);
1326   }
1327 
1328   if (ShenandoahEvacTracking) {
1329     evac_tracker()->begin_evacuation(thread, size * HeapWordSize, from_region->affiliation(), target_gen);
1330   }
1331 
1332   // Copy the object:
1333   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, old_size);
1334 
1335   // Try to install the new forwarding pointer.
1336   oop copy_val = cast_to_oop(copy);
1337   oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
1338   if (result == copy_val) {
1339     // Successfully evacuated. Our copy is now the public one!
1340     copy_val->initialize_hash_if_necessary(p);
1341     ContinuationGCSupport::relativize_stack_chunk(copy_val);
1342     shenandoah_assert_correct(nullptr, copy_val);
1343     if (ShenandoahEvacTracking) {
1344       evac_tracker()->end_evacuation(thread, size * HeapWordSize, from_region->affiliation(), target_gen);
1345     }
1346     return copy_val;
1347   }  else {
1348     // Failed to evacuate. We need to deal with the object that is left behind. Since this
1349     // new allocation is certainly after TAMS, it will be considered live in the next cycle.
1350     // But if it happens to contain references to evacuated regions, those references would
1351     // not get updated for this stale copy during this cycle, and we will crash while scanning
1352     // it the next cycle.
1353     if (alloc_from_lab) {
1354       // For LAB allocations, it is enough to rollback the allocation ptr. Either the next
1355       // object will overwrite this stale copy, or the filler object on LAB retirement will
1356       // do this.
1357       ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
1358     } else {
1359       // For non-LAB allocations, we have no way to retract the allocation, and
1360       // have to explicitly overwrite the copy with the filler object. With that overwrite,
1361       // we have to keep the fwdptr initialized and pointing to our (stale) copy.
1362       assert(size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
1363       fill_with_object(copy, size);
1364       shenandoah_assert_correct(nullptr, copy_val);
1365       // For non-LAB allocations, the object has already been registered
1366     }
1367     shenandoah_assert_correct(nullptr, result);
1368     return result;
1369   }
1370 }
1371 
1372 void ShenandoahHeap::trash_cset_regions() {
1373   ShenandoahHeapLocker locker(lock());
1374 
1375   ShenandoahCollectionSet* set = collection_set();
1376   ShenandoahHeapRegion* r;
1377   set->clear_current_index();
1378   while ((r = set->next()) != nullptr) {
1379     r->make_trash();
1380   }
1381   collection_set()->clear();
1382 }
1383 
1384 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
1385   st->print_cr("Heap Regions:");
1386   st->print_cr("Region state: EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HP=pinned humongous start");
1387   st->print_cr("              HC=humongous continuation, CS=collection set, TR=trash, P=pinned, CSP=pinned collection set");
1388   st->print_cr("BTE=bottom/top/end, TAMS=top-at-mark-start");
1389   st->print_cr("UWM=update watermark, U=used");
1390   st->print_cr("T=TLAB allocs, G=GCLAB allocs");
1391   st->print_cr("S=shared allocs, L=live data");
1392   st->print_cr("CP=critical pins");
1393 
1394   for (size_t i = 0; i < num_regions(); i++) {
1395     get_region(i)->print_on(st);
1396   }
1397 }
1398 
1399 void ShenandoahHeap::process_gc_stats() const {
1400   // Commit worker statistics to cycle data
1401   phase_timings()->flush_par_workers_to_cycle();
1402 
1403   // Print GC stats for current cycle
1404   LogTarget(Info, gc, stats) lt;
1405   if (lt.is_enabled()) {
1406     ResourceMark rm;
1407     LogStream ls(lt);
1408     phase_timings()->print_cycle_on(&ls);
1409     if (ShenandoahEvacTracking) {
1410       ShenandoahCycleStats  evac_stats = evac_tracker()->flush_cycle_to_global();
1411       evac_tracker()->print_evacuations_on(&ls, &evac_stats.workers,
1412                                                &evac_stats.mutators);
1413     }
1414   }
1415 
1416   // Commit statistics to globals
1417   phase_timings()->flush_cycle_to_global();
1418 }
1419 
1420 size_t ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) const {
1421   assert(start->is_humongous_start(), "reclaim regions starting with the first one");
1422   assert(!start->has_live(), "liveness must be zero");
1423 
1424   // Do not try to get the size of this humongous object. STW collections will
1425   // have already unloaded classes, so an unmarked object may have a bad klass pointer.
1426   ShenandoahHeapRegion* region = start;
1427   size_t index = region->index();
1428   do {
1429     assert(region->is_humongous(), "Expect correct humongous start or continuation");
1430     assert(!region->is_cset(), "Humongous region should not be in collection set");
1431     region->make_trash_immediate();
1432     region = get_region(++index);
1433   } while (region != nullptr && region->is_humongous_continuation());
1434 
1435   // Return number of regions trashed
1436   return index - start->index();
1437 }
1438 
1439 class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
1440 public:
1441   ShenandoahCheckCleanGCLABClosure() {}
1442   void do_thread(Thread* thread) {
1443     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1444     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1445     assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
1446 
1447     if (ShenandoahHeap::heap()->mode()->is_generational()) {
1448       PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1449       assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
1450       assert(plab->words_remaining() == 0, "PLAB should not need retirement");
1451     }
1452   }
1453 };
1454 
1455 void ShenandoahHeap::labs_make_parsable() {
1456   assert(UseTLAB, "Only call with UseTLAB");
1457 
1458   ShenandoahRetireGCLABClosure cl(false);
1459 
1460   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1461     ThreadLocalAllocBuffer& tlab = t->tlab();
1462     tlab.make_parsable();
1463     if (ZeroTLAB) {
1464       t->retire_tlab();
1465     }
1466     cl.do_thread(t);
1467   }
1468 
1469   workers()->threads_do(&cl);
1470 
1471   if (safepoint_workers() != nullptr) {
1472     safepoint_workers()->threads_do(&cl);
1473   }
1474 }
1475 
1476 void ShenandoahHeap::tlabs_retire(bool resize) {
1477   assert(UseTLAB, "Only call with UseTLAB");
1478   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1479 
1480   ThreadLocalAllocStats stats;
1481 
1482   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1483     t->retire_tlab(&stats);
1484     if (resize) {
1485       t->tlab().resize();
1486     }
1487   }
1488 
1489   stats.publish();
1490 
1491 #ifdef ASSERT
1492   ShenandoahCheckCleanGCLABClosure cl;
1493   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1494     cl.do_thread(t);
1495   }
1496   workers()->threads_do(&cl);
1497 #endif
1498 }
1499 
1500 void ShenandoahHeap::gclabs_retire(bool resize) {
1501   assert(UseTLAB, "Only call with UseTLAB");
1502   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1503 
1504   ShenandoahRetireGCLABClosure cl(resize);
1505   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1506     cl.do_thread(t);
1507   }
1508 
1509   workers()->threads_do(&cl);
1510 
1511   if (safepoint_workers() != nullptr) {
1512     safepoint_workers()->threads_do(&cl);
1513   }
1514 }
1515 
1516 // Returns size in bytes
1517 size_t ShenandoahHeap::unsafe_max_tlab_alloc() const {
1518   // Return the max allowed size, and let the allocation path
1519   // figure out the safe size for current allocation.
1520   return ShenandoahHeapRegion::max_tlab_size_bytes();
1521 }
1522 
1523 size_t ShenandoahHeap::max_tlab_size() const {
1524   // Returns size in words
1525   return ShenandoahHeapRegion::max_tlab_size_words();
1526 }
1527 
1528 void ShenandoahHeap::collect_as_vm_thread(GCCause::Cause cause) {
1529   // These requests are ignored because we can't easily have Shenandoah jump into
1530   // a synchronous (degenerated or full) cycle while it is in the middle of a concurrent
1531   // cycle. We _could_ cancel the concurrent cycle and then try to run a cycle directly
1532   // on the VM thread, but this would confuse the control thread mightily and doesn't
1533   // seem worth the trouble. Instead, we will have the caller thread run (and wait for) a
1534   // concurrent cycle in the prologue of the heap inspect/dump operation (see VM_HeapDumper::doit_prologue).
1535   // This is how other concurrent collectors in the JVM handle this scenario as well.
1536   assert(Thread::current()->is_VM_thread(), "Should be the VM thread");
1537   guarantee(cause == GCCause::_heap_dump || cause == GCCause::_heap_inspection, "Invalid cause");
1538 }
1539 
1540 void ShenandoahHeap::collect(GCCause::Cause cause) {
1541   control_thread()->request_gc(cause);
1542 }
1543 
1544 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
1545   // This method is only called by `CollectedHeap::collect_as_vm_thread`, which we have
1546   // overridden to do nothing. See the comment there for an explanation of how heap inspections
1547   // work for Shenandoah.
1548   ShouldNotReachHere();
1549 }
1550 
1551 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1552   ShenandoahHeapRegion* r = heap_region_containing(addr);
1553   if (r != nullptr) {
1554     return r->block_start(addr);
1555   }
1556   return nullptr;
1557 }
1558 
1559 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1560   ShenandoahHeapRegion* r = heap_region_containing(addr);
1561   return r->block_is_obj(addr);
1562 }
1563 
1564 bool ShenandoahHeap::print_location(outputStream* st, void* addr) const {
1565   return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr);
1566 }
1567 
1568 void ShenandoahHeap::prepare_for_verify() {
1569   if (SafepointSynchronize::is_at_safepoint() && UseTLAB) {
1570     labs_make_parsable();
1571   }
1572 }
1573 
1574 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1575   if (_shenandoah_policy->is_at_shutdown()) {
1576     return;
1577   }
1578 
1579   if (_control_thread != nullptr) {
1580     tcl->do_thread(_control_thread);
1581   }
1582 
1583   if (_uncommit_thread != nullptr) {
1584     tcl->do_thread(_uncommit_thread);
1585   }
1586 
1587   workers()->threads_do(tcl);
1588   if (_safepoint_workers != nullptr) {
1589     _safepoint_workers->threads_do(tcl);
1590   }
1591 }
1592 
1593 void ShenandoahHeap::print_tracing_info() const {
1594   LogTarget(Info, gc, stats) lt;
1595   if (lt.is_enabled()) {
1596     ResourceMark rm;
1597     LogStream ls(lt);
1598 
1599     if (ShenandoahEvacTracking) {
1600       evac_tracker()->print_global_on(&ls);
1601       ls.cr();
1602       ls.cr();
1603     }
1604 
1605     phase_timings()->print_global_on(&ls);
1606 
1607     ls.cr();
1608     ls.cr();
1609 
1610     shenandoah_policy()->print_gc_stats(&ls);
1611 
1612     ls.cr();
1613     ls.cr();
1614   }
1615 }
1616 
1617 // Active generation may only be set by the VM thread at a safepoint.
1618 void ShenandoahHeap::set_active_generation(ShenandoahGeneration* generation) {
1619   assert(Thread::current()->is_VM_thread(), "Only the VM Thread");
1620   assert(SafepointSynchronize::is_at_safepoint(), "Only at a safepoint!");
1621   _active_generation = generation;
1622 }
1623 
1624 void ShenandoahHeap::on_cycle_start(GCCause::Cause cause, ShenandoahGeneration* generation) {
1625   shenandoah_policy()->record_collection_cause(cause);
1626 
1627   const GCCause::Cause current = gc_cause();
1628   assert(current == GCCause::_no_gc, "Over-writing cause: %s, with: %s",
1629          GCCause::to_string(current), GCCause::to_string(cause));
1630 
1631   set_gc_cause(cause);
1632 
1633   generation->heuristics()->record_cycle_start();
1634 }
1635 
1636 void ShenandoahHeap::on_cycle_end(ShenandoahGeneration* generation) {
1637   assert(gc_cause() != GCCause::_no_gc, "cause wasn't set");
1638 
1639   generation->heuristics()->record_cycle_end();
1640   if (mode()->is_generational() && generation->is_global()) {
1641     // If we just completed a GLOBAL GC, claim credit for completion of young-gen and old-gen GC as well
1642     young_generation()->heuristics()->record_cycle_end();
1643     old_generation()->heuristics()->record_cycle_end();
1644   }
1645 
1646   set_gc_cause(GCCause::_no_gc);
1647 }
1648 
1649 void ShenandoahHeap::verify(VerifyOption vo) {
1650   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1651     if (ShenandoahVerify) {
1652       verifier()->verify_generic(active_generation(), vo);
1653     } else {
1654       // TODO: Consider allocating verification bitmaps on demand,
1655       // and turn this on unconditionally.
1656     }
1657   }
1658 }
1659 size_t ShenandoahHeap::tlab_capacity() const {
1660   return _free_set->capacity();
1661 }
1662 
1663 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1664 private:
1665   MarkBitMap* _bitmap;
1666   ShenandoahScanObjectStack* _oop_stack;
1667   ShenandoahHeap* const _heap;
1668   ShenandoahMarkingContext* const _marking_context;
1669 
1670   template <class T>
1671   void do_oop_work(T* p) {
1672     T o = RawAccess<>::oop_load(p);
1673     if (!CompressedOops::is_null(o)) {
1674       oop obj = CompressedOops::decode_not_null(o);
1675       if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
1676         // There may be dead oops in weak roots in concurrent root phase, do not touch them.
1677         return;
1678       }
1679       obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
1680 
1681       assert(oopDesc::is_oop(obj), "must be a valid oop");
1682       if (!_bitmap->is_marked(obj)) {
1683         _bitmap->mark(obj);
1684         _oop_stack->push(obj);
1685       }
1686     }
1687   }
1688 public:
1689   ObjectIterateScanRootClosure(MarkBitMap* bitmap, ShenandoahScanObjectStack* oop_stack) :
1690     _bitmap(bitmap), _oop_stack(oop_stack), _heap(ShenandoahHeap::heap()),
1691     _marking_context(_heap->marking_context()) {}
1692   void do_oop(oop* p)       { do_oop_work(p); }
1693   void do_oop(narrowOop* p) { do_oop_work(p); }
1694 };
1695 
1696 /*
1697  * This is public API, used in preparation of object_iterate().
1698  * Since we don't do linear scan of heap in object_iterate() (see comment below), we don't
1699  * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can
1700  * control, we call SH::tlabs_retire, SH::gclabs_retire.
1701  */
1702 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
1703   // No-op.
1704 }
1705 
1706 /*
1707  * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
1708  *
1709  * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
1710  * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
1711  * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
1712  * scanning therefore depends on having a valid marking bitmap to support it. However, we only
1713  * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
1714  * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
1715  * wiped the bitmap in preparation for next marking).
1716  *
1717  * For all those reasons, we implement object iteration as a single marking traversal, reporting
1718  * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1719  * is allowed to report dead objects, but is not required to do so.
1720  */
1721 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1722   // Reset bitmap
1723   if (!prepare_aux_bitmap_for_iteration())
1724     return;
1725 
1726   ShenandoahScanObjectStack oop_stack;
1727   ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1728   // Seed the stack with root scan
1729   scan_roots_for_iteration(&oop_stack, &oops);
1730 
1731   // Work through the oop stack to traverse heap
1732   while (! oop_stack.is_empty()) {
1733     oop obj = oop_stack.pop();
1734     assert(oopDesc::is_oop(obj), "must be a valid oop");
1735     cl->do_object(obj);
1736     obj->oop_iterate(&oops);
1737   }
1738 
1739   assert(oop_stack.is_empty(), "should be empty");
1740   // Reclaim bitmap
1741   reclaim_aux_bitmap_for_iteration();
1742 }
1743 
1744 bool ShenandoahHeap::prepare_aux_bitmap_for_iteration() {
1745   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1746   if (!_aux_bitmap_region_special) {
1747     bool success = os::commit_memory((char *) _aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false);
1748     if (!success) {
1749       log_warning(gc)("Auxiliary marking bitmap commit failed: " PTR_FORMAT " (%zu bytes)",
1750                       p2i(_aux_bitmap_region.start()), _aux_bitmap_region.byte_size());
1751       return false;
1752     }
1753   }
1754   _aux_bit_map.clear();
1755   return true;
1756 }
1757 
1758 void ShenandoahHeap::scan_roots_for_iteration(ShenandoahScanObjectStack* oop_stack, ObjectIterateScanRootClosure* oops) {
1759   // Process GC roots according to current GC cycle
1760   // This populates the work stack with initial objects
1761   // It is important to relinquish the associated locks before diving
1762   // into heap dumper
1763   uint n_workers = safepoint_workers() != nullptr ? safepoint_workers()->active_workers() : 1;
1764   ShenandoahHeapIterationRootScanner rp(n_workers);
1765   rp.roots_do(oops);
1766 }
1767 
1768 void ShenandoahHeap::reclaim_aux_bitmap_for_iteration() {
1769   if (!_aux_bitmap_region_special) {
1770     bool success = os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size());
1771     if (!success) {
1772       log_warning(gc)("Auxiliary marking bitmap uncommit failed: " PTR_FORMAT " (%zu bytes)",
1773                       p2i(_aux_bitmap_region.start()), _aux_bitmap_region.byte_size());
1774       assert(false, "Auxiliary marking bitmap uncommit should always succeed");
1775     }
1776   }
1777 }
1778 
1779 // Closure for parallelly iterate objects
1780 class ShenandoahObjectIterateParScanClosure : public BasicOopIterateClosure {
1781 private:
1782   MarkBitMap* _bitmap;
1783   ShenandoahObjToScanQueue* _queue;
1784   ShenandoahHeap* const _heap;
1785   ShenandoahMarkingContext* const _marking_context;
1786 
1787   template <class T>
1788   void do_oop_work(T* p) {
1789     T o = RawAccess<>::oop_load(p);
1790     if (!CompressedOops::is_null(o)) {
1791       oop obj = CompressedOops::decode_not_null(o);
1792       if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
1793         // There may be dead oops in weak roots in concurrent root phase, do not touch them.
1794         return;
1795       }
1796       obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
1797 
1798       assert(oopDesc::is_oop(obj), "Must be a valid oop");
1799       if (_bitmap->par_mark(obj)) {
1800         _queue->push(ShenandoahMarkTask(obj));
1801       }
1802     }
1803   }
1804 public:
1805   ShenandoahObjectIterateParScanClosure(MarkBitMap* bitmap, ShenandoahObjToScanQueue* q) :
1806     _bitmap(bitmap), _queue(q), _heap(ShenandoahHeap::heap()),
1807     _marking_context(_heap->marking_context()) {}
1808   void do_oop(oop* p)       { do_oop_work(p); }
1809   void do_oop(narrowOop* p) { do_oop_work(p); }
1810 };
1811 
1812 // Object iterator for parallel heap iteraion.
1813 // The root scanning phase happenes in construction as a preparation of
1814 // parallel marking queues.
1815 // Every worker processes it's own marking queue. work-stealing is used
1816 // to balance workload.
1817 class ShenandoahParallelObjectIterator : public ParallelObjectIteratorImpl {
1818 private:
1819   uint                         _num_workers;
1820   bool                         _init_ready;
1821   MarkBitMap*                  _aux_bit_map;
1822   ShenandoahHeap*              _heap;
1823   ShenandoahScanObjectStack    _roots_stack; // global roots stack
1824   ShenandoahObjToScanQueueSet* _task_queues;
1825 public:
1826   ShenandoahParallelObjectIterator(uint num_workers, MarkBitMap* bitmap) :
1827         _num_workers(num_workers),
1828         _init_ready(false),
1829         _aux_bit_map(bitmap),
1830         _heap(ShenandoahHeap::heap()) {
1831     // Initialize bitmap
1832     _init_ready = _heap->prepare_aux_bitmap_for_iteration();
1833     if (!_init_ready) {
1834       return;
1835     }
1836 
1837     ObjectIterateScanRootClosure oops(_aux_bit_map, &_roots_stack);
1838     _heap->scan_roots_for_iteration(&_roots_stack, &oops);
1839 
1840     _init_ready = prepare_worker_queues();
1841   }
1842 
1843   ~ShenandoahParallelObjectIterator() {
1844     // Reclaim bitmap
1845     _heap->reclaim_aux_bitmap_for_iteration();
1846     // Reclaim queue for workers
1847     if (_task_queues!= nullptr) {
1848       for (uint i = 0; i < _num_workers; ++i) {
1849         ShenandoahObjToScanQueue* q = _task_queues->queue(i);
1850         if (q != nullptr) {
1851           delete q;
1852           _task_queues->register_queue(i, nullptr);
1853         }
1854       }
1855       delete _task_queues;
1856       _task_queues = nullptr;
1857     }
1858   }
1859 
1860   virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
1861     if (_init_ready) {
1862       object_iterate_parallel(cl, worker_id, _task_queues);
1863     }
1864   }
1865 
1866 private:
1867   // Divide global root_stack into worker queues
1868   bool prepare_worker_queues() {
1869     _task_queues = new ShenandoahObjToScanQueueSet((int) _num_workers);
1870     // Initialize queues for every workers
1871     for (uint i = 0; i < _num_workers; ++i) {
1872       ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
1873       _task_queues->register_queue(i, task_queue);
1874     }
1875     // Divide roots among the workers. Assume that object referencing distribution
1876     // is related with root kind, use round-robin to make every worker have same chance
1877     // to process every kind of roots
1878     size_t roots_num = _roots_stack.size();
1879     if (roots_num == 0) {
1880       // No work to do
1881       return false;
1882     }
1883 
1884     for (uint j = 0; j < roots_num; j++) {
1885       uint stack_id = j % _num_workers;
1886       oop obj = _roots_stack.pop();
1887       _task_queues->queue(stack_id)->push(ShenandoahMarkTask(obj));
1888     }
1889     return true;
1890   }
1891 
1892   void object_iterate_parallel(ObjectClosure* cl,
1893                                uint worker_id,
1894                                ShenandoahObjToScanQueueSet* queue_set) {
1895     assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1896     assert(queue_set != nullptr, "task queue must not be null");
1897 
1898     ShenandoahObjToScanQueue* q = queue_set->queue(worker_id);
1899     assert(q != nullptr, "object iterate queue must not be null");
1900 
1901     ShenandoahMarkTask t;
1902     ShenandoahObjectIterateParScanClosure oops(_aux_bit_map, q);
1903 
1904     // Work through the queue to traverse heap.
1905     // Steal when there is no task in queue.
1906     while (q->pop(t) || queue_set->steal(worker_id, t)) {
1907       oop obj = t.obj();
1908       assert(oopDesc::is_oop(obj), "must be a valid oop");
1909       cl->do_object(obj);
1910       obj->oop_iterate(&oops);
1911     }
1912     assert(q->is_empty(), "should be empty");
1913   }
1914 };
1915 
1916 ParallelObjectIteratorImpl* ShenandoahHeap::parallel_object_iterator(uint workers) {
1917   return new ShenandoahParallelObjectIterator(workers, &_aux_bit_map);
1918 }
1919 
1920 // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
1921 void ShenandoahHeap::keep_alive(oop obj) {
1922   if (is_concurrent_mark_in_progress() && (obj != nullptr)) {
1923     ShenandoahBarrierSet::barrier_set()->enqueue(obj);
1924   }
1925 }
1926 
1927 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1928   for (size_t i = 0; i < num_regions(); i++) {
1929     ShenandoahHeapRegion* current = get_region(i);
1930     blk->heap_region_do(current);
1931   }
1932 }
1933 
1934 class ShenandoahParallelHeapRegionTask : public WorkerTask {
1935 private:
1936   ShenandoahHeap* const _heap;
1937   ShenandoahHeapRegionClosure* const _blk;
1938   size_t const _stride;
1939 
1940   shenandoah_padding(0);
1941   volatile size_t _index;
1942   shenandoah_padding(1);
1943 
1944 public:
1945   ShenandoahParallelHeapRegionTask(ShenandoahHeapRegionClosure* blk, size_t stride) :
1946           WorkerTask("Shenandoah Parallel Region Operation"),
1947           _heap(ShenandoahHeap::heap()), _blk(blk), _stride(stride), _index(0) {}
1948 
1949   void work(uint worker_id) {
1950     ShenandoahParallelWorkerSession worker_session(worker_id);
1951     size_t stride = _stride;
1952 
1953     size_t max = _heap->num_regions();
1954     while (AtomicAccess::load(&_index) < max) {
1955       size_t cur = AtomicAccess::fetch_then_add(&_index, stride, memory_order_relaxed);
1956       size_t start = cur;
1957       size_t end = MIN2(cur + stride, max);
1958       if (start >= max) break;
1959 
1960       for (size_t i = cur; i < end; i++) {
1961         ShenandoahHeapRegion* current = _heap->get_region(i);
1962         _blk->heap_region_do(current);
1963       }
1964     }
1965   }
1966 };
1967 
1968 void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1969   assert(blk->is_thread_safe(), "Only thread-safe closures here");
1970   const uint active_workers = workers()->active_workers();
1971   const size_t n_regions = num_regions();
1972   size_t stride = ShenandoahParallelRegionStride;
1973   if (stride == 0 && active_workers > 1) {
1974     // Automatically derive the stride to balance the work between threads
1975     // evenly. Do not try to split work if below the reasonable threshold.
1976     constexpr size_t threshold = 4096;
1977     stride = n_regions <= threshold ?
1978             threshold :
1979             (n_regions + active_workers - 1) / active_workers;
1980   }
1981 
1982   if (n_regions > stride && active_workers > 1) {
1983     ShenandoahParallelHeapRegionTask task(blk, stride);
1984     workers()->run_task(&task);
1985   } else {
1986     heap_region_iterate(blk);
1987   }
1988 }
1989 
1990 class ShenandoahRendezvousHandshakeClosure : public HandshakeClosure {
1991 public:
1992   inline ShenandoahRendezvousHandshakeClosure(const char* name) : HandshakeClosure(name) {}
1993   inline void do_thread(Thread* thread) {}
1994 };
1995 
1996 void ShenandoahHeap::rendezvous_threads(const char* name) {
1997   ShenandoahRendezvousHandshakeClosure cl(name);
1998   Handshake::execute(&cl);
1999 }
2000 
2001 void ShenandoahHeap::recycle_trash() {
2002   free_set()->recycle_trash();
2003 }
2004 
2005 void ShenandoahHeap::do_class_unloading() {
2006   _unloader.unload();
2007   if (mode()->is_generational()) {
2008     old_generation()->set_parsable(false);
2009   }
2010 }
2011 
2012 void ShenandoahHeap::stw_weak_refs(ShenandoahGeneration* generation, bool full_gc) {
2013   // Weak refs processing
2014   ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
2015                                                 : ShenandoahPhaseTimings::degen_gc_weakrefs;
2016   ShenandoahTimingsTracker t(phase);
2017   ShenandoahGCWorkerPhase worker_phase(phase);
2018   generation->ref_processor()->process_references(phase, workers(), false /* concurrent */);
2019 }
2020 
2021 void ShenandoahHeap::prepare_update_heap_references() {
2022   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2023 
2024   // Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to
2025   // make them parsable for update code to work correctly. Plus, we can compute new sizes
2026   // for future GCLABs here.
2027   if (UseTLAB) {
2028     ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
2029     gclabs_retire(ResizeTLAB);
2030   }
2031 
2032   _update_refs_iterator.reset();
2033 }
2034 
2035 void ShenandoahHeap::propagate_gc_state_to_all_threads() {
2036   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
2037   if (_gc_state_changed) {
2038     // If we are only marking old, we do not need to process young pointers
2039     ShenandoahBarrierSet::satb_mark_queue_set().set_filter_out_young(
2040       is_concurrent_old_mark_in_progress() && !is_concurrent_young_mark_in_progress()
2041     );
2042     ShenandoahGCStatePropagatorHandshakeClosure propagator(_gc_state.raw_value());
2043     Threads::threads_do(&propagator);
2044     _gc_state_changed = false;
2045   }
2046 }
2047 
2048 void ShenandoahHeap::set_gc_state_at_safepoint(uint mask, bool value) {
2049   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
2050   _gc_state.set_cond(mask, value);
2051   _gc_state_changed = true;
2052 }
2053 
2054 void ShenandoahHeap::set_gc_state_concurrent(uint mask, bool value) {
2055   // Holding the thread lock here assures that any thread created after we change the gc
2056   // state will have the correct state. It also prevents attaching threads from seeing
2057   // an inconsistent state. See ShenandoahBarrierSet::on_thread_attach for reference. Established
2058   // threads will use their thread local copy of the gc state (changed by a handshake, or on a
2059   // safepoint).
2060   assert(Threads_lock->is_locked(), "Must hold thread lock for concurrent gc state change");
2061   _gc_state.set_cond(mask, value);
2062 }
2063 
2064 void ShenandoahHeap::set_concurrent_young_mark_in_progress(bool in_progress) {
2065   uint mask;
2066   assert(!has_forwarded_objects(), "Young marking is not concurrent with evacuation");
2067   if (!in_progress && is_concurrent_old_mark_in_progress()) {
2068     assert(mode()->is_generational(), "Only generational GC has old marking");
2069     assert(_gc_state.is_set(MARKING), "concurrent_old_marking_in_progress implies MARKING");
2070     // If old-marking is in progress when we turn off YOUNG_MARKING, leave MARKING (and OLD_MARKING) on
2071     mask = YOUNG_MARKING;
2072   } else {
2073     mask = MARKING | YOUNG_MARKING;
2074   }
2075   set_gc_state_at_safepoint(mask, in_progress);
2076   manage_satb_barrier(in_progress);
2077 }
2078 
2079 void ShenandoahHeap::set_concurrent_old_mark_in_progress(bool in_progress) {
2080 #ifdef ASSERT
2081   // has_forwarded_objects() iff UPDATE_REFS or EVACUATION
2082   bool has_forwarded = has_forwarded_objects();
2083   bool updating_or_evacuating = _gc_state.is_set(UPDATE_REFS | EVACUATION);
2084   bool evacuating = _gc_state.is_set(EVACUATION);
2085   assert ((has_forwarded == updating_or_evacuating) || (evacuating && !has_forwarded && collection_set()->is_empty()),
2086           "Updating or evacuating iff has forwarded objects, or if evacuation phase is promoting in place without forwarding");
2087 #endif
2088   if (!in_progress && is_concurrent_young_mark_in_progress()) {
2089     // If young-marking is in progress when we turn off OLD_MARKING, leave MARKING (and YOUNG_MARKING) on
2090     assert(_gc_state.is_set(MARKING), "concurrent_young_marking_in_progress implies MARKING");
2091     set_gc_state_at_safepoint(OLD_MARKING, in_progress);
2092   } else {
2093     set_gc_state_at_safepoint(MARKING | OLD_MARKING, in_progress);
2094   }
2095   manage_satb_barrier(in_progress);
2096 }
2097 
2098 bool ShenandoahHeap::is_prepare_for_old_mark_in_progress() const {
2099   return old_generation()->is_preparing_for_mark();
2100 }
2101 
2102 void ShenandoahHeap::manage_satb_barrier(bool active) {
2103   if (is_concurrent_mark_in_progress()) {
2104     // Ignore request to deactivate barrier while concurrent mark is in progress.
2105     // Do not attempt to re-activate the barrier if it is already active.
2106     if (active && !ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
2107       ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
2108     }
2109   } else {
2110     // No concurrent marking is in progress so honor request to deactivate,
2111     // but only if the barrier is already active.
2112     if (!active && ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
2113       ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
2114     }
2115   }
2116 }
2117 
2118 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
2119   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
2120   set_gc_state_at_safepoint(EVACUATION, in_progress);
2121 }
2122 
2123 void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
2124   if (in_progress) {
2125     _concurrent_strong_root_in_progress.set();
2126   } else {
2127     _concurrent_strong_root_in_progress.unset();
2128   }
2129 }
2130 
2131 void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
2132   set_gc_state_at_safepoint(WEAK_ROOTS, cond);
2133 }
2134 
2135 GCTracer* ShenandoahHeap::tracer() {
2136   return shenandoah_policy()->tracer();
2137 }
2138 
2139 size_t ShenandoahHeap::tlab_used() const {
2140   return _free_set->used();
2141 }
2142 
2143 bool ShenandoahHeap::try_cancel_gc(GCCause::Cause cause) {
2144   const GCCause::Cause prev = _cancelled_gc.xchg(cause);
2145   return prev == GCCause::_no_gc || prev == GCCause::_shenandoah_concurrent_gc;
2146 }
2147 
2148 void ShenandoahHeap::cancel_concurrent_mark() {
2149   if (mode()->is_generational()) {
2150     young_generation()->cancel_marking();
2151     old_generation()->cancel_marking();
2152   }
2153 
2154   global_generation()->cancel_marking();
2155 
2156   ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
2157 }
2158 
2159 bool ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
2160   if (try_cancel_gc(cause)) {
2161     FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
2162     log_info(gc,thread)("%s", msg.buffer());
2163     Events::log(Thread::current(), "%s", msg.buffer());
2164     _cancel_requested_time = os::elapsedTime();
2165     return true;
2166   }
2167   return false;
2168 }
2169 
2170 uint ShenandoahHeap::max_workers() {
2171   return _max_workers;
2172 }
2173 
2174 void ShenandoahHeap::stop() {
2175   // The shutdown sequence should be able to terminate when GC is running.
2176 
2177   // Step 0. Notify policy to disable event recording and prevent visiting gc threads during shutdown
2178   _shenandoah_policy->record_shutdown();
2179 
2180   // Step 1. Stop reporting on gc thread cpu utilization
2181   mmu_tracker()->stop();
2182 
2183   // Step 2. Wait until GC worker exits normally (this will cancel any ongoing GC).
2184   control_thread()->stop();
2185 
2186   // Stop 4. Shutdown uncommit thread.
2187   if (_uncommit_thread != nullptr) {
2188     _uncommit_thread->stop();
2189   }
2190 }
2191 
2192 void ShenandoahHeap::stw_unload_classes(bool full_gc) {
2193   if (!unload_classes()) return;
2194   ClassUnloadingContext ctx(_workers->active_workers(),
2195                             true /* unregister_nmethods_during_purge */,
2196                             false /* lock_nmethod_free_separately */);
2197 
2198   // Unload classes and purge SystemDictionary.
2199   {
2200     ShenandoahPhaseTimings::Phase phase = full_gc ?
2201                                           ShenandoahPhaseTimings::full_gc_purge_class_unload :
2202                                           ShenandoahPhaseTimings::degen_gc_purge_class_unload;
2203     ShenandoahIsAliveSelector is_alive;
2204     {
2205       CodeCache::UnlinkingScope scope(is_alive.is_alive_closure());
2206       ShenandoahGCPhase gc_phase(phase);
2207       ShenandoahGCWorkerPhase worker_phase(phase);
2208       bool unloading_occurred = SystemDictionary::do_unloading(gc_timer());
2209 
2210       // Clean JVMCI metadata handles.
2211       JVMCI_ONLY(JVMCI::do_unloading(unloading_occurred));
2212 
2213       ShenandoahClassUnloadingTask unlink_task(phase, unloading_occurred);
2214       _workers->run_task(&unlink_task);
2215     }
2216     // Release unloaded nmethods's memory.
2217     ClassUnloadingContext::context()->purge_and_free_nmethods();
2218   }
2219 
2220   {
2221     ShenandoahGCPhase phase(full_gc ?
2222                             ShenandoahPhaseTimings::full_gc_purge_cldg :
2223                             ShenandoahPhaseTimings::degen_gc_purge_cldg);
2224     ClassLoaderDataGraph::purge(true /* at_safepoint */);
2225   }
2226   // Resize and verify metaspace
2227   MetaspaceGC::compute_new_size();
2228   DEBUG_ONLY(MetaspaceUtils::verify();)
2229 }
2230 
2231 // Weak roots are either pre-evacuated (final mark) or updated (final update refs),
2232 // so they should not have forwarded oops.
2233 // However, we do need to "null" dead oops in the roots, if can not be done
2234 // in concurrent cycles.
2235 void ShenandoahHeap::stw_process_weak_roots(bool full_gc) {
2236   uint num_workers = _workers->active_workers();
2237   ShenandoahPhaseTimings::Phase timing_phase = full_gc ?
2238                                                ShenandoahPhaseTimings::full_gc_purge_weak_par :
2239                                                ShenandoahPhaseTimings::degen_gc_purge_weak_par;
2240   ShenandoahGCPhase phase(timing_phase);
2241   ShenandoahGCWorkerPhase worker_phase(timing_phase);
2242   // Cleanup weak roots
2243   if (has_forwarded_objects()) {
2244     ShenandoahForwardedIsAliveClosure is_alive;
2245     ShenandoahNonConcUpdateRefsClosure keep_alive;
2246     ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveClosure, ShenandoahNonConcUpdateRefsClosure>
2247       cleaning_task(timing_phase, &is_alive, &keep_alive, num_workers);
2248     _workers->run_task(&cleaning_task);
2249   } else {
2250     ShenandoahIsAliveClosure is_alive;
2251 #ifdef ASSERT
2252     ShenandoahAssertNotForwardedClosure verify_cl;
2253     ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, ShenandoahAssertNotForwardedClosure>
2254       cleaning_task(timing_phase, &is_alive, &verify_cl, num_workers);
2255 #else
2256     ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, DoNothingClosure>
2257       cleaning_task(timing_phase, &is_alive, &do_nothing_cl, num_workers);
2258 #endif
2259     _workers->run_task(&cleaning_task);
2260   }
2261 }
2262 
2263 void ShenandoahHeap::parallel_cleaning(ShenandoahGeneration* generation, bool full_gc) {
2264   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2265   assert(is_stw_gc_in_progress(), "Only for Degenerated and Full GC");
2266   ShenandoahGCPhase phase(full_gc ?
2267                           ShenandoahPhaseTimings::full_gc_purge :
2268                           ShenandoahPhaseTimings::degen_gc_purge);
2269   stw_weak_refs(generation, full_gc);
2270   stw_process_weak_roots(full_gc);
2271   stw_unload_classes(full_gc);
2272 }
2273 
2274 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
2275   set_gc_state_at_safepoint(HAS_FORWARDED, cond);
2276 }
2277 
2278 void ShenandoahHeap::set_unload_classes(bool uc) {
2279   _unload_classes.set_cond(uc);
2280 }
2281 
2282 bool ShenandoahHeap::unload_classes() const {
2283   return _unload_classes.is_set();
2284 }
2285 
2286 address ShenandoahHeap::in_cset_fast_test_addr() {
2287   ShenandoahHeap* heap = ShenandoahHeap::heap();
2288   assert(heap->collection_set() != nullptr, "Sanity");
2289   return (address) heap->collection_set()->biased_map_address();
2290 }
2291 
2292 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
2293   // It is important to force_alloc_rate_sample() before the associated generation's bytes_allocated has been reset.
2294   // Note that there is no lock to prevent additional alloations between sampling bytes_allocated_since_gc_start() and
2295   // reset_bytes_allocated_since_gc_start().  If additional allocations happen, they will be ignored in the average
2296   // allocation rate computations.  This effect is considered to be be negligible.
2297 
2298   // unaccounted_bytes is the bytes not accounted for by our forced sample.  If the sample interval is too short,
2299   // the "forced sample" will not happen, and any recently allocated bytes are "unaccounted for".  We pretend these
2300   // bytes are allocated after the start of subsequent gc.
2301   size_t unaccounted_bytes;
2302   ShenandoahFreeSet* _free_set = free_set();
2303   size_t bytes_allocated = _free_set->get_bytes_allocated_since_gc_start();
2304   if (mode()->is_generational()) {
2305     unaccounted_bytes = young_generation()->heuristics()->force_alloc_rate_sample(bytes_allocated);
2306   } else {
2307     // Single-gen Shenandoah uses global heuristics.
2308     unaccounted_bytes = heuristics()->force_alloc_rate_sample(bytes_allocated);
2309   }
2310   ShenandoahHeapLocker locker(lock());
2311   _free_set->reset_bytes_allocated_since_gc_start(unaccounted_bytes);
2312 }
2313 
2314 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
2315   _degenerated_gc_in_progress.set_cond(in_progress);
2316 }
2317 
2318 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2319   _full_gc_in_progress.set_cond(in_progress);
2320 }
2321 
2322 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2323   assert (is_full_gc_in_progress(), "should be");
2324   _full_gc_move_in_progress.set_cond(in_progress);
2325 }
2326 
2327 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2328   set_gc_state_at_safepoint(UPDATE_REFS, in_progress);
2329 }
2330 
2331 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2332   ShenandoahCodeRoots::register_nmethod(nm);
2333 }
2334 
2335 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
2336   ShenandoahCodeRoots::unregister_nmethod(nm);
2337 }
2338 
2339 void ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
2340   heap_region_containing(o)->record_pin();
2341 }
2342 
2343 void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
2344   ShenandoahHeapRegion* r = heap_region_containing(o);
2345   assert(r != nullptr, "Sanity");
2346   assert(r->pin_count() > 0, "Region %zu should have non-zero pins", r->index());
2347   r->record_unpin();
2348 }
2349 
2350 void ShenandoahHeap::sync_pinned_region_status() {
2351   ShenandoahHeapLocker locker(lock());
2352 
2353   for (size_t i = 0; i < num_regions(); i++) {
2354     ShenandoahHeapRegion *r = get_region(i);
2355     if (r->is_active()) {
2356       if (r->is_pinned()) {
2357         if (r->pin_count() == 0) {
2358           r->make_unpinned();
2359         }
2360       } else {
2361         if (r->pin_count() > 0) {
2362           r->make_pinned();
2363         }
2364       }
2365     }
2366   }
2367 
2368   assert_pinned_region_status();
2369 }
2370 
2371 #ifdef ASSERT
2372 void ShenandoahHeap::assert_pinned_region_status() const {
2373   assert_pinned_region_status(global_generation());
2374 }
2375 
2376 void ShenandoahHeap::assert_pinned_region_status(ShenandoahGeneration* generation) const {
2377   for (size_t i = 0; i < num_regions(); i++) {
2378     ShenandoahHeapRegion* r = get_region(i);
2379     if (generation->contains(r)) {
2380       assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
2381              "Region %zu pinning status is inconsistent", i);
2382     }
2383   }
2384 }
2385 #endif
2386 
2387 ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
2388   return _gc_timer;
2389 }
2390 
2391 void ShenandoahHeap::prepare_concurrent_roots() {
2392   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2393   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2394   set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
2395   set_concurrent_weak_root_in_progress(true);
2396   if (unload_classes()) {
2397     _unloader.prepare();
2398   }
2399 }
2400 
2401 void ShenandoahHeap::finish_concurrent_roots() {
2402   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2403   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2404   if (unload_classes()) {
2405     _unloader.finish();
2406   }
2407 }
2408 
2409 #ifdef ASSERT
2410 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
2411   assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
2412 
2413   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
2414     // Use ParallelGCThreads inside safepoints
2415     assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads (%u) within safepoint, not %u",
2416            ParallelGCThreads, nworkers);
2417   } else {
2418     // Use ConcGCThreads outside safepoints
2419     assert(nworkers == ConcGCThreads, "Use ConcGCThreads (%u) outside safepoints, %u",
2420            ConcGCThreads, nworkers);
2421   }
2422 }
2423 #endif
2424 
2425 ShenandoahVerifier* ShenandoahHeap::verifier() {
2426   guarantee(ShenandoahVerify, "Should be enabled");
2427   assert (_verifier != nullptr, "sanity");
2428   return _verifier;
2429 }
2430 
2431 template<bool CONCURRENT>
2432 class ShenandoahUpdateHeapRefsTask : public WorkerTask {
2433 private:
2434   ShenandoahHeap* _heap;
2435   ShenandoahRegionIterator* _regions;
2436 public:
2437   explicit ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions) :
2438     WorkerTask("Shenandoah Update References"),
2439     _heap(ShenandoahHeap::heap()),
2440     _regions(regions) {
2441   }
2442 
2443   void work(uint worker_id) {
2444     if (CONCURRENT) {
2445       ShenandoahConcurrentWorkerSession worker_session(worker_id);
2446       ShenandoahSuspendibleThreadSetJoiner stsj;
2447       do_work<ShenandoahConcUpdateRefsClosure>(worker_id);
2448     } else {
2449       ShenandoahParallelWorkerSession worker_session(worker_id);
2450       do_work<ShenandoahNonConcUpdateRefsClosure>(worker_id);
2451     }
2452   }
2453 
2454 private:
2455   template<class T>
2456   void do_work(uint worker_id) {
2457     if (CONCURRENT && (worker_id == 0)) {
2458       // We ask the first worker to replenish the Mutator free set by moving regions previously reserved to hold the
2459       // results of evacuation.  These reserves are no longer necessary because evacuation has completed.
2460       size_t cset_regions = _heap->collection_set()->count();
2461 
2462       // Now that evacuation is done, we can reassign any regions that had been reserved to hold the results of evacuation
2463       // to the mutator free set.  At the end of GC, we will have cset_regions newly evacuated fully empty regions from
2464       // which we will be able to replenish the Collector free set and the OldCollector free set in preparation for the
2465       // next GC cycle.
2466       _heap->free_set()->move_regions_from_collector_to_mutator(cset_regions);
2467     }
2468     // If !CONCURRENT, there's no value in expanding Mutator free set
2469     T cl;
2470     ShenandoahHeapRegion* r = _regions->next();
2471     while (r != nullptr) {
2472       HeapWord* update_watermark = r->get_update_watermark();
2473       assert (update_watermark >= r->bottom(), "sanity");
2474       if (r->is_active() && !r->is_cset()) {
2475         _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2476       }
2477       if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
2478         return;
2479       }
2480       r = _regions->next();
2481     }
2482   }
2483 };
2484 
2485 void ShenandoahHeap::update_heap_references(ShenandoahGeneration* generation, bool concurrent) {
2486   assert(generation->is_global(), "Should only get global generation here");
2487   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2488 
2489   if (concurrent) {
2490     ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator);
2491     workers()->run_task(&task);
2492   } else {
2493     ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator);
2494     workers()->run_task(&task);
2495   }
2496 }
2497 
2498 void ShenandoahHeap::update_heap_region_states(bool concurrent) {
2499   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2500   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2501 
2502   {
2503     ShenandoahGCPhase phase(concurrent ?
2504                             ShenandoahPhaseTimings::final_update_refs_update_region_states :
2505                             ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
2506 
2507     final_update_refs_update_region_states();
2508 
2509     assert_pinned_region_status();
2510   }
2511 
2512   {
2513     ShenandoahGCPhase phase(concurrent ?
2514                             ShenandoahPhaseTimings::final_update_refs_trash_cset :
2515                             ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
2516     trash_cset_regions();
2517   }
2518 }
2519 
2520 void ShenandoahHeap::final_update_refs_update_region_states() {
2521   ShenandoahSynchronizePinnedRegionStates cl;
2522   parallel_heap_region_iterate(&cl);
2523 }
2524 
2525 void ShenandoahHeap::rebuild_free_set(bool concurrent) {
2526   ShenandoahGCPhase phase(concurrent ?
2527                           ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
2528                           ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
2529   ShenandoahHeapLocker locker(lock());
2530   size_t young_cset_regions, old_cset_regions;
2531   size_t first_old_region, last_old_region, old_region_count;
2532   _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count);
2533   // If there are no old regions, first_old_region will be greater than last_old_region
2534   assert((first_old_region > last_old_region) ||
2535          ((last_old_region + 1 - first_old_region >= old_region_count) &&
2536           get_region(first_old_region)->is_old() && get_region(last_old_region)->is_old()),
2537          "sanity: old_region_count: %zu, first_old_region: %zu, last_old_region: %zu",
2538          old_region_count, first_old_region, last_old_region);
2539 
2540   if (mode()->is_generational()) {
2541 #ifdef ASSERT
2542     if (ShenandoahVerify) {
2543       verifier()->verify_before_rebuilding_free_set();
2544     }
2545 #endif
2546 
2547     // The computation of bytes_of_allocation_runway_before_gc_trigger is quite conservative so consider all of this
2548     // available for transfer to old. Note that transfer of humongous regions does not impact available.
2549     ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
2550     size_t allocation_runway = gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions);
2551     gen_heap->compute_old_generation_balance(allocation_runway, old_cset_regions);
2552 
2553     // Total old_available may have been expanded to hold anticipated promotions.  We trigger if the fragmented available
2554     // memory represents more than 16 regions worth of data.  Note that fragmentation may increase when we promote regular
2555     // regions in place when many of these regular regions have an abundant amount of available memory within them.  Fragmentation
2556     // will decrease as promote-by-copy consumes the available memory within these partially consumed regions.
2557     //
2558     // We consider old-gen to have excessive fragmentation if more than 12.5% of old-gen is free memory that resides
2559     // within partially consumed regions of memory.
2560   }
2561   // Rebuild free set based on adjusted generation sizes.
2562   _free_set->finish_rebuild(young_cset_regions, old_cset_regions, old_region_count);
2563 
2564   if (mode()->is_generational()) {
2565     ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
2566     ShenandoahOldGeneration* old_gen = gen_heap->old_generation();
2567     old_gen->heuristics()->evaluate_triggers(first_old_region, last_old_region, old_region_count, num_regions());
2568   }
2569 }
2570 
2571 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2572   size_t slice = r->index() / _bitmap_regions_per_slice;
2573 
2574   size_t regions_from = _bitmap_regions_per_slice * slice;
2575   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2576   for (size_t g = regions_from; g < regions_to; g++) {
2577     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2578     if (skip_self && g == r->index()) continue;
2579     if (get_region(g)->is_committed()) {
2580       return true;
2581     }
2582   }
2583   return false;
2584 }
2585 
2586 void ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2587   shenandoah_assert_heaplocked();
2588   assert(!is_bitmap_region_special(), "Not for special memory");
2589 
2590   if (is_bitmap_slice_committed(r, true)) {
2591     // Some other region from the group is already committed, meaning the bitmap
2592     // slice is already committed, we exit right away.
2593     return;
2594   }
2595 
2596   // Commit the bitmap slice:
2597   size_t slice = r->index() / _bitmap_regions_per_slice;
2598   size_t off = _bitmap_bytes_per_slice * slice;
2599   size_t len = _bitmap_bytes_per_slice;
2600   char* start = (char*) _bitmap_region.start() + off;
2601 
2602   os::commit_memory_or_exit(start, len, false, "Unable to commit bitmap slice");
2603 
2604   if (AlwaysPreTouch) {
2605     os::pretouch_memory(start, start + len, _pretouch_bitmap_page_size);
2606   }
2607 }
2608 
2609 void ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2610   shenandoah_assert_heaplocked();
2611   assert(!is_bitmap_region_special(), "Not for special memory");
2612 
2613   if (is_bitmap_slice_committed(r, true)) {
2614     // Some other region from the group is still committed, meaning the bitmap
2615     // slice should stay committed, exit right away.
2616     return;
2617   }
2618 
2619   // Uncommit the bitmap slice:
2620   size_t slice = r->index() / _bitmap_regions_per_slice;
2621   size_t off = _bitmap_bytes_per_slice * slice;
2622   size_t len = _bitmap_bytes_per_slice;
2623 
2624   char* addr = (char*) _bitmap_region.start() + off;
2625   bool success = os::uncommit_memory(addr, len);
2626   if (!success) {
2627     log_warning(gc)("Bitmap slice uncommit failed: " PTR_FORMAT " (%zu bytes)", p2i(addr), len);
2628     assert(false, "Bitmap slice uncommit should always succeed");
2629   }
2630 }
2631 
2632 void ShenandoahHeap::forbid_uncommit() {
2633   if (_uncommit_thread != nullptr) {
2634     _uncommit_thread->forbid_uncommit();
2635   }
2636 }
2637 
2638 void ShenandoahHeap::allow_uncommit() {
2639   if (_uncommit_thread != nullptr) {
2640     _uncommit_thread->allow_uncommit();
2641   }
2642 }
2643 
2644 #ifdef ASSERT
2645 bool ShenandoahHeap::is_uncommit_in_progress() {
2646   if (_uncommit_thread != nullptr) {
2647     return _uncommit_thread->is_uncommit_in_progress();
2648   }
2649   return false;
2650 }
2651 #endif
2652 
2653 void ShenandoahHeap::safepoint_synchronize_begin() {
2654   StackWatermarkSet::safepoint_synchronize_begin();
2655   SuspendibleThreadSet::synchronize();
2656 }
2657 
2658 void ShenandoahHeap::safepoint_synchronize_end() {
2659   SuspendibleThreadSet::desynchronize();
2660 }
2661 
2662 void ShenandoahHeap::try_inject_alloc_failure() {
2663   if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2664     _inject_alloc_failure.set();
2665     os::naked_short_sleep(1);
2666     if (cancelled_gc()) {
2667       log_info(gc)("Allocation failure was successfully injected");
2668     }
2669   }
2670 }
2671 
2672 bool ShenandoahHeap::should_inject_alloc_failure() {
2673   return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2674 }
2675 
2676 void ShenandoahHeap::initialize_serviceability() {
2677   _memory_pool = new ShenandoahMemoryPool(this);
2678   _cycle_memory_manager.add_pool(_memory_pool);
2679   _stw_memory_manager.add_pool(_memory_pool);
2680 }
2681 
2682 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2683   GrowableArray<GCMemoryManager*> memory_managers(2);
2684   memory_managers.append(&_cycle_memory_manager);
2685   memory_managers.append(&_stw_memory_manager);
2686   return memory_managers;
2687 }
2688 
2689 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2690   GrowableArray<MemoryPool*> memory_pools(1);
2691   memory_pools.append(_memory_pool);
2692   return memory_pools;
2693 }
2694 
2695 MemoryUsage ShenandoahHeap::memory_usage() {
2696   assert(_initial_size <= ShenandoahHeap::heap()->max_capacity(), "sanity");
2697   assert(used() <= ShenandoahHeap::heap()->max_capacity(), "sanity");
2698   assert(committed() <= ShenandoahHeap::heap()->max_capacity(), "sanity");
2699   return MemoryUsage(_initial_size, used(), committed(), max_capacity());
2700 }
2701 
2702 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2703   _heap(ShenandoahHeap::heap()),
2704   _index(0) {}
2705 
2706 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2707   _heap(heap),
2708   _index(0) {}
2709 
2710 void ShenandoahRegionIterator::reset() {
2711   _index = 0;
2712 }
2713 
2714 bool ShenandoahRegionIterator::has_next() const {
2715   return _index < _heap->num_regions();
2716 }
2717 
2718 char ShenandoahHeap::gc_state() const {
2719   return _gc_state.raw_value();
2720 }
2721 
2722 bool ShenandoahHeap::is_gc_state(GCState state) const {
2723   // If the global gc state has been changed, but hasn't yet been propagated to all threads, then
2724   // the global gc state is the correct value. Once the gc state has been synchronized with all threads,
2725   // _gc_state_changed will be toggled to false and we need to use the thread local state.
2726   return _gc_state_changed ? _gc_state.is_set(state) : ShenandoahThreadLocalData::is_gc_state(state);
2727 }
2728 
2729 
2730 ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
2731 #ifdef ASSERT
2732   assert(_liveness_cache != nullptr, "sanity");
2733   assert(worker_id < _max_workers, "sanity");
2734   for (uint i = 0; i < num_regions(); i++) {
2735     assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
2736   }
2737 #endif
2738   return _liveness_cache[worker_id];
2739 }
2740 
2741 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
2742   assert(worker_id < _max_workers, "sanity");
2743   assert(_liveness_cache != nullptr, "sanity");
2744   ShenandoahLiveData* ld = _liveness_cache[worker_id];
2745   for (uint i = 0; i < num_regions(); i++) {
2746     ShenandoahLiveData live = ld[i];
2747     if (live > 0) {
2748       ShenandoahHeapRegion* r = get_region(i);
2749       r->increase_live_data_gc_words(live);
2750       ld[i] = 0;
2751     }
2752   }
2753 }
2754 
2755 bool ShenandoahHeap::requires_barriers(stackChunkOop obj) const {
2756   if (is_idle()) return false;
2757 
2758   // Objects allocated after marking start are implicitly alive, don't need any barriers during
2759   // marking phase.
2760   if (is_concurrent_mark_in_progress() &&
2761      !marking_context()->allocated_after_mark_start(obj)) {
2762     return true;
2763   }
2764 
2765   // Can not guarantee obj is deeply good.
2766   if (has_forwarded_objects()) {
2767     return true;
2768   }
2769 
2770   return false;
2771 }
2772 
2773 HeapWord* ShenandoahHeap::allocate_loaded_archive_space(size_t size) {
2774 #if INCLUDE_CDS_JAVA_HEAP
2775   // CDS wants a raw continuous memory range to load a bunch of objects itself.
2776   // This is an unusual request, since all requested regions should be regular, not humongous.
2777   //
2778   // CDS would guarantee no objects straddle multiple regions, as long as regions are as large
2779   // as MIN_GC_REGION_ALIGNMENT.
2780   guarantee(ShenandoahHeapRegion::region_size_bytes() >= AOTMappedHeapWriter::MIN_GC_REGION_ALIGNMENT, "Must be");
2781 
2782   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_cds(size);
2783   return allocate_memory(req);
2784 #else
2785   assert(false, "Archive heap loader should not be available, should not be here");
2786   return nullptr;
2787 #endif // INCLUDE_CDS_JAVA_HEAP
2788 }
2789 
2790 void ShenandoahHeap::complete_loaded_archive_space(MemRegion archive_space) {
2791   // Nothing to do here, except checking that heap looks fine.
2792 #ifdef ASSERT
2793   HeapWord* start = archive_space.start();
2794   HeapWord* end = archive_space.end();
2795 
2796   // No unclaimed space between the objects.
2797   // Objects are properly allocated in correct regions.
2798   HeapWord* cur = start;
2799   while (cur < end) {
2800     oop oop = cast_to_oop(cur);
2801     shenandoah_assert_in_correct_region(nullptr, oop);
2802     cur += oop->size();
2803   }
2804 
2805   // No unclaimed tail at the end of archive space.
2806   assert(cur == end,
2807          "Archive space should be fully used: " PTR_FORMAT " " PTR_FORMAT,
2808          p2i(cur), p2i(end));
2809 
2810   // All regions in contiguous space have good state.
2811   size_t begin_reg_idx = heap_region_index_containing(start);
2812   size_t end_reg_idx   = heap_region_index_containing(end);
2813 
2814   for (size_t idx = begin_reg_idx; idx <= end_reg_idx; idx++) {
2815     ShenandoahHeapRegion* r = get_region(idx);
2816     assert(r->is_regular(), "Must be regular");
2817     assert(r->is_young(), "Must be young");
2818     assert(idx == end_reg_idx || r->top() == r->end(),
2819            "All regions except the last one should be full: " PTR_FORMAT " " PTR_FORMAT,
2820            p2i(r->top()), p2i(r->end()));
2821     assert(idx != begin_reg_idx || r->bottom() == start,
2822            "Archive space start should be at the bottom of first region: " PTR_FORMAT " " PTR_FORMAT,
2823            p2i(r->bottom()), p2i(start));
2824     assert(idx != end_reg_idx || r->top() == end,
2825            "Archive space end should be at the top of last region: " PTR_FORMAT " " PTR_FORMAT,
2826            p2i(r->top()), p2i(end));
2827   }
2828 
2829 #endif
2830 }
2831 
2832 ShenandoahGeneration* ShenandoahHeap::generation_for(ShenandoahAffiliation affiliation) const {
2833   if (!mode()->is_generational()) {
2834     return global_generation();
2835   } else if (affiliation == YOUNG_GENERATION) {
2836     return young_generation();
2837   } else if (affiliation == OLD_GENERATION) {
2838     return old_generation();
2839   }
2840 
2841   ShouldNotReachHere();
2842   return nullptr;
2843 }
2844 
2845 void ShenandoahHeap::log_heap_status(const char* msg) const {
2846   if (mode()->is_generational()) {
2847     young_generation()->log_status(msg);
2848     old_generation()->log_status(msg);
2849   } else {
2850     global_generation()->log_status(msg);
2851   }
2852 }