1 /*
   2  * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.
   4  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
   5  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   6  *
   7  * This code is free software; you can redistribute it and/or modify it
   8  * under the terms of the GNU General Public License version 2 only, as
   9  * published by the Free Software Foundation.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 
  28 #include "cds/aotMappedHeapWriter.hpp"
  29 #include "classfile/systemDictionary.hpp"
  30 #include "gc/shared/classUnloadingContext.hpp"
  31 #include "gc/shared/fullGCForwarding.hpp"
  32 #include "gc/shared/gc_globals.hpp"
  33 #include "gc/shared/gcArguments.hpp"
  34 #include "gc/shared/gcTimer.hpp"
  35 #include "gc/shared/gcTraceTime.inline.hpp"
  36 #include "gc/shared/locationPrinter.inline.hpp"
  37 #include "gc/shared/memAllocator.hpp"
  38 #include "gc/shared/plab.hpp"
  39 #include "gc/shared/tlab_globals.hpp"
  40 #include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
  41 #include "gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp"
  42 #include "gc/shenandoah/mode/shenandoahGenerationalMode.hpp"
  43 #include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
  44 #include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
  45 #include "gc/shenandoah/shenandoahAllocRequest.hpp"
  46 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  47 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  48 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  49 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  50 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  51 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
  52 #include "gc/shenandoah/shenandoahControlThread.hpp"
  53 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  54 #include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
  55 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
  56 #include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
  57 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  58 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  59 #include "gc/shenandoah/shenandoahHeapRegionClosures.hpp"
  60 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  61 #include "gc/shenandoah/shenandoahInitLogger.hpp"
  62 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  63 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
  64 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  65 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  66 #include "gc/shenandoah/shenandoahPadding.hpp"
  67 #include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
  68 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  69 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  70 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  71 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
  72 #include "gc/shenandoah/shenandoahSTWMark.hpp"
  73 #include "gc/shenandoah/shenandoahUncommitThread.hpp"
  74 #include "gc/shenandoah/shenandoahUtils.hpp"
  75 #include "gc/shenandoah/shenandoahVerifier.hpp"
  76 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  77 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  78 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  79 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  80 #include "memory/allocation.hpp"
  81 #include "memory/classLoaderMetaspace.hpp"
  82 #include "memory/memoryReserver.hpp"
  83 #include "memory/metaspaceUtils.hpp"
  84 #include "memory/universe.hpp"
  85 #include "nmt/mallocTracker.hpp"
  86 #include "nmt/memTracker.hpp"
  87 #include "oops/compressedOops.inline.hpp"
  88 #include "prims/jvmtiTagMap.hpp"
  89 #include "runtime/atomic.hpp"
  90 #include "runtime/atomicAccess.hpp"
  91 #include "runtime/globals.hpp"
  92 #include "runtime/interfaceSupport.inline.hpp"
  93 #include "runtime/java.hpp"
  94 #include "runtime/orderAccess.hpp"
  95 #include "runtime/safepointMechanism.hpp"
  96 #include "runtime/stackWatermarkSet.hpp"
  97 #include "runtime/threads.hpp"
  98 #include "runtime/vmThread.hpp"
  99 #include "utilities/events.hpp"
 100 #include "utilities/globalDefinitions.hpp"
 101 #include "utilities/powerOfTwo.hpp"
 102 #if INCLUDE_JVMCI
 103 #include "jvmci/jvmci.hpp"
 104 #endif
 105 #if INCLUDE_JFR
 106 #include "gc/shenandoah/shenandoahJfrSupport.hpp"
 107 #endif
 108 
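// Worker task that pre-touches the committed heap regions with the requested page size.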
 109 class ShenandoahPretouchHeapTask : public WorkerTask {
 110 private:
 111   ShenandoahRegionIterator _regions;
 112   const size_t _page_size;
 113 public:
 114   ShenandoahPretouchHeapTask(size_t page_size) :
 115     WorkerTask("Shenandoah Pretouch Heap"),
 116     _page_size(page_size) {}
 117 
 118   virtual void work(uint worker_id) {
 119     ShenandoahHeapRegion* r = _regions.next();
 120     while (r != nullptr) {
 121       if (r->is_committed()) {
 122         os::pretouch_memory(r->bottom(), r->end(), _page_size);
 123       }
 124       r = _regions.next();
 125     }
 126   }
 127 };
 128 
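// Worker task that pre-touches the portions of the mark bitmap that correspond to committed heap regions.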
 129 class ShenandoahPretouchBitmapTask : public WorkerTask {
 130 private:
 131   ShenandoahRegionIterator _regions;
 132   char* _bitmap_base;
 133   const size_t _bitmap_size;
 134   const size_t _page_size;
 135 public:
 136   ShenandoahPretouchBitmapTask(char* bitmap_base, size_t bitmap_size, size_t page_size) :
 137     WorkerTask("Shenandoah Pretouch Bitmap"),
 138     _bitmap_base(bitmap_base),
 139     _bitmap_size(bitmap_size),
 140     _page_size(page_size) {}
 141 
 142   virtual void work(uint worker_id) {
 143     ShenandoahHeapRegion* r = _regions.next();
 144     while (r != nullptr) {
 145       size_t start = r->index()       * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 146       size_t end   = (r->index() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      assert(end <= _bitmap_size, "end is sane: %zu <= %zu", end, _bitmap_size);
 148 
 149       if (r->is_committed()) {
 150         os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);
 151       }
 152 
 153       r = _regions.next();
 154     }
 155   }
 156 };
 157 
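// Reserve (but do not commit) virtual memory for GC auxiliary data structures. When a
// non-default page size is preferred, the size is aligned up so that large and normal
// pages are not mixed. Exits the VM if the reservation fails.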
 158 static ReservedSpace reserve(size_t size, size_t preferred_page_size) {
  // When a page size is given, we do not want to mix large and normal pages.
  // If the size is not a multiple of the page size, it is aligned up to achieve this.
 162   size_t alignment = os::vm_allocation_granularity();
 163   if (preferred_page_size != os::vm_page_size()) {
 164     alignment = MAX2(preferred_page_size, alignment);
 165     size = align_up(size, alignment);
 166   }
 167 
 168   const ReservedSpace reserved = MemoryReserver::reserve(size, alignment, preferred_page_size, mtGC);
 169   if (!reserved.is_reserved()) {
 170     vm_exit_during_initialization("Could not reserve space");
 171   }
 172   return reserved;
 173 }
 174 
 175 jint ShenandoahHeap::initialize() {
 176   //
 177   // Figure out heap sizing
 178   //
 179 
 180   size_t init_byte_size = InitialHeapSize;
 181   size_t min_byte_size  = MinHeapSize;
 182   size_t max_byte_size  = MaxHeapSize;
 183   size_t heap_alignment = HeapAlignment;
 184 
 185   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 186 
 187   Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
 188   Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
 189 
 190   _num_regions = ShenandoahHeapRegion::region_count();
 191   assert(_num_regions == (max_byte_size / reg_size_bytes),
 192          "Regions should cover entire heap exactly: %zu != %zu/%zu",
 193          _num_regions, max_byte_size, reg_size_bytes);
 194 
 195   size_t num_committed_regions = init_byte_size / reg_size_bytes;
 196   num_committed_regions = MIN2(num_committed_regions, _num_regions);
 197   assert(num_committed_regions <= _num_regions, "sanity");
 198   _initial_size = num_committed_regions * reg_size_bytes;
 199 
 200   size_t num_min_regions = min_byte_size / reg_size_bytes;
 201   num_min_regions = MIN2(num_min_regions, _num_regions);
 202   assert(num_min_regions <= _num_regions, "sanity");
 203   _minimum_size = num_min_regions * reg_size_bytes;
 204 
 205   _soft_max_size.store_relaxed(clamp(SoftMaxHeapSize, min_capacity(), max_capacity()));
 206 
 207   _committed.store_relaxed(_initial_size);
 208 
 209   size_t heap_page_size   = UseLargePages ? os::large_page_size() : os::vm_page_size();
 210   size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 211   size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 212 
 213   //
 214   // Reserve and commit memory for heap
 215   //
 216 
 217   ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
 218   initialize_reserved_region(heap_rs);
 219   _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
 220   _heap_region_special = heap_rs.special();
 221 
 222   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
 223          "Misaligned heap: " PTR_FORMAT, p2i(base()));
 224   os::trace_page_sizes_for_requested_size("Heap",
 225                                           max_byte_size, heap_alignment,
 226                                           heap_rs.base(),
 227                                           heap_rs.size(), heap_rs.page_size());
 228 
 229 #if SHENANDOAH_OPTIMIZED_MARKTASK
 230   // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
 231   // Fail if we ever attempt to address more than we can.
 232   if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
 233     FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
 234                           "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
 235                           "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
 236                 p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
 237     vm_exit_during_initialization("Fatal Error", buf);
 238   }
 239 #endif
 240 
 241   ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
 242   if (!_heap_region_special) {
 243     os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
 244                               "Cannot commit heap memory");
 245   }
 246 
 247   BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this, _heap_region));
 248 
 249   // Now we know the number of regions and heap sizes, initialize the heuristics.
 250   initialize_heuristics();
 251 
  // If ShenandoahCardBarrier is enabled but we are not in generational mode,
  // we must be running in passive mode, and we still have to initialize the
  // old generation so that a card table exists.
 255   if (ShenandoahCardBarrier && !(mode()->is_generational())) {
 256     _old_generation = new ShenandoahOldGeneration(max_workers());
 257   }
 258 
 259   assert(_heap_region.byte_size() == heap_rs.size(), "Need to know reserved size for card table");
 260 
 261   //
 262   // Worker threads must be initialized after the barrier is configured
 263   //
 264   _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
 265   if (_workers == nullptr) {
 266     vm_exit_during_initialization("Failed necessary allocation.");
 267   } else {
 268     _workers->initialize_workers();
 269   }
 270 
 271   if (ParallelGCThreads > 1) {
 272     _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread", ParallelGCThreads);
 273     _safepoint_workers->initialize_workers();
 274   }
 275 
 276   //
 277   // Reserve and commit memory for bitmap(s)
 278   //
 279 
 280   size_t bitmap_size_orig = ShenandoahMarkBitMap::compute_size(heap_rs.size());
 281   _bitmap_size = align_up(bitmap_size_orig, bitmap_page_size);
 282 
 283   size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();
 284 
 285   guarantee(bitmap_bytes_per_region != 0,
 286             "Bitmap bytes per region should not be zero");
 287   guarantee(is_power_of_2(bitmap_bytes_per_region),
 288             "Bitmap bytes per region should be power of two: %zu", bitmap_bytes_per_region);
 289 
 290   if (bitmap_page_size > bitmap_bytes_per_region) {
 291     _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
 292     _bitmap_bytes_per_slice = bitmap_page_size;
 293   } else {
 294     _bitmap_regions_per_slice = 1;
 295     _bitmap_bytes_per_slice = bitmap_bytes_per_region;
 296   }
 297 
 298   guarantee(_bitmap_regions_per_slice >= 1,
 299             "Should have at least one region per slice: %zu",
 300             _bitmap_regions_per_slice);
 301 
 302   guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
 303             "Bitmap slices should be page-granular: bps = %zu, page size = %zu",
 304             _bitmap_bytes_per_slice, bitmap_page_size);
 305 
 306   ReservedSpace bitmap = reserve(_bitmap_size, bitmap_page_size);
 307   os::trace_page_sizes_for_requested_size("Mark Bitmap",
 308                                           bitmap_size_orig, bitmap_page_size,
 309                                           bitmap.base(),
 310                                           bitmap.size(), bitmap.page_size());
 311   MemTracker::record_virtual_memory_tag(bitmap, mtGC);
 312   _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
 313   _bitmap_region_special = bitmap.special();
 314 
 315   size_t bitmap_init_commit = _bitmap_bytes_per_slice *
 316     align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
 317   bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
 318   if (!_bitmap_region_special) {
 319     os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
 320                               "Cannot commit bitmap memory");
 321   }
 322 
 323   _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);
 324 
 325   if (ShenandoahVerify) {
 326     ReservedSpace verify_bitmap = reserve(_bitmap_size, bitmap_page_size);
 327     os::trace_page_sizes_for_requested_size("Verify Bitmap",
 328                                             bitmap_size_orig, bitmap_page_size,
 329                                             verify_bitmap.base(),
 330                                             verify_bitmap.size(), verify_bitmap.page_size());
 331     if (!verify_bitmap.special()) {
 332       os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
 333                                 "Cannot commit verification bitmap memory");
 334     }
 335     MemTracker::record_virtual_memory_tag(verify_bitmap, mtGC);
 336     MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
 337     _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
 338     _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
 339   }
 340 
 341   // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
 342   size_t aux_bitmap_page_size = bitmap_page_size;
 343 
 344   ReservedSpace aux_bitmap = reserve(_bitmap_size, aux_bitmap_page_size);
 345   os::trace_page_sizes_for_requested_size("Aux Bitmap",
 346                                           bitmap_size_orig, aux_bitmap_page_size,
 347                                           aux_bitmap.base(),
 348                                           aux_bitmap.size(), aux_bitmap.page_size());
 349   MemTracker::record_virtual_memory_tag(aux_bitmap, mtGC);
 350   _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
 351   _aux_bitmap_region_special = aux_bitmap.special();
 352   _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
 353 
 354   //
 355   // Create regions and region sets
 356   //
 357   size_t region_align = align_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE);
 358   size_t region_storage_size_orig = region_align * _num_regions;
 359   size_t region_storage_size = align_up(region_storage_size_orig,
 360                                         MAX2(region_page_size, os::vm_allocation_granularity()));
 361 
 362   ReservedSpace region_storage = reserve(region_storage_size, region_page_size);
 363   os::trace_page_sizes_for_requested_size("Region Storage",
 364                                           region_storage_size_orig, region_page_size,
 365                                           region_storage.base(),
 366                                           region_storage.size(), region_storage.page_size());
 367   MemTracker::record_virtual_memory_tag(region_storage, mtGC);
 368   if (!region_storage.special()) {
 369     os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false,
 370                               "Cannot commit region memory");
 371   }
 372 
 373   // Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks.
 374   // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there.
  // If not successful, bite the bullet and allocate at whatever address is available.
 376   {
 377     const size_t cset_align = MAX2<size_t>(os::vm_page_size(), os::vm_allocation_granularity());
 378     const size_t cset_size = align_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align);
 379     const size_t cset_page_size = os::vm_page_size();
 380 
 381     uintptr_t min = round_up_power_of_2(cset_align);
 382     uintptr_t max = (1u << 30u);
 383     ReservedSpace cset_rs;
 384 
 385     for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
 386       char* req_addr = (char*)addr;
 387       assert(is_aligned(req_addr, cset_align), "Should be aligned");
 388       cset_rs = MemoryReserver::reserve(req_addr, cset_size, cset_align, cset_page_size, mtGC);
 389       if (cset_rs.is_reserved()) {
 390         assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
 391         _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 392         break;
 393       }
 394     }
 395 
 396     if (_collection_set == nullptr) {
 397       cset_rs = MemoryReserver::reserve(cset_size, cset_align, os::vm_page_size(), mtGC);
 398       if (!cset_rs.is_reserved()) {
 399         vm_exit_during_initialization("Cannot reserve memory for collection set");
 400       }
 401 
 402       _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 403     }
 404     os::trace_page_sizes_for_requested_size("Collection Set",
 405                                             cset_size, cset_page_size,
 406                                             cset_rs.base(),
 407                                             cset_rs.size(), cset_rs.page_size());
 408   }
 409 
 410   _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
 411   _affiliations = NEW_C_HEAP_ARRAY(uint8_t, _num_regions, mtGC);
 412 
 413   {
 414     ShenandoahHeapLocker locker(lock());
 415     for (size_t i = 0; i < _num_regions; i++) {
 416       HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
 417       bool is_committed = i < num_committed_regions;
 418       void* loc = region_storage.base() + i * region_align;
 419 
 420       ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
 421       assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");
 422 
 423       _marking_context->initialize_top_at_mark_start(r);
 424       _regions[i] = r;
 425       assert(!collection_set()->is_in(i), "New region should not be in collection set");
 426 
 427       _affiliations[i] = ShenandoahAffiliation::FREE;
 428     }
 429 
 430     if (mode()->is_generational()) {
 431       size_t young_reserve = (soft_max_capacity() * ShenandoahEvacReserve) / 100;
 432       young_generation()->set_evacuation_reserve(young_reserve);
 433       old_generation()->set_evacuation_reserve((size_t) 0);
 434       old_generation()->set_promoted_reserve((size_t) 0);
 435     }
 436 
 437     _free_set = new ShenandoahFreeSet(this, _num_regions);
 438     initialize_generations();
 439 
    // We are initializing the free set, so we ignore cset region tallies.
 441     size_t young_trashed_regions, old_trashed_regions, first_old, last_old, num_old;
 442     _free_set->prepare_to_rebuild(young_trashed_regions, old_trashed_regions, first_old, last_old, num_old);
 443     if (mode()->is_generational()) {
 444       ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
 445       // We cannot call
 446       //  gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions)
 447       // until after the heap is fully initialized.  So we make up a safe value here.
 448       size_t allocation_runway = InitialHeapSize / 2;
 449       gen_heap->compute_old_generation_balance(allocation_runway, old_trashed_regions, young_trashed_regions);
 450     }
 451     _free_set->finish_rebuild(young_trashed_regions, old_trashed_regions, num_old);
 452   }
 453 
 454   if (AlwaysPreTouch) {
 455     // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before initialize() below zeroes it with the initializing thread. For any given region,
 457     // we touch the region and the corresponding bitmaps from the same thread.
 458     ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
 459 
 460     _pretouch_heap_page_size = heap_page_size;
 461     _pretouch_bitmap_page_size = bitmap_page_size;
 462 
 463     // OS memory managers may want to coalesce back-to-back pages. Make their jobs
    // simpler by pre-touching contiguous spaces (heap and bitmap) separately.
 465 
 466     ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, _pretouch_bitmap_page_size);
 467     _workers->run_task(&bcl);
 468 
 469     ShenandoahPretouchHeapTask hcl(_pretouch_heap_page_size);
 470     _workers->run_task(&hcl);
 471   }
 472 
 473   //
 474   // Initialize the rest of GC subsystems
 475   //
 476 
 477   _liveness_cache = NEW_C_HEAP_ARRAY(ShenandoahLiveData*, _max_workers, mtGC);
 478   for (uint worker = 0; worker < _max_workers; worker++) {
 479     _liveness_cache[worker] = NEW_C_HEAP_ARRAY(ShenandoahLiveData, _num_regions, mtGC);
 480     Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
 481   }
 482 
 483   // There should probably be Shenandoah-specific options for these,
 484   // just as there are G1-specific options.
 485   {
 486     ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
 487     satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
 488     satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
 489   }
 490 
 491   _monitoring_support = new ShenandoahMonitoringSupport(this);
 492   _phase_timings = new ShenandoahPhaseTimings(max_workers());
 493   ShenandoahCodeRoots::initialize();
 494 
  // Initialization of the controller makes use of variables established by initialize_heuristics().
 496   initialize_controller();
 497 
 498   // Certain initialization of heuristics must be deferred until after controller is initialized.
 499   post_initialize_heuristics();
 500   start_idle_span();
 501   if (ShenandoahUncommit) {
 502     _uncommit_thread = new ShenandoahUncommitThread(this);
 503   }
 504   print_init_logger();
 505   FullGCForwarding::initialize(_heap_region);
 506   return JNI_OK;
 507 }
 508 
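// Create the control thread that drives the GC cycles.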
 509 void ShenandoahHeap::initialize_controller() {
 510   _control_thread = new ShenandoahControlThread();
 511 }
 512 
 513 void ShenandoahHeap::print_init_logger() const {
 514   ShenandoahInitLogger::print();
 515 }
 516 
 517 void ShenandoahHeap::initialize_mode() {
 518   if (ShenandoahGCMode != nullptr) {
 519     if (strcmp(ShenandoahGCMode, "satb") == 0) {
 520       _gc_mode = new ShenandoahSATBMode();
 521     } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
 522       _gc_mode = new ShenandoahPassiveMode();
 523     } else if (strcmp(ShenandoahGCMode, "generational") == 0) {
 524       _gc_mode = new ShenandoahGenerationalMode();
 525     } else {
 526       vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
 527     }
 528   } else {
 529     vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
 530   }
 531   _gc_mode->initialize_flags();
 532   if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 533     vm_exit_during_initialization(
 534             err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 535                     _gc_mode->name()));
 536   }
 537   if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
 538     vm_exit_during_initialization(
 539             err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 540                     _gc_mode->name()));
 541   }
 542 }
 543 
 544 void ShenandoahHeap::initialize_heuristics() {
 545   _global_generation = new ShenandoahGlobalGeneration(mode()->is_generational(), max_workers());
 546   _global_generation->initialize_heuristics(mode());
 547 }
 548 
 549 #ifdef _MSC_VER
 550 #pragma warning( push )
 551 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 552 #endif
 553 
 554 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 555   CollectedHeap(),
 556   _active_generation(nullptr),
 557   _initial_size(0),
 558   _committed(0),
 559   _max_workers(MAX3(ConcGCThreads, ParallelGCThreads, 1U)),
 560   _workers(nullptr),
 561   _safepoint_workers(nullptr),
 562   _heap_region_special(false),
 563   _num_regions(0),
 564   _regions(nullptr),
 565   _affiliations(nullptr),
 566   _gc_state_changed(false),
 567   _gc_no_progress_count(0),
 568   _cancel_requested_time(0),
 569   _update_refs_iterator(this),
 570   _global_generation(nullptr),
 571   _control_thread(nullptr),
 572   _uncommit_thread(nullptr),
 573   _young_generation(nullptr),
 574   _old_generation(nullptr),
 575   _shenandoah_policy(policy),
 576   _gc_mode(nullptr),
 577   _free_set(nullptr),
 578   _verifier(nullptr),
 579   _phase_timings(nullptr),
 580   _monitoring_support(nullptr),
 581   _memory_pool(nullptr),
 582   _stw_memory_manager("Shenandoah Pauses"),
 583   _cycle_memory_manager("Shenandoah Cycles"),
 584   _gc_timer(new ConcurrentGCTimer()),
 585   _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
 586   _marking_context(nullptr),
 587   _bitmap_size(0),
 588   _bitmap_regions_per_slice(0),
 589   _bitmap_bytes_per_slice(0),
 590   _bitmap_region_special(false),
 591   _aux_bitmap_region_special(false),
 592   _liveness_cache(nullptr),
 593   _collection_set(nullptr),
 594   _evac_tracker(new ShenandoahEvacuationTracker())
 595 {
  // Initialize the GC mode early; many subsequent initialization procedures depend on it.
 597   initialize_mode();
 598   _cancelled_gc.set(GCCause::_no_gc);
 599 }
 600 
 601 #ifdef _MSC_VER
 602 #pragma warning( pop )
 603 #endif
 604 
 605 void ShenandoahHeap::print_heap_on(outputStream* st) const {
 606   const bool is_generational = mode()->is_generational();
 607   const char* front_spacing = "";
 608   if (is_generational) {
 609     st->print_cr("Generational Shenandoah Heap");
 610     st->print_cr(" Young:");
 611     st->print_cr("  " PROPERFMT " max, " PROPERFMT " used", PROPERFMTARGS(young_generation()->max_capacity()), PROPERFMTARGS(young_generation()->used()));
 612     st->print_cr(" Old:");
 613     st->print_cr("  " PROPERFMT " max, " PROPERFMT " used", PROPERFMTARGS(old_generation()->max_capacity()), PROPERFMTARGS(old_generation()->used()));
 614     st->print_cr(" Entire heap:");
 615     st->print_cr("  " PROPERFMT " soft max, " PROPERFMT " committed",
 616                 PROPERFMTARGS(soft_max_capacity()), PROPERFMTARGS(committed()));
 617     front_spacing = " ";
 618   } else {
 619     st->print_cr("Shenandoah Heap");
 620     st->print_cr("  " PROPERFMT " max, " PROPERFMT " soft max, " PROPERFMT " committed, " PROPERFMT " used",
 621       PROPERFMTARGS(max_capacity()),
 622       PROPERFMTARGS(soft_max_capacity()),
 623       PROPERFMTARGS(committed()),
 624       PROPERFMTARGS(used())
 625     );
 626   }
 627   st->print_cr("%s %zu x " PROPERFMT " regions",
 628           front_spacing,
 629           num_regions(),
 630           PROPERFMTARGS(ShenandoahHeapRegion::region_size_bytes()));
 631 
 632   st->print("Status: ");
 633   if (has_forwarded_objects())                 st->print("has forwarded objects, ");
 634   if (!is_generational) {
    if (is_concurrent_mark_in_progress())      st->print("marking, ");
 636   } else {
 637     if (is_concurrent_old_mark_in_progress())    st->print("old marking, ");
 638     if (is_concurrent_young_mark_in_progress())  st->print("young marking, ");
 639   }
 640   if (is_evacuation_in_progress())             st->print("evacuating, ");
 641   if (is_update_refs_in_progress())            st->print("updating refs, ");
 642   if (is_degenerated_gc_in_progress())         st->print("degenerated gc, ");
 643   if (is_full_gc_in_progress())                st->print("full gc, ");
 644   if (is_full_gc_move_in_progress())           st->print("full gc move, ");
 645   if (is_concurrent_weak_root_in_progress())   st->print("concurrent weak roots, ");
 646   if (is_concurrent_strong_root_in_progress() &&
 647       !is_concurrent_weak_root_in_progress())  st->print("concurrent strong roots, ");
 648 
 649   if (cancelled_gc()) {
 650     st->print("cancelled");
 651   } else {
 652     st->print("not cancelled");
 653   }
 654   st->cr();
 655 
 656   st->print_cr("Reserved region:");
 657   st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
 658                p2i(reserved_region().start()),
 659                p2i(reserved_region().end()));
 660 
 661   ShenandoahCollectionSet* cset = collection_set();
 662   st->print_cr("Collection set:");
 663   if (cset != nullptr) {
 664     st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address()));
 665     st->print_cr(" - map (biased):  " PTR_FORMAT, p2i(cset->biased_map_address()));
 666   } else {
 667     st->print_cr(" (null)");
 668   }
 669 
 670   st->cr();
 671 
 672   if (Verbose) {
 673     st->cr();
 674     print_heap_regions_on(st);
 675   }
 676 }
 677 
 678 void ShenandoahHeap::print_gc_on(outputStream* st) const {
 679   print_heap_regions_on(st);
 680 }
 681 
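// Thread closure that initializes the GCLAB for a given thread.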
 682 class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
 683 public:
 684   void do_thread(Thread* thread) {
 685     assert(thread != nullptr, "Sanity");
 686     ShenandoahThreadLocalData::initialize_gclab(thread);
 687   }
 688 };
 689 
 690 void ShenandoahHeap::initialize_generations() {
 691   _global_generation->post_initialize(this);
 692 }
 693 
// We do not call this explicitly. It is called by HotSpot infrastructure.
 695 void ShenandoahHeap::post_initialize() {
 696   CollectedHeap::post_initialize();
 697 
 698   check_soft_max_changed();
 699 
  // Schedule a periodic task to report on GC thread CPU utilization.
 701   _mmu_tracker.initialize();
 702 
 703   MutexLocker ml(Threads_lock);
 704 
 705   ShenandoahInitWorkerGCLABClosure init_gclabs;
 706   _workers->threads_do(&init_gclabs);
 707 
  // GCLABs cannot be initialized early during VM startup, as their max_size cannot be determined yet.
  // Instead, we let WorkerThreads initialize the GCLAB when a new worker is created.
 710   _workers->set_initialize_gclab();
 711 
 712   // Note that the safepoint workers may require gclabs if the threads are used to create a heap dump
 713   // during a concurrent evacuation phase.
 714   if (_safepoint_workers != nullptr) {
 715     _safepoint_workers->threads_do(&init_gclabs);
 716     _safepoint_workers->set_initialize_gclab();
 717   }
 718 
 719   JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers();)
 720 }
 721 
 722 void ShenandoahHeap::post_initialize_heuristics() {
 723   _global_generation->post_initialize_heuristics();
 724 }
 725 
 726 ShenandoahHeuristics* ShenandoahHeap::heuristics() {
 727   return _global_generation->heuristics();
 728 }
 729 
 730 size_t ShenandoahHeap::used() const {
 731   return global_generation()->used();
 732 }
 733 
 734 size_t ShenandoahHeap::committed() const {
 735   return _committed.load_relaxed();
 736 }
 737 
 738 void ShenandoahHeap::increase_committed(size_t bytes) {
 739   shenandoah_assert_heaplocked_or_safepoint();
 740   _committed.fetch_then_add(bytes, memory_order_relaxed);
 741 }
 742 
 743 void ShenandoahHeap::decrease_committed(size_t bytes) {
 744   shenandoah_assert_heaplocked_or_safepoint();
 745   _committed.fetch_then_sub(bytes, memory_order_relaxed);
 746 }
 747 
 748 size_t ShenandoahHeap::capacity() const {
 749   return committed();
 750 }
 751 
 752 size_t ShenandoahHeap::max_capacity() const {
 753   return _num_regions * ShenandoahHeapRegion::region_size_bytes();
 754 }
 755 
 756 size_t ShenandoahHeap::soft_max_capacity() const {
 757   size_t v = _soft_max_size.load_relaxed();
 758   assert(min_capacity() <= v && v <= max_capacity(),
 759          "Should be in bounds: %zu <= %zu <= %zu",
 760          min_capacity(), v, max_capacity());
 761   return v;
 762 }
 763 
 764 void ShenandoahHeap::set_soft_max_capacity(size_t v) {
 765   assert(min_capacity() <= v && v <= max_capacity(),
 766          "Should be in bounds: %zu <= %zu <= %zu",
 767          min_capacity(), v, max_capacity());
 768   _soft_max_size.store_relaxed(v);
 769   heuristics()->compute_headroom_adjustment();
 770 }
 771 
 772 size_t ShenandoahHeap::min_capacity() const {
 773   return _minimum_size;
 774 }
 775 
 776 size_t ShenandoahHeap::initial_capacity() const {
 777   return _initial_size;
 778 }
 779 
 780 bool ShenandoahHeap::is_in(const void* p) const {
 781   if (!is_in_reserved(p)) {
 782     return false;
 783   }
 784 
 785   if (is_full_gc_move_in_progress()) {
    // A Full GC move is running, so we do not have consistent region
    // information yet. But we know the pointer is in the heap.
 788     return true;
 789   }
 790 
  // Now check if we point to a live section in an active region.
 792   const ShenandoahHeapRegion* r = heap_region_containing(p);
 793   if (p >= r->top()) {
 794     return false;
 795   }
 796 
 797   if (r->is_active()) {
 798     return true;
 799   }
 800 
  // The region is trash, but it won't be recycled until after concurrent weak
  // root processing. We also don't allow mutators to allocate from trash regions
  // during weak root processing. Concurrent class unloading may access unmarked
  // oops in trash regions.
 805   return r->is_trash() && is_concurrent_weak_root_in_progress();
 806 }
 807 
 808 void ShenandoahHeap::notify_soft_max_changed() {
 809   if (_uncommit_thread != nullptr) {
 810     _uncommit_thread->notify_soft_max_changed();
 811   }
 812 }
 813 
 814 void ShenandoahHeap::notify_explicit_gc_requested() {
 815   if (_uncommit_thread != nullptr) {
 816     _uncommit_thread->notify_explicit_gc_requested();
 817   }
 818 }
 819 
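// Re-read SoftMaxHeapSize (it can change at runtime), clamp it into [min_capacity, max_capacity],
// and install the new soft max if it changed. Returns true if the value was updated.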
 820 bool ShenandoahHeap::check_soft_max_changed() {
 821   size_t new_soft_max = AtomicAccess::load(&SoftMaxHeapSize);
 822   size_t old_soft_max = soft_max_capacity();
 823   if (new_soft_max != old_soft_max) {
 824     new_soft_max = MAX2(min_capacity(), new_soft_max);
 825     new_soft_max = MIN2(max_capacity(), new_soft_max);
 826     if (new_soft_max != old_soft_max) {
 827       log_info(gc)("Soft Max Heap Size: %zu%s -> %zu%s",
 828                    byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
 829                    byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
 830       );
 831       set_soft_max_capacity(new_soft_max);
 832       return true;
 833     }
 834   }
 835   return false;
 836 }
 837 
 838 void ShenandoahHeap::notify_heap_changed() {
  // Update monitoring counters when we take a new region. This amortizes the
  // update costs on the slow path.
 841   monitoring_support()->notify_heap_changed();
 842   _heap_changed.try_set();
 843 }
 844 
 845 void ShenandoahHeap::start_idle_span() {
 846   heuristics()->start_idle_span();
 847 }
 848 
 849 void ShenandoahHeap::set_forced_counters_update(bool value) {
 850   monitoring_support()->set_forced_counters_update(value);
 851 }
 852 
 853 void ShenandoahHeap::handle_force_counters_update() {
 854   monitoring_support()->handle_force_counters_update();
 855 }
 856 
 857 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
 858   // New object should fit the GCLAB size
 859   size_t min_size = MAX2(size, PLAB::min_size());
 860 
  // Figure out the size of the new GCLAB, looking back at heuristics. Expand aggressively.
 862   size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
 863 
 864   new_size = MIN2(new_size, PLAB::max_size());
 865   new_size = MAX2(new_size, PLAB::min_size());
 866 
  // Record the new heuristic value even if we take a shortcut. This captures
 868   // the case when moderately-sized objects always take a shortcut. At some point,
 869   // heuristics should catch up with them.
 870   log_debug(gc, free)("Set new GCLAB size: %zu", new_size);
 871   ShenandoahThreadLocalData::set_gclab_size(thread, new_size);
 872 
 873   if (new_size < size) {
 874     // New size still does not fit the object. Fall back to shared allocation.
 875     // This avoids retiring perfectly good GCLABs, when we encounter a large object.
 876     log_debug(gc, free)("New gclab size (%zu) is too small for %zu", new_size, size);
 877     return nullptr;
 878   }
 879 
 880   // Retire current GCLAB, and allocate a new one.
 881   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
 882   gclab->retire();
 883 
 884   size_t actual_size = 0;
 885   HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
 886   if (gclab_buf == nullptr) {
 887     return nullptr;
 888   }
 889 
 890   assert (size <= actual_size, "allocation should fit");
 891 
  // ...and clear or zap the just-allocated GCLAB, if needed.
 893   if (ZeroTLAB) {
 894     Copy::zero_to_words(gclab_buf, actual_size);
 895   } else if (ZapTLAB) {
 896     // Skip mangling the space corresponding to the object header to
 897     // ensure that the returned space is not considered parsable by
 898     // any concurrent GC thread.
 899     size_t hdr_size = oopDesc::header_size();
 900     Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
 901   }
 902   gclab->set_buf(gclab_buf, actual_size);
 903   return gclab->allocate(size);
 904 }
 905 
 906 // Called from stubs in JIT code or interpreter
 907 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
 908                                             size_t requested_size,
 909                                             size_t* actual_size) {
 910   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
 911   HeapWord* res = allocate_memory(req);
 912   if (res != nullptr) {
 913     *actual_size = req.actual_size();
 914   } else {
 915     *actual_size = 0;
 916   }
 917   return res;
 918 }
 919 
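// Allocate a new GCLAB of at least min_size words, preferably word_size words.
// Reports the actually allocated size via actual_size (0 on failure).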
 920 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
 921                                              size_t word_size,
 922                                              size_t* actual_size) {
 923   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
 924   HeapWord* res = allocate_memory(req);
 925   if (res != nullptr) {
 926     *actual_size = req.actual_size();
 927   } else {
 928     *actual_size = 0;
 929   }
 930   return res;
 931 }
 932 
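// Common allocation path for mutator and GC allocations. Mutator allocations may block and
// wait for the collector to free up memory; GC allocations must not block.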
 933 HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
 934   bool in_new_region = false;
 935   HeapWord* result = nullptr;
 936 
 937   if (req.is_mutator_alloc()) {
 938 
 939     if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
 940       result = allocate_memory_under_lock(req, in_new_region);
 941     }
 942 
    // Check that the GC overhead limit has not been exceeded.
    //
    // Shenandoah will grind along for quite a while allocating one
    // object at a time using shared (non-TLAB) allocations. If the limit
    // has been exceeded, this will notify the collector to start a cycle,
    // but will raise an OOME to the mutator if the last Full GCs have not
    // made progress.
    // gc_no_progress_count is incremented following each degen or Full GC
    // that fails to achieve is_good_progress().
 951     if (result == nullptr && !req.is_lab_alloc() && get_gc_no_progress_count() > ShenandoahNoProgressThreshold) {
 952       control_thread()->handle_alloc_failure(req, false);
 953       req.set_actual_size(0);
 954       return nullptr;
 955     }
 956 
 957     if (result == nullptr) {
      // Block until the control thread has reacted, then retry the allocation.
      //
      // It might happen that one of the threads requesting allocation unblocks
      // long after the GC happened, only to fail the second allocation because
      // other threads have already depleted the free storage. In this case, a
      // better strategy is to try again, until at least one full GC has completed.
      //
      // Stop retrying and return nullptr to raise an OOME if our allocation still failed after:
      //   a) We experienced a GC that had good progress, or
      //   b) We experienced at least one Full GC (whether or not it had good progress)
 968 
 969       const size_t original_count = shenandoah_policy()->full_gc_count();
 970       while (result == nullptr && should_retry_allocation(original_count)) {
 971         control_thread()->handle_alloc_failure(req, true);
 972         result = allocate_memory_under_lock(req, in_new_region);
 973       }
 974       if (result != nullptr) {
        // If our allocation request has been satisfied after it initially failed, we count this as good GC progress.
 976         notify_gc_progress();
 977       }
 978       if (log_develop_is_enabled(Debug, gc, alloc)) {
 979         ResourceMark rm;
 980         log_debug(gc, alloc)("Thread: %s, Result: " PTR_FORMAT ", Request: %s, Size: %zu"
 981                              ", Original: %zu, Latest: %zu",
 982                              Thread::current()->name(), p2i(result), req.type_string(), req.size(),
 983                              original_count, get_gc_no_progress_count());
 984       }
 985     }
 986   } else {
 987     assert(req.is_gc_alloc(), "Can only accept GC allocs here");
 988     result = allocate_memory_under_lock(req, in_new_region);
 989     // Do not call handle_alloc_failure() here, because we cannot block.
 990     // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
 991   }
 992 
 993   if (in_new_region) {
 994     notify_heap_changed();
 995   }
 996 
 997   if (result == nullptr) {
 998     req.set_actual_size(0);
 999   }
1000 
1001   if (result != nullptr) {
1002     size_t requested = req.size();
1003     size_t actual = req.actual_size();
1004 
1005     assert (req.is_lab_alloc() || (requested == actual),
1006             "Only LAB allocations are elastic: %s, requested = %zu, actual = %zu",
1007             req.type_string(), requested, actual);
1008   }
1009 
1010   return result;
1011 }
1012 
1013 inline bool ShenandoahHeap::should_retry_allocation(size_t original_full_gc_count) const {
1014   return shenandoah_policy()->full_gc_count() == original_full_gc_count
1015       && !shenandoah_policy()->is_at_shutdown();
1016 }
1017 
1018 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
  // If we are dealing with a mutator allocation, then we may need to block for a safepoint.
  // We cannot block for a safepoint for GC allocations, because there is a high chance
  // we are already running at a safepoint or from the stack watermark machinery, and we
  // cannot block again.
1023   ShenandoahHeapLocker locker(lock(), req.is_mutator_alloc());
1024 
1025   // Make sure the old generation has room for either evacuations or promotions before trying to allocate.
1026   if (req.is_old() && !old_generation()->can_allocate(req)) {
1027     return nullptr;
1028   }
1029 
  // If the TLAB request size is greater than the available memory, allocate() will
  // attempt to downsize the request to fit within the available memory.
1032   HeapWord* result = _free_set->allocate(req, in_new_region);
1033 
1034   // Record the plab configuration for this result and register the object.
1035   if (result != nullptr && req.is_old()) {
1036     if (req.is_lab_alloc()) {
1037       old_generation()->configure_plab_for_current_thread(req);
1038     } else {
1039       // Register the newly allocated object while we're holding the global lock since there's no synchronization
1040       // built in to the implementation of register_object().  There are potential races when multiple independent
1041       // threads are allocating objects, some of which might span the same card region.  For example, consider
1042       // a card table's memory region within which three objects are being allocated by three different threads:
1043       //
1044       // objects being "concurrently" allocated:
1045       //    [-----a------][-----b-----][--------------c------------------]
1046       //            [---- card table memory range --------------]
1047       //
1048       // Before any objects are allocated, this card's memory range holds no objects.  Note that allocation of object a
1049       // wants to set the starts-object, first-start, and last-start attributes of the preceding card region.
1050       // Allocation of object b wants to set the starts-object, first-start, and last-start attributes of this card region.
1051       // Allocation of object c also wants to set the starts-object, first-start, and last-start attributes of this
1052       // card region.
1053       //
1054       // The thread allocating b and the thread allocating c can "race" in various ways, resulting in confusion, such as
1055       // last-start representing object b while first-start represents object c.  This is why we need to require all
1056       // register_object() invocations to be "mutually exclusive" with respect to each card's memory range.
1057       old_generation()->card_scan()->register_object(result);
1058 
1059       if (req.is_promotion()) {
1060         // Shared promotion.
1061         const size_t actual_size = req.actual_size() * HeapWordSize;
1062         log_debug(gc, plab)("Expend shared promotion of %zu bytes", actual_size);
1063         old_generation()->expend_promoted(actual_size);
1064       }
1065     }
1066   }
1067 
1068   return result;
1069 }
1070 
1071 HeapWord* ShenandoahHeap::mem_allocate(size_t size) {
1072   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
1073   return allocate_memory(req);
1074 }
1075 
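// Handle a failed metadata allocation: notify the heuristics if class unloading could help,
// then try to expand the metaspace, run a full GC that clears soft references, and retry
// before reporting out of memory.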
1076 MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
1077                                                              size_t size,
1078                                                              Metaspace::MetadataType mdtype) {
1079   MetaWord* result;
1080 
1081   // Inform metaspace OOM to GC heuristics if class unloading is possible.
1082   ShenandoahHeuristics* h = global_generation()->heuristics();
1083   if (h->can_unload_classes()) {
1084     h->record_metaspace_oom();
1085   }
1086 
1087   // Expand and retry allocation
1088   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1089   if (result != nullptr) {
1090     return result;
1091   }
1092 
1093   // Start full GC
1094   collect(GCCause::_metadata_GC_clear_soft_refs);
1095 
1096   // Retry allocation
1097   result = loader_data->metaspace_non_null()->allocate(size, mdtype);
1098   if (result != nullptr) {
1099     return result;
1100   }
1101 
1102   // Expand and retry allocation
1103   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1104   if (result != nullptr) {
1105     return result;
1106   }
1107 
1108   // Out of memory
1109   return nullptr;
1110 }
1111 
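// Object closure that evacuates every live object that has not yet been forwarded.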
1112 class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
1113 private:
1114   ShenandoahHeap* const _heap;
1115   Thread* const _thread;
1116 public:
1117   ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
1118     _heap(heap), _thread(Thread::current()) {}
1119 
1120   void do_object(oop p) {
1121     shenandoah_assert_marked(nullptr, p);
1122     if (!p->is_forwarded()) {
1123       _heap->evacuate_object(p, _thread);
1124     }
1125   }
1126 };
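// Worker task that claims collection set regions and evacuates their live objects,
// either concurrently or during a pause.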
1127 
1128 class ShenandoahEvacuationTask : public WorkerTask {
1129 private:
1130   ShenandoahHeap* const _sh;
1131   ShenandoahCollectionSet* const _cs;
1132   bool _concurrent;
1133 public:
1134   ShenandoahEvacuationTask(ShenandoahHeap* sh,
1135                            ShenandoahCollectionSet* cs,
1136                            bool concurrent) :
1137     WorkerTask("Shenandoah Evacuation"),
1138     _sh(sh),
1139     _cs(cs),
1140     _concurrent(concurrent)
1141   {}
1142 
1143   void work(uint worker_id) {
1144     if (_concurrent) {
1145       ShenandoahConcurrentWorkerSession worker_session(worker_id);
1146       ShenandoahSuspendibleThreadSetJoiner stsj;
1147       ShenandoahEvacOOMScope oom_evac_scope;
1148       do_work();
1149     } else {
1150       ShenandoahParallelWorkerSession worker_session(worker_id);
1151       ShenandoahEvacOOMScope oom_evac_scope;
1152       do_work();
1153     }
1154   }
1155 
1156 private:
1157   void do_work() {
1158     ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
1159     ShenandoahHeapRegion* r;
1160     while ((r =_cs->claim_next()) != nullptr) {
1161       assert(r->has_live(), "Region %zu should have been reclaimed early", r->index());
1162       _sh->marked_object_iterate(r, &cl);
1163 
1164       if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
1165         break;
1166       }
1167     }
1168   }
1169 };
1170 
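// Thread closure that retires a thread's GCLAB (and, in generational mode, its PLAB),
// optionally resetting the recorded LAB sizes.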
1171 class ShenandoahRetireGCLABClosure : public ThreadClosure {
1172 private:
1173   bool const _resize;
1174 public:
1175   explicit ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
1176   void do_thread(Thread* thread) override {
1177     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1178     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1179     gclab->retire();
1180     if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1181       ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1182     }
1183 
1184     if (ShenandoahHeap::heap()->mode()->is_generational()) {
1185       PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1186       assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
1187 
1188       // There are two reasons to retire all plabs between old-gen evacuation passes.
1189       //  1. We need to make the plab memory parsable by remembered-set scanning.
1190       //  2. We need to establish a trustworthy UpdateWaterMark value within each old-gen heap region
1191       ShenandoahGenerationalHeap::heap()->retire_plab(plab, thread);
1192 
1193       // Re-enable promotions for the next evacuation phase.
1194       ShenandoahThreadLocalData::enable_plab_promotions(thread);
1195 
1196       // Reset the fill size for next evacuation phase.
1197       if (_resize && ShenandoahThreadLocalData::plab_size(thread) > 0) {
1198         ShenandoahThreadLocalData::set_plab_size(thread, 0);
1199       }
1200     }
1201   }
1202 };
1203 
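// Handshake closure that publishes the new GC state into each thread's thread-local copy.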
1204 class ShenandoahGCStatePropagatorHandshakeClosure : public HandshakeClosure {
1205 public:
1206   explicit ShenandoahGCStatePropagatorHandshakeClosure(char gc_state) :
1207     HandshakeClosure("Shenandoah GC State Change"),
1208     _gc_state(gc_state) {}
1209 
1210   void do_thread(Thread* thread) override {
1211     ShenandoahThreadLocalData::set_gc_state(thread, _gc_state);
1212   }
1213 private:
1214   char _gc_state;
1215 };
1216 
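// Handshake closure that propagates the new GC state and retires the GCLAB/PLAB of threads
// that have one, in preparation for the update-refs phase.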
1217 class ShenandoahPrepareForUpdateRefsHandshakeClosure : public HandshakeClosure {
1218 public:
1219   explicit ShenandoahPrepareForUpdateRefsHandshakeClosure(char gc_state) :
1220     HandshakeClosure("Shenandoah Prepare for Update Refs"),
1221     _retire(ResizeTLAB), _propagator(gc_state) {}
1222 
1223   void do_thread(Thread* thread) override {
1224     _propagator.do_thread(thread);
1225     if (ShenandoahThreadLocalData::gclab(thread) != nullptr) {
1226       _retire.do_thread(thread);
1227     }
1228   }
1229 private:
1230   ShenandoahRetireGCLABClosure _retire;
1231   ShenandoahGCStatePropagatorHandshakeClosure _propagator;
1232 };
1233 
1234 void ShenandoahHeap::evacuate_collection_set(ShenandoahGeneration* generation, bool concurrent) {
1235   assert(generation->is_global(), "Only global generation expected here");
1236   ShenandoahEvacuationTask task(this, _collection_set, concurrent);
1237   workers()->run_task(&task);
1238 }
1239 
1240 void ShenandoahHeap::concurrent_prepare_for_update_refs() {
1241   {
    // Java threads take this lock while they are being attached and added to the list of threads.
    // If another thread holds this lock before we update the gc state, it will receive a stale
    // gc state, but it will have been added to the list of Java threads and so will be corrected
    // by the following handshake.
1246     MutexLocker lock(Threads_lock);
1247 
1248     // A cancellation at this point means the degenerated cycle must resume from update-refs.
1249     set_gc_state_concurrent(EVACUATION, false);
1250     set_gc_state_concurrent(WEAK_ROOTS, false);
1251     set_gc_state_concurrent(UPDATE_REFS, true);
1252   }
1253 
1254   // This will propagate the gc state and retire gclabs and plabs for threads that require it.
1255   ShenandoahPrepareForUpdateRefsHandshakeClosure prepare_for_update_refs(_gc_state.raw_value());
1256 
1257   // The handshake won't touch worker threads (or control thread, or VM thread), so do those separately.
1258   Threads::non_java_threads_do(&prepare_for_update_refs);
1259 
1260   // Now retire gclabs and plabs and propagate gc_state for mutator threads
1261   Handshake::execute(&prepare_for_update_refs);
1262 
1263   _update_refs_iterator.reset();
1264 }
1265 
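// Handshake closure that chains two closures so that both run within a single handshake.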
class ShenandoahCompositeHandshakeClosure : public HandshakeClosure {
  HandshakeClosure* _handshake_1;
  HandshakeClosure* _handshake_2;
public:
  ShenandoahCompositeHandshakeClosure(HandshakeClosure* handshake_1, HandshakeClosure* handshake_2) :
    HandshakeClosure(handshake_2->name()),
    _handshake_1(handshake_1), _handshake_2(handshake_2) {}

  void do_thread(Thread* thread) override {
    _handshake_1->do_thread(thread);
    _handshake_2->do_thread(thread);
  }
};
1279 
1280 void ShenandoahHeap::concurrent_final_roots(HandshakeClosure* handshake_closure) {
1281   {
1282     assert(!is_evacuation_in_progress(), "Should not evacuate for abbreviated or old cycles");
1283     MutexLocker lock(Threads_lock);
1284     set_gc_state_concurrent(WEAK_ROOTS, false);
1285   }
1286 
1287   ShenandoahGCStatePropagatorHandshakeClosure propagator(_gc_state.raw_value());
1288   Threads::non_java_threads_do(&propagator);
1289   if (handshake_closure == nullptr) {
1290     Handshake::execute(&propagator);
1291   } else {
1292     ShenandoahCompositeHandshakeClosure composite(&propagator, handshake_closure);
1293     Handshake::execute(&composite);
1294   }
1295 }
1296 
1297 oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
1298   assert(thread == Thread::current(), "Expected thread parameter to be current thread.");
1299   if (ShenandoahThreadLocalData::is_oom_during_evac(thread)) {
1300     // This thread went through the OOM during evac protocol. It is safe to return
1301     // the forward pointer. It must not attempt to evacuate any other objects.
1302     return ShenandoahBarrierSet::resolve_forwarded(p);
1303   }
1304 
1305   assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
1306 
1307   ShenandoahHeapRegion* r = heap_region_containing(p);
1308   assert(!r->is_humongous(), "never evacuate humongous objects");
1309 
1310   ShenandoahAffiliation target_gen = r->affiliation();
1311   return try_evacuate_object(p, thread, r, target_gen);
1312 }
1313 
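// Try to evacuate the object into the target generation, preferring a GCLAB allocation
// and falling back to a shared allocation. On allocation failure, this engages the
// OOM-during-evac protocol and returns the existing forwardee. If another thread wins
// the forwarding race, the speculative copy is rolled back (LAB allocations) or
// overwritten with a filler object (shared allocations), and the winning copy is returned.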
1314 oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapRegion* from_region,
1315                                                ShenandoahAffiliation target_gen) {
1316   assert(target_gen == YOUNG_GENERATION, "Only expect evacuations to young in this mode");
1317   assert(from_region->is_young(), "Only expect evacuations from young in this mode");
1318   bool alloc_from_lab = true;
1319   HeapWord* copy = nullptr;
1320   size_t size = ShenandoahForwarding::size(p);
1321 
1322 #ifdef ASSERT
1323   if (ShenandoahOOMDuringEvacALot &&
1324       (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
1325     copy = nullptr;
1326   } else {
1327 #endif
1328     if (UseTLAB) {
1329       copy = allocate_from_gclab(thread, size);
1330     }
1331     if (copy == nullptr) {
1332       // If we failed to allocate in LAB, we'll try a shared allocation.
1333       ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, target_gen);
1334       copy = allocate_memory(req);
1335       alloc_from_lab = false;
1336     }
1337 #ifdef ASSERT
1338   }
1339 #endif
1340 
1341   if (copy == nullptr) {
1342     control_thread()->handle_alloc_failure_evac(size);
1343 
1344     _oom_evac_handler.handle_out_of_memory_during_evacuation();
1345 
1346     return ShenandoahBarrierSet::resolve_forwarded(p);
1347   }
1348 
1349   if (ShenandoahEvacTracking) {
1350     evac_tracker()->begin_evacuation(thread, size * HeapWordSize, from_region->affiliation(), target_gen);
1351   }
1352 
1353   // Copy the object:
1354   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
1355 
1356   // Try to install the new forwarding pointer.
1357   oop copy_val = cast_to_oop(copy);
1358   oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
1359   if (result == copy_val) {
1360     // Successfully evacuated. Our copy is now the public one!
1361     ContinuationGCSupport::relativize_stack_chunk(copy_val);
1362     shenandoah_assert_correct(nullptr, copy_val);
1363     if (ShenandoahEvacTracking) {
1364       evac_tracker()->end_evacuation(thread, size * HeapWordSize, from_region->affiliation(), target_gen);
1365     }
1366     return copy_val;
  } else {
1368     // Failed to evacuate. We need to deal with the object that is left behind. Since this
1369     // new allocation is certainly after TAMS, it will be considered live in the next cycle.
1370     // But if it happens to contain references to evacuated regions, those references would
1371     // not get updated for this stale copy during this cycle, and we will crash while scanning
1372     // it the next cycle.
1373     if (alloc_from_lab) {
1374       // For LAB allocations, it is enough to rollback the allocation ptr. Either the next
1375       // object will overwrite this stale copy, or the filler object on LAB retirement will
1376       // do this.
1377       ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
1378     } else {
1379       // For non-LAB allocations, we have no way to retract the allocation, and
1380       // have to explicitly overwrite the copy with the filler object. With that overwrite,
1381       // we have to keep the fwdptr initialized and pointing to our (stale) copy.
1382       assert(size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
1383       fill_with_object(copy, size);
1384       shenandoah_assert_correct(nullptr, copy_val);
1385       // For non-LAB allocations, the object has already been registered
1386     }
1387     shenandoah_assert_correct(nullptr, result);
1388     return result;
1389   }
1390 }
1391 
1392 void ShenandoahHeap::trash_cset_regions() {
1393   ShenandoahHeapLocker locker(lock());
1394 
1395   ShenandoahCollectionSet* set = collection_set();
1396   ShenandoahHeapRegion* r;
1397   set->clear_current_index();
1398   while ((r = set->next()) != nullptr) {
1399     r->make_trash();
1400   }
1401   collection_set()->clear();
1402 }
1403 
1404 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
1405   st->print_cr("Heap Regions:");
1406   st->print_cr("Region state: EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HP=pinned humongous start");
1407   st->print_cr("              HC=humongous continuation, CS=collection set, TR=trash, P=pinned, CSP=pinned collection set");
1408   st->print_cr("BTE=bottom/top/end, TAMS=top-at-mark-start");
1409   st->print_cr("UWM=update watermark, U=used");
1410   st->print_cr("T=TLAB allocs, G=GCLAB allocs");
1411   st->print_cr("S=shared allocs, L=live data");
1412   st->print_cr("CP=critical pins");
1413 
1414   for (size_t i = 0; i < num_regions(); i++) {
1415     get_region(i)->print_on(st);
1416   }
1417 }
1418 
1419 void ShenandoahHeap::process_gc_stats() const {
1420   // Commit worker statistics to cycle data
1421   phase_timings()->flush_par_workers_to_cycle();
1422 
1423   // Print GC stats for current cycle
1424   LogTarget(Info, gc, stats) lt;
1425   if (lt.is_enabled()) {
1426     ResourceMark rm;
1427     LogStream ls(lt);
1428     phase_timings()->print_cycle_on(&ls);
1429     if (ShenandoahEvacTracking) {
      ShenandoahCycleStats evac_stats = evac_tracker()->flush_cycle_to_global();
1431       evac_tracker()->print_evacuations_on(&ls, &evac_stats.workers,
1432                                                &evac_stats.mutators);
1433     }
1434   }
1435 
1436   // Commit statistics to globals
1437   phase_timings()->flush_cycle_to_global();
1438 }
1439 
1440 size_t ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) const {
1441   assert(start->is_humongous_start(), "reclaim regions starting with the first one");
1442   assert(!start->has_live(), "liveness must be zero");
1443 
1444   // Do not try to get the size of this humongous object. STW collections will
1445   // have already unloaded classes, so an unmarked object may have a bad klass pointer.
1446   ShenandoahHeapRegion* region = start;
1447   size_t index = region->index();
1448   do {
1449     assert(region->is_humongous(), "Expect correct humongous start or continuation");
1450     assert(!region->is_cset(), "Humongous region should not be in collection set");
1451     region->make_trash_immediate();
1452     region = get_region(++index);
1453   } while (region != nullptr && region->is_humongous_continuation());
1454 
1455   // Return number of regions trashed
1456   return index - start->index();
1457 }
1458 
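// Debug-time closure that asserts GCLABs (and PLABs in generational mode) have
// already been retired and have no words remaining.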
1459 class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
1460 public:
1461   ShenandoahCheckCleanGCLABClosure() {}
1462   void do_thread(Thread* thread) {
1463     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1464     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1465     assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
1466 
1467     if (ShenandoahHeap::heap()->mode()->is_generational()) {
1468       PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1469       assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
1470       assert(plab->words_remaining() == 0, "PLAB should not need retirement");
1471     }
1472   }
1473 };
1474 
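// Make all LABs parsable for heap walks: mutator TLABs are made parsable (and retired
// when ZeroTLAB is set), and GCLABs are retired for mutator, GC worker and safepoint
// worker threads.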
1475 void ShenandoahHeap::labs_make_parsable() {
1476   assert(UseTLAB, "Only call with UseTLAB");
1477 
1478   ShenandoahRetireGCLABClosure cl(false);
1479 
1480   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1481     ThreadLocalAllocBuffer& tlab = t->tlab();
1482     tlab.make_parsable();
1483     if (ZeroTLAB) {
1484       t->retire_tlab();
1485     }
1486     cl.do_thread(t);
1487   }
1488 
1489   workers()->threads_do(&cl);
1490 
1491   if (safepoint_workers() != nullptr) {
1492     safepoint_workers()->threads_do(&cl);
1493   }
1494 }
1495 
1496 void ShenandoahHeap::tlabs_retire(bool resize) {
1497   assert(UseTLAB, "Only call with UseTLAB");
1498   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1499 
1500   ThreadLocalAllocStats stats;
1501 
1502   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1503     t->retire_tlab(&stats);
1504     if (resize) {
1505       t->tlab().resize();
1506     }
1507   }
1508 
1509   stats.publish();
1510 
1511 #ifdef ASSERT
1512   ShenandoahCheckCleanGCLABClosure cl;
1513   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1514     cl.do_thread(t);
1515   }
1516   workers()->threads_do(&cl);
1517 #endif
1518 }
1519 
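// Retire GCLABs for mutator, GC worker and safepoint worker threads. The resize flag
// is passed to the retirement closure and requires ResizeTLAB to be enabled.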
1520 void ShenandoahHeap::gclabs_retire(bool resize) {
1521   assert(UseTLAB, "Only call with UseTLAB");
1522   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1523 
1524   ShenandoahRetireGCLABClosure cl(resize);
1525   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1526     cl.do_thread(t);
1527   }
1528 
1529   workers()->threads_do(&cl);
1530 
1531   if (safepoint_workers() != nullptr) {
1532     safepoint_workers()->threads_do(&cl);
1533   }
1534 }
1535 
1536 // Returns size in bytes
1537 size_t ShenandoahHeap::unsafe_max_tlab_alloc() const {
1538   // Return the max allowed size, and let the allocation path
1539   // figure out the safe size for current allocation.
1540   return ShenandoahHeapRegion::max_tlab_size_bytes();
1541 }
1542 
1543 size_t ShenandoahHeap::max_tlab_size() const {
1544   // Returns size in words
1545   return ShenandoahHeapRegion::max_tlab_size_words();
1546 }
1547 
1548 void ShenandoahHeap::collect_as_vm_thread(GCCause::Cause cause) {
1549   // These requests are ignored because we can't easily have Shenandoah jump into
1550   // a synchronous (degenerated or full) cycle while it is in the middle of a concurrent
1551   // cycle. We _could_ cancel the concurrent cycle and then try to run a cycle directly
1552   // on the VM thread, but this would confuse the control thread mightily and doesn't
1553   // seem worth the trouble. Instead, we will have the caller thread run (and wait for) a
1554   // concurrent cycle in the prologue of the heap inspect/dump operation (see VM_HeapDumper::doit_prologue).
1555   // This is how other concurrent collectors in the JVM handle this scenario as well.
1556   assert(Thread::current()->is_VM_thread(), "Should be the VM thread");
1557   guarantee(cause == GCCause::_heap_dump || cause == GCCause::_heap_inspection, "Invalid cause");
1558 }
1559 
1560 void ShenandoahHeap::collect(GCCause::Cause cause) {
1561   control_thread()->request_gc(cause);
1562 }
1563 
1564 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
1565   // This method is only called by `CollectedHeap::collect_as_vm_thread`, which we have
1566   // overridden to do nothing. See the comment there for an explanation of how heap inspections
1567   // work for Shenandoah.
1568   ShouldNotReachHere();
1569 }
1570 
1571 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1572   ShenandoahHeapRegion* r = heap_region_containing(addr);
1573   if (r != nullptr) {
1574     return r->block_start(addr);
1575   }
1576   return nullptr;
1577 }
1578 
1579 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1580   ShenandoahHeapRegion* r = heap_region_containing(addr);
1581   return r->block_is_obj(addr);
1582 }
1583 
1584 bool ShenandoahHeap::print_location(outputStream* st, void* addr) const {
1585   return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr);
1586 }
1587 
1588 void ShenandoahHeap::prepare_for_verify() {
1589   if (SafepointSynchronize::is_at_safepoint() && UseTLAB) {
1590     labs_make_parsable();
1591   }
1592 }
1593 
1594 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1595   if (_shenandoah_policy->is_at_shutdown()) {
1596     return;
1597   }
1598 
1599   if (_control_thread != nullptr) {
1600     tcl->do_thread(_control_thread);
1601   }
1602 
1603   if (_uncommit_thread != nullptr) {
1604     tcl->do_thread(_uncommit_thread);
1605   }
1606 
1607   workers()->threads_do(tcl);
1608   if (_safepoint_workers != nullptr) {
1609     _safepoint_workers->threads_do(tcl);
1610   }
1611 }
1612 
1613 void ShenandoahHeap::print_tracing_info() const {
1614   LogTarget(Info, gc, stats) lt;
1615   if (lt.is_enabled()) {
1616     ResourceMark rm;
1617     LogStream ls(lt);
1618 
1619     if (ShenandoahEvacTracking) {
1620       evac_tracker()->print_global_on(&ls);
1621       ls.cr();
1622       ls.cr();
1623     }
1624 
1625     phase_timings()->print_global_on(&ls);
1626 
1627     ls.cr();
1628     ls.cr();
1629 
1630     shenandoah_policy()->print_gc_stats(&ls);
1631 
1632     ls.cr();
1633     ls.cr();
1634   }
1635 }
1636 
1637 // Active generation may only be set by the VM thread at a safepoint.
1638 void ShenandoahHeap::set_active_generation(ShenandoahGeneration* generation) {
1639   assert(Thread::current()->is_VM_thread(), "Only the VM Thread");
1640   assert(SafepointSynchronize::is_at_safepoint(), "Only at a safepoint!");
1641   _active_generation = generation;
1642 }
1643 
1644 void ShenandoahHeap::on_cycle_start(GCCause::Cause cause, ShenandoahGeneration* generation) {
1645   shenandoah_policy()->record_collection_cause(cause);
1646 
1647   const GCCause::Cause current = gc_cause();
1648   assert(current == GCCause::_no_gc, "Over-writing cause: %s, with: %s",
1649          GCCause::to_string(current), GCCause::to_string(cause));
1650 
1651   set_gc_cause(cause);
1652 
1653   generation->heuristics()->record_cycle_start();
1654 }
1655 
1656 void ShenandoahHeap::on_cycle_end(ShenandoahGeneration* generation) {
1657   assert(gc_cause() != GCCause::_no_gc, "cause wasn't set");
1658 
1659   generation->heuristics()->record_cycle_end();
1660   if (mode()->is_generational() && generation->is_global()) {
1661     // If we just completed a GLOBAL GC, claim credit for completion of young-gen and old-gen GC as well
1662     young_generation()->heuristics()->record_cycle_end();
1663     old_generation()->heuristics()->record_cycle_end();
1664   }
1665 
1666   set_gc_cause(GCCause::_no_gc);
1667 }
1668 
1669 void ShenandoahHeap::verify(VerifyOption vo) {
1670   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1671     if (ShenandoahVerify) {
1672       verifier()->verify_generic(active_generation(), vo);
1673     } else {
1674       // TODO: Consider allocating verification bitmaps on demand,
1675       // and turn this on unconditionally.
1676     }
1677   }
1678 }
1679 size_t ShenandoahHeap::tlab_capacity() const {
1680   return _free_set->capacity_not_holding_lock();
1681 }
1682 
1683 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1684 private:
1685   MarkBitMap* _bitmap;
1686   ShenandoahScanObjectStack* _oop_stack;
1687   ShenandoahHeap* const _heap;
1688   ShenandoahMarkingContext* const _marking_context;
1689 
1690   template <class T>
1691   void do_oop_work(T* p) {
1692     T o = RawAccess<>::oop_load(p);
1693     if (!CompressedOops::is_null(o)) {
1694       oop obj = CompressedOops::decode_not_null(o);
1695       if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
1696         // There may be dead oops in weak roots in concurrent root phase, do not touch them.
1697         return;
1698       }
1699       obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
1700 
1701       assert(oopDesc::is_oop(obj), "must be a valid oop");
1702       if (!_bitmap->is_marked(obj)) {
1703         _bitmap->mark(obj);
1704         _oop_stack->push(obj);
1705       }
1706     }
1707   }
1708 public:
1709   ObjectIterateScanRootClosure(MarkBitMap* bitmap, ShenandoahScanObjectStack* oop_stack) :
1710     _bitmap(bitmap), _oop_stack(oop_stack), _heap(ShenandoahHeap::heap()),
1711     _marking_context(_heap->marking_context()) {}
1712   void do_oop(oop* p)       { do_oop_work(p); }
1713   void do_oop(narrowOop* p) { do_oop_work(p); }
1714 };
1715 
1716 /*
1717  * This is public API, used in preparation of object_iterate().
1718  * Since we don't do linear scan of heap in object_iterate() (see comment below), we don't
1719  * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can
1720  * control, we call SH::tlabs_retire, SH::gclabs_retire.
1721  */
1722 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
1723   // No-op.
1724 }
1725 
1726 /*
1727  * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
1728  *
1729  * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
1730  * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
1731  * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
1732  * scanning therefore depends on having a valid marking bitmap to support it. However, we only
1733  * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
1734  * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
1735  * wiped the bitmap in preparation for next marking).
1736  *
1737  * For all those reasons, we implement object iteration as a single marking traversal, reporting
1738  * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1739  * is allowed to report dead objects, but is not required to do so.
1740  */
1741 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1742   // Reset bitmap
1743   if (!prepare_aux_bitmap_for_iteration())
1744     return;
1745 
1746   ShenandoahScanObjectStack oop_stack;
1747   ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1748   // Seed the stack with root scan
1749   scan_roots_for_iteration(&oop_stack, &oops);
1750 
1751   // Work through the oop stack to traverse heap
1752   while (! oop_stack.is_empty()) {
1753     oop obj = oop_stack.pop();
1754     assert(oopDesc::is_oop(obj), "must be a valid oop");
1755     cl->do_object(obj);
1756     obj->oop_iterate(&oops);
1757   }
1758 
1759   assert(oop_stack.is_empty(), "should be empty");
1760   // Reclaim bitmap
1761   reclaim_aux_bitmap_for_iteration();
1762 }
1763 
1764 bool ShenandoahHeap::prepare_aux_bitmap_for_iteration() {
1765   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1766   if (!_aux_bitmap_region_special) {
1767     bool success = os::commit_memory((char *) _aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false);
1768     if (!success) {
1769       log_warning(gc)("Auxiliary marking bitmap commit failed: " PTR_FORMAT " (%zu bytes)",
1770                       p2i(_aux_bitmap_region.start()), _aux_bitmap_region.byte_size());
1771       return false;
1772     }
1773   }
1774   _aux_bit_map.clear();
1775   return true;
1776 }
1777 
1778 void ShenandoahHeap::scan_roots_for_iteration(ShenandoahScanObjectStack* oop_stack, ObjectIterateScanRootClosure* oops) {
  // Process GC roots according to the current GC cycle.
  // This populates the work stack with the initial objects.
  // It is important to relinquish the associated locks before diving
  // into the heap dumper.
1783   uint n_workers = safepoint_workers() != nullptr ? safepoint_workers()->active_workers() : 1;
1784   ShenandoahHeapIterationRootScanner rp(n_workers);
1785   rp.roots_do(oops);
1786 }
1787 
1788 void ShenandoahHeap::reclaim_aux_bitmap_for_iteration() {
1789   if (!_aux_bitmap_region_special) {
1790     os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size());
1791   }
1792 }
1793 
// Closure for iterating objects in parallel
1795 class ShenandoahObjectIterateParScanClosure : public BasicOopIterateClosure {
1796 private:
1797   MarkBitMap* _bitmap;
1798   ShenandoahObjToScanQueue* _queue;
1799   ShenandoahHeap* const _heap;
1800   ShenandoahMarkingContext* const _marking_context;
1801 
1802   template <class T>
1803   void do_oop_work(T* p) {
1804     T o = RawAccess<>::oop_load(p);
1805     if (!CompressedOops::is_null(o)) {
1806       oop obj = CompressedOops::decode_not_null(o);
1807       if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
1808         // There may be dead oops in weak roots in concurrent root phase, do not touch them.
1809         return;
1810       }
1811       obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
1812 
1813       assert(oopDesc::is_oop(obj), "Must be a valid oop");
1814       if (_bitmap->par_mark(obj)) {
1815         _queue->push(ShenandoahMarkTask(obj));
1816       }
1817     }
1818   }
1819 public:
1820   ShenandoahObjectIterateParScanClosure(MarkBitMap* bitmap, ShenandoahObjToScanQueue* q) :
1821     _bitmap(bitmap), _queue(q), _heap(ShenandoahHeap::heap()),
1822     _marking_context(_heap->marking_context()) {}
1823   void do_oop(oop* p)       { do_oop_work(p); }
1824   void do_oop(narrowOop* p) { do_oop_work(p); }
1825 };
1826 
// Object iterator for parallel heap iteration.
// The root scanning phase happens during construction, preparing the
// parallel marking queues.
// Every worker processes its own marking queue. Work stealing is used
// to balance the workload.
1832 class ShenandoahParallelObjectIterator : public ParallelObjectIteratorImpl {
1833 private:
1834   uint                         _num_workers;
1835   bool                         _init_ready;
1836   MarkBitMap*                  _aux_bit_map;
1837   ShenandoahHeap*              _heap;
1838   ShenandoahScanObjectStack    _roots_stack; // global roots stack
1839   ShenandoahObjToScanQueueSet* _task_queues;
1840 public:
1841   ShenandoahParallelObjectIterator(uint num_workers, MarkBitMap* bitmap) :
1842         _num_workers(num_workers),
1843         _init_ready(false),
1844         _aux_bit_map(bitmap),
1845         _heap(ShenandoahHeap::heap()) {
1846     // Initialize bitmap
1847     _init_ready = _heap->prepare_aux_bitmap_for_iteration();
1848     if (!_init_ready) {
1849       return;
1850     }
1851 
1852     ObjectIterateScanRootClosure oops(_aux_bit_map, &_roots_stack);
1853     _heap->scan_roots_for_iteration(&_roots_stack, &oops);
1854 
1855     _init_ready = prepare_worker_queues();
1856   }
1857 
1858   ~ShenandoahParallelObjectIterator() {
1859     // Reclaim bitmap
1860     _heap->reclaim_aux_bitmap_for_iteration();
1861     // Reclaim queue for workers
    if (_task_queues != nullptr) {
1863       for (uint i = 0; i < _num_workers; ++i) {
1864         ShenandoahObjToScanQueue* q = _task_queues->queue(i);
1865         if (q != nullptr) {
1866           delete q;
1867           _task_queues->register_queue(i, nullptr);
1868         }
1869       }
1870       delete _task_queues;
1871       _task_queues = nullptr;
1872     }
1873   }
1874 
1875   virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
1876     if (_init_ready) {
1877       object_iterate_parallel(cl, worker_id, _task_queues);
1878     }
1879   }
1880 
1881 private:
1882   // Divide global root_stack into worker queues
1883   bool prepare_worker_queues() {
1884     _task_queues = new ShenandoahObjToScanQueueSet((int) _num_workers);
    // Initialize a queue for every worker
1886     for (uint i = 0; i < _num_workers; ++i) {
1887       ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
1888       _task_queues->register_queue(i, task_queue);
1889     }
    // Divide roots among the workers. Assume that the distribution of object references
    // is related to the root kind, so use round-robin to give every worker the same chance
    // to process every kind of root.
1893     size_t roots_num = _roots_stack.size();
1894     if (roots_num == 0) {
1895       // No work to do
1896       return false;
1897     }
1898 
1899     for (uint j = 0; j < roots_num; j++) {
1900       uint stack_id = j % _num_workers;
1901       oop obj = _roots_stack.pop();
1902       _task_queues->queue(stack_id)->push(ShenandoahMarkTask(obj));
1903     }
1904     return true;
1905   }
1906 
1907   void object_iterate_parallel(ObjectClosure* cl,
1908                                uint worker_id,
1909                                ShenandoahObjToScanQueueSet* queue_set) {
1910     assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1911     assert(queue_set != nullptr, "task queue must not be null");
1912 
1913     ShenandoahObjToScanQueue* q = queue_set->queue(worker_id);
1914     assert(q != nullptr, "object iterate queue must not be null");
1915 
1916     ShenandoahMarkTask t;
1917     ShenandoahObjectIterateParScanClosure oops(_aux_bit_map, q);
1918 
1919     // Work through the queue to traverse heap.
1920     // Steal when there is no task in queue.
1921     while (q->pop(t) || queue_set->steal(worker_id, t)) {
1922       oop obj = t.obj();
1923       assert(oopDesc::is_oop(obj), "must be a valid oop");
1924       cl->do_object(obj);
1925       obj->oop_iterate(&oops);
1926     }
1927     assert(q->is_empty(), "should be empty");
1928   }
1929 };
1930 
1931 ParallelObjectIteratorImpl* ShenandoahHeap::parallel_object_iterator(uint workers) {
1932   return new ShenandoahParallelObjectIterator(workers, &_aux_bit_map);
1933 }
1934 
1935 // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
1936 void ShenandoahHeap::keep_alive(oop obj) {
1937   if (is_concurrent_mark_in_progress() && (obj != nullptr)) {
1938     ShenandoahBarrierSet::barrier_set()->enqueue(obj);
1939   }
1940 }
1941 
1942 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1943   for (size_t i = 0; i < num_regions(); i++) {
1944     ShenandoahHeapRegion* current = get_region(i);
1945     blk->heap_region_do(current);
1946   }
1947 }
1948 
1949 class ShenandoahParallelHeapRegionTask : public WorkerTask {
1950 private:
1951   ShenandoahHeap* const _heap;
1952   ShenandoahHeapRegionClosure* const _blk;
1953   size_t const _stride;
1954 
1955   shenandoah_padding(0);
1956   Atomic<size_t> _index;
1957   shenandoah_padding(1);
1958 
1959 public:
1960   ShenandoahParallelHeapRegionTask(ShenandoahHeapRegionClosure* blk, size_t stride) :
1961           WorkerTask("Shenandoah Parallel Region Operation"),
1962           _heap(ShenandoahHeap::heap()), _blk(blk), _stride(stride), _index(0) {}
1963 
1964   void work(uint worker_id) {
1965     ShenandoahParallelWorkerSession worker_session(worker_id);
1966     size_t stride = _stride;
1967 
1968     size_t max = _heap->num_regions();
1969     while (_index.load_relaxed() < max) {
1970       size_t cur = _index.fetch_then_add(stride, memory_order_relaxed);
1971       size_t start = cur;
1972       size_t end = MIN2(cur + stride, max);
1973       if (start >= max) break;
1974 
1975       for (size_t i = cur; i < end; i++) {
1976         ShenandoahHeapRegion* current = _heap->get_region(i);
1977         _blk->heap_region_do(current);
1978       }
1979     }
1980   }
1981 };
1982 
1983 void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1984   assert(blk->is_thread_safe(), "Only thread-safe closures here");
1985   const uint active_workers = workers()->active_workers();
1986   const size_t n_regions = num_regions();
1987   size_t stride = blk->parallel_region_stride();
1988   if (stride == 0 && active_workers > 1) {
1989     // Automatically derive the stride to balance the work between threads
1990     // evenly. Do not try to split work if below the reasonable threshold.
1991     constexpr size_t threshold = 4096;
1992     stride = n_regions <= threshold ?
1993             threshold :
1994             (n_regions + active_workers - 1) / active_workers;
1995   }
1996 
1997   if (n_regions > stride && active_workers > 1) {
1998     ShenandoahParallelHeapRegionTask task(blk, stride);
1999     workers()->run_task(&task);
2000   } else {
2001     heap_region_iterate(blk);
2002   }
2003 }
2004 
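// Empty handshake closure: executing it guarantees that every Java thread has passed
// a handshake poll before rendezvous_threads() returns.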
2005 class ShenandoahRendezvousHandshakeClosure : public HandshakeClosure {
2006 public:
2007   inline ShenandoahRendezvousHandshakeClosure(const char* name) : HandshakeClosure(name) {}
2008   inline void do_thread(Thread* thread) {}
2009 };
2010 
2011 void ShenandoahHeap::rendezvous_threads(const char* name) {
2012   ShenandoahRendezvousHandshakeClosure cl(name);
2013   Handshake::execute(&cl);
2014 }
2015 
2016 void ShenandoahHeap::recycle_trash() {
2017   free_set()->recycle_trash();
2018 }
2019 
2020 void ShenandoahHeap::do_class_unloading() {
2021   _unloader.unload();
2022   if (mode()->is_generational()) {
2023     old_generation()->set_parsable(false);
2024   }
2025 }
2026 
2027 void ShenandoahHeap::stw_weak_refs(ShenandoahGeneration* generation, bool full_gc) {
2028   // Weak refs processing
2029   ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
2030                                                 : ShenandoahPhaseTimings::degen_gc_weakrefs;
2031   ShenandoahTimingsTracker t(phase);
2032   ShenandoahGCWorkerPhase worker_phase(phase);
2033   generation->ref_processor()->process_references(phase, workers(), false /* concurrent */);
2034 }
2035 
2036 void ShenandoahHeap::prepare_update_heap_references() {
2037   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2038 
2039   // Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to
2040   // make them parsable for update code to work correctly. Plus, we can compute new sizes
2041   // for future GCLABs here.
2042   if (UseTLAB) {
2043     ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
2044     gclabs_retire(ResizeTLAB);
2045   }
2046 
2047   _update_refs_iterator.reset();
2048 }
2049 
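// Push a pending gc state change to the thread-local copies of all threads at a safepoint.
// Also adjusts the SATB queue filter so that young pointers are filtered out when only
// old marking is in progress.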
2050 void ShenandoahHeap::propagate_gc_state_to_all_threads() {
2051   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
2052   if (_gc_state_changed) {
2053     // If we are only marking old, we do not need to process young pointers
2054     ShenandoahBarrierSet::satb_mark_queue_set().set_filter_out_young(
2055       is_concurrent_old_mark_in_progress() && !is_concurrent_young_mark_in_progress()
2056     );
2057     ShenandoahGCStatePropagatorHandshakeClosure propagator(_gc_state.raw_value());
2058     Threads::threads_do(&propagator);
2059     _gc_state_changed = false;
2060   }
2061 }
2062 
2063 void ShenandoahHeap::set_gc_state_at_safepoint(uint mask, bool value) {
2064   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
2065   _gc_state.set_cond(mask, value);
2066   _gc_state_changed = true;
2067 }
2068 
2069 void ShenandoahHeap::set_gc_state_concurrent(uint mask, bool value) {
  // Holding the thread lock here ensures that any thread created after we change the gc
2071   // state will have the correct state. It also prevents attaching threads from seeing
2072   // an inconsistent state. See ShenandoahBarrierSet::on_thread_attach for reference. Established
2073   // threads will use their thread local copy of the gc state (changed by a handshake, or on a
2074   // safepoint).
2075   assert(Threads_lock->is_locked(), "Must hold thread lock for concurrent gc state change");
2076   _gc_state.set_cond(mask, value);
2077 }
2078 
2079 void ShenandoahHeap::set_concurrent_young_mark_in_progress(bool in_progress) {
2080   uint mask;
2081   assert(!has_forwarded_objects(), "Young marking is not concurrent with evacuation");
2082   if (!in_progress && is_concurrent_old_mark_in_progress()) {
2083     assert(mode()->is_generational(), "Only generational GC has old marking");
2084     assert(_gc_state.is_set(MARKING), "concurrent_old_marking_in_progress implies MARKING");
2085     // If old-marking is in progress when we turn off YOUNG_MARKING, leave MARKING (and OLD_MARKING) on
2086     mask = YOUNG_MARKING;
2087   } else {
2088     mask = MARKING | YOUNG_MARKING;
2089   }
2090   set_gc_state_at_safepoint(mask, in_progress);
2091   manage_satb_barrier(in_progress);
2092 }
2093 
2094 void ShenandoahHeap::set_concurrent_old_mark_in_progress(bool in_progress) {
2095 #ifdef ASSERT
2096   // has_forwarded_objects() iff UPDATE_REFS or EVACUATION
2097   bool has_forwarded = has_forwarded_objects();
2098   bool updating_or_evacuating = _gc_state.is_set(UPDATE_REFS | EVACUATION);
2099   bool evacuating = _gc_state.is_set(EVACUATION);
2100   assert ((has_forwarded == updating_or_evacuating) || (evacuating && !has_forwarded && collection_set()->is_empty()),
2101           "Updating or evacuating iff has forwarded objects, or if evacuation phase is promoting in place without forwarding");
2102 #endif
2103   if (!in_progress && is_concurrent_young_mark_in_progress()) {
2104     // If young-marking is in progress when we turn off OLD_MARKING, leave MARKING (and YOUNG_MARKING) on
2105     assert(_gc_state.is_set(MARKING), "concurrent_young_marking_in_progress implies MARKING");
2106     set_gc_state_at_safepoint(OLD_MARKING, in_progress);
2107   } else {
2108     set_gc_state_at_safepoint(MARKING | OLD_MARKING, in_progress);
2109   }
2110   manage_satb_barrier(in_progress);
2111 }
2112 
2113 bool ShenandoahHeap::is_prepare_for_old_mark_in_progress() const {
2114   return old_generation()->is_preparing_for_mark();
2115 }
2116 
2117 void ShenandoahHeap::manage_satb_barrier(bool active) {
2118   if (is_concurrent_mark_in_progress()) {
2119     // Ignore request to deactivate barrier while concurrent mark is in progress.
2120     // Do not attempt to re-activate the barrier if it is already active.
2121     if (active && !ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
2122       ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
2123     }
2124   } else {
2125     // No concurrent marking is in progress so honor request to deactivate,
2126     // but only if the barrier is already active.
2127     if (!active && ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
2128       ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
2129     }
2130   }
2131 }
2132 
2133 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
2134   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
2135   set_gc_state_at_safepoint(EVACUATION, in_progress);
2136 }
2137 
2138 void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
2139   if (in_progress) {
2140     _concurrent_strong_root_in_progress.set();
2141   } else {
2142     _concurrent_strong_root_in_progress.unset();
2143   }
2144 }
2145 
2146 void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
2147   set_gc_state_at_safepoint(WEAK_ROOTS, cond);
2148 }
2149 
2150 GCTracer* ShenandoahHeap::tracer() {
2151   return shenandoah_policy()->tracer();
2152 }
2153 
2154 size_t ShenandoahHeap::tlab_used() const {
2155   return _free_set->used_not_holding_lock();
2156 }
2157 
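// Atomically install the cancellation cause. Returns true if the gc was not already
// cancelled for a different reason, that is, if the previous cause was either _no_gc
// or _shenandoah_concurrent_gc.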
2158 bool ShenandoahHeap::try_cancel_gc(GCCause::Cause cause) {
2159   const GCCause::Cause prev = _cancelled_gc.xchg(cause);
2160   return prev == GCCause::_no_gc || prev == GCCause::_shenandoah_concurrent_gc;
2161 }
2162 
2163 void ShenandoahHeap::cancel_concurrent_mark() {
2164   if (mode()->is_generational()) {
2165     young_generation()->cancel_marking();
2166     old_generation()->cancel_marking();
2167   }
2168 
2169   global_generation()->cancel_marking();
2170 
2171   ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
2172 }
2173 
2174 bool ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
2175   if (try_cancel_gc(cause)) {
2176     FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
2177     log_info(gc,thread)("%s", msg.buffer());
2178     Events::log(Thread::current(), "%s", msg.buffer());
2179     _cancel_requested_time = os::elapsedTime();
2180     return true;
2181   }
2182   return false;
2183 }
2184 
2185 uint ShenandoahHeap::max_workers() {
2186   return _max_workers;
2187 }
2188 
2189 void ShenandoahHeap::stop() {
  // The shutdown sequence should be able to terminate even while a GC is running.
2191 
2192   // Step 0. Notify policy to disable event recording and prevent visiting gc threads during shutdown
2193   _shenandoah_policy->record_shutdown();
2194 
2195   // Step 1. Stop reporting on gc thread cpu utilization
2196   mmu_tracker()->stop();
2197 
2198   // Step 2. Wait until GC worker exits normally (this will cancel any ongoing GC).
2199   control_thread()->stop();
2200 
  // Step 3. Shut down the uncommit thread.
2202   if (_uncommit_thread != nullptr) {
2203     _uncommit_thread->stop();
2204   }
2205 }
2206 
2207 void ShenandoahHeap::stw_unload_classes(bool full_gc) {
2208   if (!unload_classes()) return;
2209   ClassUnloadingContext ctx(_workers->active_workers(),
2210                             true /* unregister_nmethods_during_purge */,
2211                             false /* lock_nmethod_free_separately */);
2212 
2213   // Unload classes and purge SystemDictionary.
2214   {
2215     ShenandoahPhaseTimings::Phase phase = full_gc ?
2216                                           ShenandoahPhaseTimings::full_gc_purge_class_unload :
2217                                           ShenandoahPhaseTimings::degen_gc_purge_class_unload;
2218     ShenandoahIsAliveSelector is_alive;
2219     {
2220       CodeCache::UnlinkingScope scope(is_alive.is_alive_closure());
2221       ShenandoahGCPhase gc_phase(phase);
2222       ShenandoahGCWorkerPhase worker_phase(phase);
2223       bool unloading_occurred = SystemDictionary::do_unloading(gc_timer());
2224 
2225       // Clean JVMCI metadata handles.
2226       JVMCI_ONLY(JVMCI::do_unloading(unloading_occurred));
2227 
2228       ShenandoahClassUnloadingTask unlink_task(phase, unloading_occurred);
2229       _workers->run_task(&unlink_task);
2230     }
    // Release unloaded nmethods' memory.
2232     ClassUnloadingContext::context()->purge_and_free_nmethods();
2233   }
2234 
2235   {
2236     ShenandoahGCPhase phase(full_gc ?
2237                             ShenandoahPhaseTimings::full_gc_purge_cldg :
2238                             ShenandoahPhaseTimings::degen_gc_purge_cldg);
2239     ClassLoaderDataGraph::purge(true /* at_safepoint */);
2240   }
2241   // Resize and verify metaspace
2242   MetaspaceGC::compute_new_size();
2243   DEBUG_ONLY(MetaspaceUtils::verify();)
2244 }
2245 
2246 // Weak roots are either pre-evacuated (final mark) or updated (final update refs),
2247 // so they should not have forwarded oops.
// However, we do need to "null" dead oops in the roots, if that cannot be done
// in concurrent cycles.
2250 void ShenandoahHeap::stw_process_weak_roots(bool full_gc) {
2251   uint num_workers = _workers->active_workers();
2252   ShenandoahPhaseTimings::Phase timing_phase = full_gc ?
2253                                                ShenandoahPhaseTimings::full_gc_purge_weak_par :
2254                                                ShenandoahPhaseTimings::degen_gc_purge_weak_par;
2255   ShenandoahGCPhase phase(timing_phase);
2256   ShenandoahGCWorkerPhase worker_phase(timing_phase);
2257   // Cleanup weak roots
2258   if (has_forwarded_objects()) {
2259     ShenandoahForwardedIsAliveClosure is_alive;
2260     ShenandoahNonConcUpdateRefsClosure keep_alive;
2261     ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveClosure, ShenandoahNonConcUpdateRefsClosure>
2262       cleaning_task(timing_phase, &is_alive, &keep_alive, num_workers);
2263     _workers->run_task(&cleaning_task);
2264   } else {
2265     ShenandoahIsAliveClosure is_alive;
2266 #ifdef ASSERT
2267     ShenandoahAssertNotForwardedClosure verify_cl;
2268     ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, ShenandoahAssertNotForwardedClosure>
2269       cleaning_task(timing_phase, &is_alive, &verify_cl, num_workers);
2270 #else
2271     ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, DoNothingClosure>
2272       cleaning_task(timing_phase, &is_alive, &do_nothing_cl, num_workers);
2273 #endif
2274     _workers->run_task(&cleaning_task);
2275   }
2276 }
2277 
2278 void ShenandoahHeap::parallel_cleaning(ShenandoahGeneration* generation, bool full_gc) {
2279   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2280   assert(is_stw_gc_in_progress(), "Only for Degenerated and Full GC");
2281   ShenandoahGCPhase phase(full_gc ?
2282                           ShenandoahPhaseTimings::full_gc_purge :
2283                           ShenandoahPhaseTimings::degen_gc_purge);
2284   stw_weak_refs(generation, full_gc);
2285   stw_process_weak_roots(full_gc);
2286   stw_unload_classes(full_gc);
2287 }
2288 
2289 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
2290   set_gc_state_at_safepoint(HAS_FORWARDED, cond);
2291 }
2292 
2293 void ShenandoahHeap::set_unload_classes(bool uc) {
2294   _unload_classes.set_cond(uc);
2295 }
2296 
2297 bool ShenandoahHeap::unload_classes() const {
2298   return _unload_classes.is_set();
2299 }
2300 
2301 address ShenandoahHeap::in_cset_fast_test_addr() {
2302   ShenandoahHeap* heap = ShenandoahHeap::heap();
2303   assert(heap->collection_set() != nullptr, "Sanity");
2304   return (address) heap->collection_set()->biased_map_address();
2305 }
2306 
2307 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
  // It is important to call force_alloc_rate_sample() before the associated generation's bytes_allocated has been reset.
  // Note that there is no lock to prevent additional allocations between sampling bytes_allocated_since_gc_start() and
  // reset_bytes_allocated_since_gc_start().  If additional allocations happen, they will be ignored in the average
  // allocation rate computations.  This effect is considered to be negligible.
2312 
  // unaccounted_bytes is the number of bytes not accounted for by our forced sample.  If the sample interval is too short,
  // the "forced sample" will not happen, and any recently allocated bytes are "unaccounted for".  We pretend these
  // bytes are allocated after the start of the subsequent gc.
2316   size_t unaccounted_bytes;
2317   ShenandoahFreeSet* _free_set = free_set();
2318   size_t bytes_allocated = _free_set->get_bytes_allocated_since_gc_start();
2319   if (mode()->is_generational()) {
2320     unaccounted_bytes = young_generation()->heuristics()->force_alloc_rate_sample(bytes_allocated);
2321   } else {
2322     // Single-gen Shenandoah uses global heuristics.
2323     unaccounted_bytes = heuristics()->force_alloc_rate_sample(bytes_allocated);
2324   }
2325   ShenandoahHeapLocker locker(lock());
2326   _free_set->reset_bytes_allocated_since_gc_start(unaccounted_bytes);
2327 }
2328 
2329 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
2330   _degenerated_gc_in_progress.set_cond(in_progress);
2331 }
2332 
2333 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2334   _full_gc_in_progress.set_cond(in_progress);
2335 }
2336 
2337 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2338   assert (is_full_gc_in_progress(), "should be");
2339   _full_gc_move_in_progress.set_cond(in_progress);
2340 }
2341 
2342 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2343   set_gc_state_at_safepoint(UPDATE_REFS, in_progress);
2344 }
2345 
2346 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2347   ShenandoahCodeRoots::register_nmethod(nm);
2348 }
2349 
2350 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
2351   ShenandoahCodeRoots::unregister_nmethod(nm);
2352 }
2353 
2354 void ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
2355   heap_region_containing(o)->record_pin();
2356 }
2357 
2358 void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
2359   ShenandoahHeapRegion* r = heap_region_containing(o);
2360   assert(r != nullptr, "Sanity");
2361   assert(r->pin_count() > 0, "Region %zu should have non-zero pins", r->index());
2362   r->record_unpin();
2363 }
2364 
2365 void ShenandoahHeap::sync_pinned_region_status() {
2366   ShenandoahHeapLocker locker(lock());
2367 
2368   for (size_t i = 0; i < num_regions(); i++) {
2369     ShenandoahHeapRegion *r = get_region(i);
2370     if (r->is_active()) {
2371       if (r->is_pinned()) {
2372         if (r->pin_count() == 0) {
2373           r->make_unpinned();
2374         }
2375       } else {
2376         if (r->pin_count() > 0) {
2377           r->make_pinned();
2378         }
2379       }
2380     }
2381   }
2382 
2383   assert_pinned_region_status();
2384 }
2385 
2386 #ifdef ASSERT
2387 void ShenandoahHeap::assert_pinned_region_status() const {
2388   assert_pinned_region_status(global_generation());
2389 }
2390 
2391 void ShenandoahHeap::assert_pinned_region_status(ShenandoahGeneration* generation) const {
2392   for (size_t i = 0; i < num_regions(); i++) {
2393     ShenandoahHeapRegion* r = get_region(i);
2394     if (generation->contains(r)) {
2395       assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
2396              "Region %zu pinning status is inconsistent", i);
2397     }
2398   }
2399 }
2400 #endif
2401 
2402 ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
2403   return _gc_timer;
2404 }
2405 
2406 void ShenandoahHeap::prepare_concurrent_roots() {
2407   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2408   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2409   set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
2410   set_concurrent_weak_root_in_progress(true);
2411   if (unload_classes()) {
2412     _unloader.prepare();
2413   }
2414 }
2415 
2416 void ShenandoahHeap::finish_concurrent_roots() {
2417   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2418   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2419   if (unload_classes()) {
2420     _unloader.finish();
2421   }
2422 }
2423 
2424 #ifdef ASSERT
2425 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
2426   assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
2427 
2428   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
2429     // Use ParallelGCThreads inside safepoints
2430     assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads (%u) within safepoint, not %u",
2431            ParallelGCThreads, nworkers);
2432   } else {
2433     // Use ConcGCThreads outside safepoints
    assert(nworkers == ConcGCThreads, "Use ConcGCThreads (%u) outside safepoints, not %u",
2435            ConcGCThreads, nworkers);
2436   }
2437 }
2438 #endif
2439 
2440 ShenandoahVerifier* ShenandoahHeap::verifier() {
2441   guarantee(ShenandoahVerify, "Should be enabled");
2442   assert (_verifier != nullptr, "sanity");
2443   return _verifier;
2444 }
2445 
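// Walks heap regions and updates references in live objects up to each region's update
// watermark, skipping collection set and inactive regions. The CONCURRENT parameter
// selects the worker session type and the update-refs closure to use.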
2446 template<bool CONCURRENT>
2447 class ShenandoahUpdateHeapRefsTask : public WorkerTask {
2448 private:
2449   ShenandoahHeap* _heap;
2450   ShenandoahRegionIterator* _regions;
2451 public:
2452   explicit ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions) :
2453     WorkerTask("Shenandoah Update References"),
2454     _heap(ShenandoahHeap::heap()),
2455     _regions(regions) {
2456   }
2457 
2458   void work(uint worker_id) {
2459     if (CONCURRENT) {
2460       ShenandoahConcurrentWorkerSession worker_session(worker_id);
2461       ShenandoahSuspendibleThreadSetJoiner stsj;
2462       do_work<ShenandoahConcUpdateRefsClosure>(worker_id);
2463     } else {
2464       ShenandoahParallelWorkerSession worker_session(worker_id);
2465       do_work<ShenandoahNonConcUpdateRefsClosure>(worker_id);
2466     }
2467   }
2468 
2469 private:
2470   template<class T>
2471   void do_work(uint worker_id) {
2472     if (CONCURRENT && (worker_id == 0)) {
2473       // We ask the first worker to replenish the Mutator free set by moving regions previously reserved to hold the
2474       // results of evacuation.  These reserves are no longer necessary because evacuation has completed.
2475       size_t cset_regions = _heap->collection_set()->count();
2476 
2477       // Now that evacuation is done, we can reassign any regions that had been reserved to hold the results of evacuation
2478       // to the mutator free set.  At the end of GC, we will have cset_regions newly evacuated fully empty regions from
2479       // which we will be able to replenish the Collector free set and the OldCollector free set in preparation for the
2480       // next GC cycle.
2481       _heap->free_set()->move_regions_from_collector_to_mutator(cset_regions);
2482     }
2483     // If !CONCURRENT, there's no value in expanding Mutator free set
2484     T cl;
2485     ShenandoahHeapRegion* r = _regions->next();
2486     while (r != nullptr) {
2487       HeapWord* update_watermark = r->get_update_watermark();
2488       assert (update_watermark >= r->bottom(), "sanity");
2489       if (r->is_active() && !r->is_cset()) {
2490         _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2491       }
2492       if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
2493         return;
2494       }
2495       r = _regions->next();
2496     }
2497   }
2498 };
2499 
2500 void ShenandoahHeap::update_heap_references(ShenandoahGeneration* generation, bool concurrent) {
2501   assert(generation->is_global(), "Should only get global generation here");
2502   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2503 
2504   if (concurrent) {
2505     ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator);
2506     workers()->run_task(&task);
2507   } else {
2508     ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator);
2509     workers()->run_task(&task);
2510   }
2511 }
2512 
2513 void ShenandoahHeap::update_heap_region_states(bool concurrent) {
2514   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2515   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2516 
2517   {
2518     ShenandoahGCPhase phase(concurrent ?
2519                             ShenandoahPhaseTimings::final_update_refs_update_region_states :
2520                             ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
2521 
2522     final_update_refs_update_region_states();
2523 
2524     assert_pinned_region_status();
2525   }
2526 
2527   {
2528     ShenandoahGCPhase phase(concurrent ?
2529                             ShenandoahPhaseTimings::final_update_refs_trash_cset :
2530                             ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
2531     trash_cset_regions();
2532   }
2533 }
2534 
2535 void ShenandoahHeap::final_update_refs_update_region_states() {
2536   ShenandoahSynchronizePinnedRegionStates cl;
2537   parallel_heap_region_iterate(&cl);
2538 }
2539 
2540 void ShenandoahHeap::rebuild_free_set_within_phase() {
2541   ShenandoahHeapLocker locker(lock());
2542   size_t young_trashed_regions, old_trashed_regions, first_old_region, last_old_region, old_region_count;
2543   _free_set->prepare_to_rebuild(young_trashed_regions, old_trashed_regions, first_old_region, last_old_region, old_region_count);
2544   // If there are no old regions, first_old_region will be greater than last_old_region
2545   assert((first_old_region > last_old_region) ||
2546          ((last_old_region + 1 - first_old_region >= old_region_count) &&
2547           get_region(first_old_region)->is_old() && get_region(last_old_region)->is_old()),
2548          "sanity: old_region_count: %zu, first_old_region: %zu, last_old_region: %zu",
2549          old_region_count, first_old_region, last_old_region);
2550 
2551   if (mode()->is_generational()) {
2552 #ifdef ASSERT
2553     if (ShenandoahVerify) {
2554       verifier()->verify_before_rebuilding_free_set();
2555     }
2556 #endif
2557 
2558     // The computation of bytes_of_allocation_runway_before_gc_trigger is quite conservative so consider all of this
2559     // available for transfer to old. Note that transfer of humongous regions does not impact available.
2560     ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
2561     size_t allocation_runway =
2562       gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_trashed_regions);
2563     gen_heap->compute_old_generation_balance(allocation_runway, old_trashed_regions, young_trashed_regions);
2564   }
2565   // Rebuild free set based on adjusted generation sizes.
2566   _free_set->finish_rebuild(young_trashed_regions, old_trashed_regions, old_region_count);
2567 
2568   if (mode()->is_generational()) {
2569     ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
2570     ShenandoahOldGeneration* old_gen = gen_heap->old_generation();
2571     old_gen->heuristics()->evaluate_triggers(first_old_region, last_old_region, old_region_count, num_regions());
2572   }
2573 }
2574 
2575 void ShenandoahHeap::rebuild_free_set(bool concurrent) {
2576   ShenandoahGCPhase phase(concurrent ?
2577                           ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
2578                           ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
2579   rebuild_free_set_within_phase();
2580 }
2581 
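// The marking bitmap is committed in slices, each covering _bitmap_regions_per_slice
// heap regions. Returns true if any region sharing the slice with r (optionally skipping
// r itself) is committed, in which case the corresponding bitmap slice is committed too.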
2582 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2583   size_t slice = r->index() / _bitmap_regions_per_slice;
2584 
2585   size_t regions_from = _bitmap_regions_per_slice * slice;
2586   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2587   for (size_t g = regions_from; g < regions_to; g++) {
2588     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2589     if (skip_self && g == r->index()) continue;
2590     if (get_region(g)->is_committed()) {
2591       return true;
2592     }
2593   }
2594   return false;
2595 }
2596 
2597 void ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2598   shenandoah_assert_heaplocked();
2599   assert(!is_bitmap_region_special(), "Not for special memory");
2600 
2601   if (is_bitmap_slice_committed(r, true)) {
    // Some other region from the group is already committed, meaning the bitmap
    // slice is already committed, so we exit right away.
2604     return;
2605   }
2606 
2607   // Commit the bitmap slice:
2608   size_t slice = r->index() / _bitmap_regions_per_slice;
2609   size_t off = _bitmap_bytes_per_slice * slice;
2610   size_t len = _bitmap_bytes_per_slice;
2611   char* start = (char*) _bitmap_region.start() + off;
2612 
2613   os::commit_memory_or_exit(start, len, false, "Unable to commit bitmap slice");
2614 
2615   if (AlwaysPreTouch) {
2616     os::pretouch_memory(start, start + len, _pretouch_bitmap_page_size);
2617   }
2618 }
2619 
2620 void ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2621   shenandoah_assert_heaplocked();
2622   assert(!is_bitmap_region_special(), "Not for special memory");
2623 
2624   if (is_bitmap_slice_committed(r, true)) {
    // Some other region from the group is still committed, meaning the bitmap
    // slice should stay committed, so we exit right away.
2627     return;
2628   }
2629 
2630   // Uncommit the bitmap slice:
2631   size_t slice = r->index() / _bitmap_regions_per_slice;
2632   size_t off = _bitmap_bytes_per_slice * slice;
2633   size_t len = _bitmap_bytes_per_slice;
2634 
2635   char* addr = (char*) _bitmap_region.start() + off;
2636   os::uncommit_memory(addr, len);
2637 }
2638 
2639 void ShenandoahHeap::forbid_uncommit() {
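  // Prevent the uncommit thread (if it exists) from uncommitting regions until
  // allow_uncommit() is called.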
2640   if (_uncommit_thread != nullptr) {
2641     _uncommit_thread->forbid_uncommit();
2642   }
2643 }
2644 
2645 void ShenandoahHeap::allow_uncommit() {
2646   if (_uncommit_thread != nullptr) {
2647     _uncommit_thread->allow_uncommit();
2648   }
2649 }
2650 
2651 #ifdef ASSERT
2652 bool ShenandoahHeap::is_uncommit_in_progress() {
2653   if (_uncommit_thread != nullptr) {
2654     return _uncommit_thread->is_uncommit_in_progress();
2655   }
2656   return false;
2657 }
2658 #endif
2659 
2660 void ShenandoahHeap::safepoint_synchronize_begin() {
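  // Called when a safepoint begins: notify the stack watermark machinery and bring the
  // suspendible concurrent GC threads to a stop; they are released in safepoint_synchronize_end().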
2661   StackWatermarkSet::safepoint_synchronize_begin();
2662   SuspendibleThreadSet::synchronize();
2663 }
2664 
2665 void ShenandoahHeap::safepoint_synchronize_end() {
2666   SuspendibleThreadSet::desynchronize();
2667 }
2668 
2669 void ShenandoahHeap::try_inject_alloc_failure() {
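  // Diagnostic support for ShenandoahAllocFailureALot: on roughly 5% of calls, arm the
  // injection flag so that a subsequent allocation pretends to fail, then sleep briefly
  // to see whether the resulting GC cancellation has taken effect.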
2670   if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2671     _inject_alloc_failure.set();
2672     os::naked_short_sleep(1);
2673     if (cancelled_gc()) {
2674       log_info(gc)("Allocation failure was successfully injected");
2675     }
2676   }
2677 }
2678 
2679 bool ShenandoahHeap::should_inject_alloc_failure() {
2680   return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2681 }
2682 
2683 void ShenandoahHeap::initialize_serviceability() {
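  // Register the single Shenandoah memory pool with both memory managers, so concurrent
  // cycles and STW collections are reported against the same pool.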
2684   _memory_pool = new ShenandoahMemoryPool(this);
2685   _cycle_memory_manager.add_pool(_memory_pool);
2686   _stw_memory_manager.add_pool(_memory_pool);
2687 }
2688 
2689 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2690   GrowableArray<GCMemoryManager*> memory_managers(2);
2691   memory_managers.append(&_cycle_memory_manager);
2692   memory_managers.append(&_stw_memory_manager);
2693   return memory_managers;
2694 }
2695 
2696 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2697   GrowableArray<MemoryPool*> memory_pools(1);
2698   memory_pools.append(_memory_pool);
2699   return memory_pools;
2700 }
2701 
2702 MemoryUsage ShenandoahHeap::memory_usage() {
2703   assert(_initial_size <= ShenandoahHeap::heap()->max_capacity(), "sanity");
2704   assert(used() <= ShenandoahHeap::heap()->max_capacity(), "sanity");
2705   assert(committed() <= ShenandoahHeap::heap()->max_capacity(), "sanity");
2706   return MemoryUsage(_initial_size, used(), committed(), max_capacity());
2707 }
2708 
2709 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2710   _heap(ShenandoahHeap::heap()),
2711   _index(0) {}
2712 
2713 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2714   _heap(heap),
2715   _index(0) {}
2716 
2717 void ShenandoahRegionIterator::reset() {
2718   _index.store_relaxed(0);
2719 }
2720 
2721 bool ShenandoahRegionIterator::has_next() const {
2722   return _index.load_relaxed() < _heap->num_regions();
2723 }
2724 
2725 ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
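  // Hand out this worker's per-region liveness cache. The previous user must have flushed
  // it back to zero; the debug-only loop below verifies that.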
2726 #ifdef ASSERT
2727   assert(_liveness_cache != nullptr, "sanity");
2728   assert(worker_id < _max_workers, "sanity");
2729   for (uint i = 0; i < num_regions(); i++) {
2730     assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
2731   }
2732 #endif
2733   return _liveness_cache[worker_id];
2734 }
2735 
2736 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
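  // Fold this worker's cached per-region live data into the regions' global live counters,
  // and reset the cache so it can be handed out again.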
2737   assert(worker_id < _max_workers, "sanity");
2738   assert(_liveness_cache != nullptr, "sanity");
2739   ShenandoahLiveData* ld = _liveness_cache[worker_id];
2740   for (uint i = 0; i < num_regions(); i++) {
2741     ShenandoahLiveData live = ld[i];
2742     if (live > 0) {
2743       ShenandoahHeapRegion* r = get_region(i);
2744       r->increase_live_data_gc_words(live);
2745       ld[i] = 0;
2746     }
2747   }
2748 }
2749 
2750 bool ShenandoahHeap::requires_barriers(stackChunkOop obj) const {
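  // When the heap is idle, there are no stale references anywhere, so the chunk needs no barriers.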
2751   if (is_idle()) return false;
2752 
  // Objects allocated after marking start are implicitly alive and don't need any barriers
  // during the marking phase.
2755   if (is_concurrent_mark_in_progress() &&
2756      !marking_context()->allocated_after_mark_start(obj)) {
2757     return true;
2758   }
2759 
  // Cannot guarantee that obj is deeply good: while forwarded objects exist, references
  // inside the chunk may still point to from-space copies.
2761   if (has_forwarded_objects()) {
2762     return true;
2763   }
2764 
2765   return false;
2766 }
2767 
2768 HeapWord* ShenandoahHeap::allocate_loaded_archive_space(size_t size) {
2769 #if INCLUDE_CDS_JAVA_HEAP
  // CDS wants a raw contiguous memory range to load a bunch of objects itself.
  // This is an unusual request, since all requested regions should be regular, not humongous.
  //
  // CDS guarantees that no objects straddle multiple regions, as long as regions are at least
  // as large as MIN_GC_REGION_ALIGNMENT.
2775   guarantee(ShenandoahHeapRegion::region_size_bytes() >= AOTMappedHeapWriter::MIN_GC_REGION_ALIGNMENT, "Must be");
2776 
2777   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_cds(size);
2778   return allocate_memory(req);
2779 #else
2780   assert(false, "Archive heap loader should not be available, should not be here");
2781   return nullptr;
2782 #endif // INCLUDE_CDS_JAVA_HEAP
2783 }
2784 
2785 void ShenandoahHeap::complete_loaded_archive_space(MemRegion archive_space) {
  // Nothing to do here, except checking that the heap looks fine.
2787 #ifdef ASSERT
2788   HeapWord* start = archive_space.start();
2789   HeapWord* end = archive_space.end();
2790 
2791   // No unclaimed space between the objects.
2792   // Objects are properly allocated in correct regions.
2793   HeapWord* cur = start;
2794   while (cur < end) {
    oop obj = cast_to_oop(cur);
    shenandoah_assert_in_correct_region(nullptr, obj);
    cur += obj->size();
2798   }
2799 
2800   // No unclaimed tail at the end of archive space.
2801   assert(cur == end,
2802          "Archive space should be fully used: " PTR_FORMAT " " PTR_FORMAT,
2803          p2i(cur), p2i(end));
2804 
2805   // All regions in contiguous space have good state.
2806   size_t begin_reg_idx = heap_region_index_containing(start);
2807   size_t end_reg_idx   = heap_region_index_containing(end);
2808 
2809   for (size_t idx = begin_reg_idx; idx <= end_reg_idx; idx++) {
2810     ShenandoahHeapRegion* r = get_region(idx);
2811     assert(r->is_regular(), "Must be regular");
2812     assert(r->is_young(), "Must be young");
2813     assert(idx == end_reg_idx || r->top() == r->end(),
2814            "All regions except the last one should be full: " PTR_FORMAT " " PTR_FORMAT,
2815            p2i(r->top()), p2i(r->end()));
2816     assert(idx != begin_reg_idx || r->bottom() == start,
2817            "Archive space start should be at the bottom of first region: " PTR_FORMAT " " PTR_FORMAT,
2818            p2i(r->bottom()), p2i(start));
2819     assert(idx != end_reg_idx || r->top() == end,
2820            "Archive space end should be at the top of last region: " PTR_FORMAT " " PTR_FORMAT,
2821            p2i(r->top()), p2i(end));
2822   }
2823 
2824 #endif
2825 }
2826 
2827 ShenandoahGeneration* ShenandoahHeap::generation_for(ShenandoahAffiliation affiliation) const {
2828   if (!mode()->is_generational()) {
2829     return global_generation();
2830   } else if (affiliation == YOUNG_GENERATION) {
2831     return young_generation();
2832   } else if (affiliation == OLD_GENERATION) {
2833     return old_generation();
2834   }
2835 
2836   ShouldNotReachHere();
2837   return nullptr;
2838 }
2839 
2840 void ShenandoahHeap::log_heap_status(const char* msg) const {
2841   if (mode()->is_generational()) {
2842     young_generation()->log_status(msg);
2843     old_generation()->log_status(msg);
2844   } else {
2845     global_generation()->log_status(msg);
2846   }
2847 }
2848 
2849 ShenandoahHeapLocker::ShenandoahHeapLocker(ShenandoahHeapLock* lock, bool allow_block_for_safepoint) : _lock(lock) {
2850 #ifdef ASSERT
2851   ShenandoahFreeSet* free_set = ShenandoahHeap::heap()->free_set();
  // free_set is nullptr only in the pre-initialized state
  assert(free_set == nullptr || !free_set->rebuild_lock()->owned_by_self(), "Deadlock: must not acquire the heap lock while holding the free-set rebuild lock");
2854   assert(_lock != nullptr, "Must not");
2855 #endif
2856   _lock->lock(allow_block_for_safepoint);
2857 }