1 /*
   2  * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.
   4  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
   5  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   6  *
   7  * This code is free software; you can redistribute it and/or modify it
   8  * under the terms of the GNU General Public License version 2 only, as
   9  * published by the Free Software Foundation.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 
  28 #include "cds/archiveHeapWriter.hpp"
  29 #include "classfile/systemDictionary.hpp"
  30 
  31 #include "gc/shared/classUnloadingContext.hpp"
  32 #include "gc/shared/fullGCForwarding.hpp"
  33 #include "gc/shared/gcArguments.hpp"
  34 #include "gc/shared/gcTimer.hpp"
  35 #include "gc/shared/gcTraceTime.inline.hpp"
  36 #include "gc/shared/locationPrinter.inline.hpp"
  37 #include "gc/shared/memAllocator.hpp"
  38 #include "gc/shared/plab.hpp"
  39 #include "gc/shared/tlab_globals.hpp"
  40 
  41 #include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
  42 #include "gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp"
  43 #include "gc/shenandoah/shenandoahAllocRequest.hpp"
  44 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  45 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  46 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  47 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  48 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
  49 #include "gc/shenandoah/shenandoahControlThread.hpp"
  50 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  51 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  52 #include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
  53 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
  54 #include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
  55 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  56 #include "gc/shenandoah/shenandoahHeapRegionClosures.hpp"
  57 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  58 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  59 #include "gc/shenandoah/shenandoahInitLogger.hpp"
  60 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  61 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
  62 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  63 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  64 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
  65 #include "gc/shenandoah/shenandoahPadding.hpp"
  66 #include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
  67 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  68 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  69 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  70 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
  71 #include "gc/shenandoah/shenandoahSTWMark.hpp"
  72 #include "gc/shenandoah/shenandoahUncommitThread.hpp"
  73 #include "gc/shenandoah/shenandoahUtils.hpp"
  74 #include "gc/shenandoah/shenandoahVerifier.hpp"
  75 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  76 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  77 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  78 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  79 #include "gc/shenandoah/mode/shenandoahGenerationalMode.hpp"
  80 #include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
  81 #include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
  82 
  83 #if INCLUDE_JFR
  84 #include "gc/shenandoah/shenandoahJfrSupport.hpp"
  85 #endif
  86 
  87 #include "memory/allocation.hpp"
  89 #include "memory/classLoaderMetaspace.hpp"
  90 #include "memory/memoryReserver.hpp"
  91 #include "memory/metaspaceUtils.hpp"
  92 #include "memory/universe.hpp"
  93 #include "nmt/mallocTracker.hpp"
  94 #include "nmt/memTracker.hpp"
  95 #include "oops/compressedOops.inline.hpp"
  96 #include "prims/jvmtiTagMap.hpp"
  97 #include "runtime/atomic.hpp"
  98 #include "runtime/globals.hpp"
  99 #include "runtime/interfaceSupport.inline.hpp"
 100 #include "runtime/java.hpp"
 101 #include "runtime/orderAccess.hpp"
 102 #include "runtime/safepointMechanism.hpp"
 103 #include "runtime/stackWatermarkSet.hpp"
 104 #include "runtime/threads.hpp"
 105 #include "runtime/vmThread.hpp"
 106 #include "utilities/globalDefinitions.hpp"
 107 #include "utilities/events.hpp"
 108 #include "utilities/powerOfTwo.hpp"
 109 
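// Pre-touches the committed heap regions in parallel so their pages are backed by
// physical memory before the application starts allocating, avoiding page faults later.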
 110 class ShenandoahPretouchHeapTask : public WorkerTask {
 111 private:
 112   ShenandoahRegionIterator _regions;
 113   const size_t _page_size;
 114 public:
 115   ShenandoahPretouchHeapTask(size_t page_size) :
 116     WorkerTask("Shenandoah Pretouch Heap"),
 117     _page_size(page_size) {}
 118 
 119   virtual void work(uint worker_id) {
 120     ShenandoahHeapRegion* r = _regions.next();
 121     while (r != nullptr) {
 122       if (r->is_committed()) {
 123         os::pretouch_memory(r->bottom(), r->end(), _page_size);
 124       }
 125       r = _regions.next();
 126     }
 127   }
 128 };
 129 
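// Pre-touches the portions of the marking bitmap that correspond to committed heap
// regions, distributing regions to workers via a shared region iterator, like the
// heap pretouch task above.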
 130 class ShenandoahPretouchBitmapTask : public WorkerTask {
 131 private:
 132   ShenandoahRegionIterator _regions;
 133   char* _bitmap_base;
 134   const size_t _bitmap_size;
 135   const size_t _page_size;
 136 public:
 137   ShenandoahPretouchBitmapTask(char* bitmap_base, size_t bitmap_size, size_t page_size) :
 138     WorkerTask("Shenandoah Pretouch Bitmap"),
 139     _bitmap_base(bitmap_base),
 140     _bitmap_size(bitmap_size),
 141     _page_size(page_size) {}
 142 
 143   virtual void work(uint worker_id) {
 144     ShenandoahHeapRegion* r = _regions.next();
 145     while (r != nullptr) {
 146       size_t start = r->index()       * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 147       size_t end   = (r->index() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      assert (end <= _bitmap_size, "end is sane: %zu <= %zu", end, _bitmap_size);
 149 
 150       if (r->is_committed()) {
 151         os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);
 152       }
 153 
 154       r = _regions.next();
 155     }
 156   }
 157 };
 158 
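// Helper to reserve virtual memory with a preferred page size; exits the VM if the
// reservation fails. The size is aligned up so large and normal pages are not mixed.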
 159 static ReservedSpace reserve(size_t size, size_t preferred_page_size) {
 160   // When a page size is given we don't want to mix large
 161   // and normal pages. If the size is not a multiple of the
 162   // page size it will be aligned up to achieve this.
 163   size_t alignment = os::vm_allocation_granularity();
 164   if (preferred_page_size != os::vm_page_size()) {
 165     alignment = MAX2(preferred_page_size, alignment);
 166     size = align_up(size, alignment);
 167   }
 168 
 169   const ReservedSpace reserved = MemoryReserver::reserve(size, alignment, preferred_page_size);
 170   if (!reserved.is_reserved()) {
 171     vm_exit_during_initialization("Could not reserve space");
 172   }
 173   return reserved;
 174 }
 175 
 176 jint ShenandoahHeap::initialize() {
 177   //
 178   // Figure out heap sizing
 179   //
 180 
 181   size_t init_byte_size = InitialHeapSize;
 182   size_t min_byte_size  = MinHeapSize;
 183   size_t max_byte_size  = MaxHeapSize;
 184   size_t heap_alignment = HeapAlignment;
 185 
 186   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 187 
 188   Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
 189   Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
 190 
 191   _num_regions = ShenandoahHeapRegion::region_count();
 192   assert(_num_regions == (max_byte_size / reg_size_bytes),
 193          "Regions should cover entire heap exactly: %zu != %zu/%zu",
 194          _num_regions, max_byte_size, reg_size_bytes);
 195 
 196   size_t num_committed_regions = init_byte_size / reg_size_bytes;
 197   num_committed_regions = MIN2(num_committed_regions, _num_regions);
 198   assert(num_committed_regions <= _num_regions, "sanity");
 199   _initial_size = num_committed_regions * reg_size_bytes;
 200 
 201   size_t num_min_regions = min_byte_size / reg_size_bytes;
 202   num_min_regions = MIN2(num_min_regions, _num_regions);
 203   assert(num_min_regions <= _num_regions, "sanity");
 204   _minimum_size = num_min_regions * reg_size_bytes;
 205 
 206   // Default to max heap size.
 207   _soft_max_size = _num_regions * reg_size_bytes;
 208 
 209   _committed = _initial_size;
 210 
 211   size_t heap_page_size   = UseLargePages ? os::large_page_size() : os::vm_page_size();
 212   size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 213   size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 214 
 215   //
 216   // Reserve and commit memory for heap
 217   //
 218 
 219   ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
 220   initialize_reserved_region(heap_rs);
 221   _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
 222   _heap_region_special = heap_rs.special();
 223 
 224   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
 225          "Misaligned heap: " PTR_FORMAT, p2i(base()));
 226   os::trace_page_sizes_for_requested_size("Heap",
 227                                           max_byte_size, heap_alignment,
 228                                           heap_rs.base(),
 229                                           heap_rs.size(), heap_rs.page_size());
 230 
 231 #if SHENANDOAH_OPTIMIZED_MARKTASK
 232   // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
 233   // Fail if we ever attempt to address more than we can.
 234   if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
 235     FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
 236                           "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
 237                           "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
 238                 p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
 239     vm_exit_during_initialization("Fatal Error", buf);
 240   }
 241 #endif
 242 
 243   ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
 244   if (!_heap_region_special) {
 245     os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
 246                               "Cannot commit heap memory");
 247   }
 248 
 249   BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this, _heap_region));
 250 
  // Now that we know the number of regions and the heap sizes, initialize the heuristics.
 252   initialize_heuristics();
 253 
 254   assert(_heap_region.byte_size() == heap_rs.size(), "Need to know reserved size for card table");
 255 
 256   //
 257   // Worker threads must be initialized after the barrier is configured
 258   //
 259   _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
 260   if (_workers == nullptr) {
 261     vm_exit_during_initialization("Failed necessary allocation.");
 262   } else {
 263     _workers->initialize_workers();
 264   }
 265 
 266   if (ParallelGCThreads > 1) {
 267     _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread", ParallelGCThreads);
 268     _safepoint_workers->initialize_workers();
 269   }
 270 
 271   //
 272   // Reserve and commit memory for bitmap(s)
 273   //
 274 
 275   size_t bitmap_size_orig = ShenandoahMarkBitMap::compute_size(heap_rs.size());
 276   _bitmap_size = align_up(bitmap_size_orig, bitmap_page_size);
 277 
 278   size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();
 279 
 280   guarantee(bitmap_bytes_per_region != 0,
 281             "Bitmap bytes per region should not be zero");
 282   guarantee(is_power_of_2(bitmap_bytes_per_region),
 283             "Bitmap bytes per region should be power of two: %zu", bitmap_bytes_per_region);
 284 
 285   if (bitmap_page_size > bitmap_bytes_per_region) {
 286     _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
 287     _bitmap_bytes_per_slice = bitmap_page_size;
 288   } else {
 289     _bitmap_regions_per_slice = 1;
 290     _bitmap_bytes_per_slice = bitmap_bytes_per_region;
 291   }
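  // Illustrative example (hypothetical numbers, not tied to any platform defaults):
  // if bitmap_bytes_per_region were 256 KB and the bitmap page size 2 MB, each slice
  // would span 2 MB / 256 KB = 8 regions and be committed 2 MB at a time. With 4 KB
  // pages, each region's bitmap portion would form its own slice.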
 292 
 293   guarantee(_bitmap_regions_per_slice >= 1,
 294             "Should have at least one region per slice: %zu",
 295             _bitmap_regions_per_slice);
 296 
 297   guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
 298             "Bitmap slices should be page-granular: bps = %zu, page size = %zu",
 299             _bitmap_bytes_per_slice, bitmap_page_size);
 300 
 301   ReservedSpace bitmap = reserve(_bitmap_size, bitmap_page_size);
 302   os::trace_page_sizes_for_requested_size("Mark Bitmap",
 303                                           bitmap_size_orig, bitmap_page_size,
 304                                           bitmap.base(),
 305                                           bitmap.size(), bitmap.page_size());
 306   MemTracker::record_virtual_memory_tag(bitmap, mtGC);
 307   _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
 308   _bitmap_region_special = bitmap.special();
 309 
 310   size_t bitmap_init_commit = _bitmap_bytes_per_slice *
 311     align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
 312   bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
 313   if (!_bitmap_region_special) {
 314     os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
 315                               "Cannot commit bitmap memory");
 316   }
 317 
 318   _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);
 319 
 320   if (ShenandoahVerify) {
 321     ReservedSpace verify_bitmap = reserve(_bitmap_size, bitmap_page_size);
 322     os::trace_page_sizes_for_requested_size("Verify Bitmap",
 323                                             bitmap_size_orig, bitmap_page_size,
 324                                             verify_bitmap.base(),
 325                                             verify_bitmap.size(), verify_bitmap.page_size());
 326     if (!verify_bitmap.special()) {
 327       os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
 328                                 "Cannot commit verification bitmap memory");
 329     }
 330     MemTracker::record_virtual_memory_tag(verify_bitmap, mtGC);
 331     MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
 332     _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
 333     _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
 334   }
 335 
 336   // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
 337   size_t aux_bitmap_page_size = bitmap_page_size;
 338 
 339   ReservedSpace aux_bitmap = reserve(_bitmap_size, aux_bitmap_page_size);
 340   os::trace_page_sizes_for_requested_size("Aux Bitmap",
 341                                           bitmap_size_orig, aux_bitmap_page_size,
 342                                           aux_bitmap.base(),
 343                                           aux_bitmap.size(), aux_bitmap.page_size());
 344   MemTracker::record_virtual_memory_tag(aux_bitmap, mtGC);
 345   _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
 346   _aux_bitmap_region_special = aux_bitmap.special();
 347   _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
 348 
 349   //
 350   // Create regions and region sets
 351   //
 352   size_t region_align = align_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE);
 353   size_t region_storage_size_orig = region_align * _num_regions;
 354   size_t region_storage_size = align_up(region_storage_size_orig,
 355                                         MAX2(region_page_size, os::vm_allocation_granularity()));
 356 
 357   ReservedSpace region_storage = reserve(region_storage_size, region_page_size);
 358   os::trace_page_sizes_for_requested_size("Region Storage",
 359                                           region_storage_size_orig, region_page_size,
 360                                           region_storage.base(),
 361                                           region_storage.size(), region_storage.page_size());
 362   MemTracker::record_virtual_memory_tag(region_storage, mtGC);
 363   if (!region_storage.special()) {
 364     os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false,
 365                               "Cannot commit region memory");
 366   }
 367 
  // Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks.
  // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there.
  // If not successful, bite the bullet and allocate at whatever address.
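  // (Roughly, the in-line cset membership test loads a byte from
  //  biased_map_address + (obj_addr >> region_size_bytes_shift()); keeping the map at a
  //  low address lets that base fit in simpler addressing modes on most platforms.)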
 371   {
 372     const size_t cset_align = MAX2<size_t>(os::vm_page_size(), os::vm_allocation_granularity());
 373     const size_t cset_size = align_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align);
 374     const size_t cset_page_size = os::vm_page_size();
 375 
 376     uintptr_t min = round_up_power_of_2(cset_align);
 377     uintptr_t max = (1u << 30u);
 378     ReservedSpace cset_rs;
 379 
 380     for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
 381       char* req_addr = (char*)addr;
 382       assert(is_aligned(req_addr, cset_align), "Should be aligned");
 383       cset_rs = MemoryReserver::reserve(req_addr, cset_size, cset_align, cset_page_size);
 384       if (cset_rs.is_reserved()) {
 385         assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
 386         _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 387         break;
 388       }
 389     }
 390 
 391     if (_collection_set == nullptr) {
 392       cset_rs = MemoryReserver::reserve(cset_size, cset_align, os::vm_page_size());
 393       if (!cset_rs.is_reserved()) {
 394         vm_exit_during_initialization("Cannot reserve memory for collection set");
 395       }
 396 
 397       _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 398     }
 399     os::trace_page_sizes_for_requested_size("Collection Set",
 400                                             cset_size, cset_page_size,
 401                                             cset_rs.base(),
 402                                             cset_rs.size(), cset_rs.page_size());
 403   }
 404 
 405   _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
 406   _affiliations = NEW_C_HEAP_ARRAY(uint8_t, _num_regions, mtGC);
 407   _free_set = new ShenandoahFreeSet(this, _num_regions);
 408 
 409   {
 410     ShenandoahHeapLocker locker(lock());
 411 
 412     for (size_t i = 0; i < _num_regions; i++) {
 413       HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
 414       bool is_committed = i < num_committed_regions;
 415       void* loc = region_storage.base() + i * region_align;
 416 
 417       ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
 418       assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");
 419 
 420       _marking_context->initialize_top_at_mark_start(r);
 421       _regions[i] = r;
 422       assert(!collection_set()->is_in(i), "New region should not be in collection set");
 423 
 424       _affiliations[i] = ShenandoahAffiliation::FREE;
 425     }
 426 
 427     // Initialize to complete
 428     _marking_context->mark_complete();
 429     size_t young_cset_regions, old_cset_regions;
 430 
    // We are initializing the free set; the cset region tallies are ignored here.
 432     size_t first_old, last_old, num_old;
 433     _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
 434     _free_set->finish_rebuild(young_cset_regions, old_cset_regions, num_old);
 435   }
 436 
 437   if (AlwaysPreTouch) {
    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before initialize() below zeroes it with the initializing thread. For any given region,
    // we touch the region and the corresponding bitmaps from the same thread.
 441     ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
 442 
 443     _pretouch_heap_page_size = heap_page_size;
 444     _pretouch_bitmap_page_size = bitmap_page_size;
 445 
    // OS memory managers may want to coalesce back-to-back pages. Make their jobs
    // simpler by pre-touching contiguous spaces (heap and bitmap) separately.
 448 
 449     ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, _pretouch_bitmap_page_size);
 450     _workers->run_task(&bcl);
 451 
 452     ShenandoahPretouchHeapTask hcl(_pretouch_heap_page_size);
 453     _workers->run_task(&hcl);
 454   }
 455 
 456   //
 457   // Initialize the rest of GC subsystems
 458   //
 459 
 460   _liveness_cache = NEW_C_HEAP_ARRAY(ShenandoahLiveData*, _max_workers, mtGC);
 461   for (uint worker = 0; worker < _max_workers; worker++) {
 462     _liveness_cache[worker] = NEW_C_HEAP_ARRAY(ShenandoahLiveData, _num_regions, mtGC);
 463     Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
 464   }
 465 
 466   // There should probably be Shenandoah-specific options for these,
 467   // just as there are G1-specific options.
 468   {
 469     ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
 470     satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
 471     satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
 472   }
 473 
 474   _monitoring_support = new ShenandoahMonitoringSupport(this);
 475   _phase_timings = new ShenandoahPhaseTimings(max_workers());
 476   ShenandoahCodeRoots::initialize();
 477 
 478   if (ShenandoahPacing) {
 479     _pacer = new ShenandoahPacer(this);
 480     _pacer->setup_for_idle();
 481   }
 482 
 483   initialize_controller();
 484 
 485   if (ShenandoahUncommit) {
 486     _uncommit_thread = new ShenandoahUncommitThread(this);
 487   }
 488 
 489   print_init_logger();
 490 
 491   FullGCForwarding::initialize(_heap_region);
 492 
 493   return JNI_OK;
 494 }
 495 
 496 void ShenandoahHeap::initialize_controller() {
 497   _control_thread = new ShenandoahControlThread();
 498 }
 499 
 500 void ShenandoahHeap::print_init_logger() const {
 501   ShenandoahInitLogger::print();
 502 }
 503 
 504 void ShenandoahHeap::initialize_mode() {
 505   if (ShenandoahGCMode != nullptr) {
 506     if (strcmp(ShenandoahGCMode, "satb") == 0) {
 507       _gc_mode = new ShenandoahSATBMode();
 508     } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
 509       _gc_mode = new ShenandoahPassiveMode();
 510     } else if (strcmp(ShenandoahGCMode, "generational") == 0) {
 511       _gc_mode = new ShenandoahGenerationalMode();
 512     } else {
 513       vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
 514     }
 515   } else {
 516     vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
 517   }
 518   _gc_mode->initialize_flags();
 519   if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 520     vm_exit_during_initialization(
 521             err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 522                     _gc_mode->name()));
 523   }
 524   if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
 525     vm_exit_during_initialization(
 526             err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 527                     _gc_mode->name()));
 528   }
 529 }
 530 
 531 void ShenandoahHeap::initialize_heuristics() {
 532   _global_generation = new ShenandoahGlobalGeneration(mode()->is_generational(), max_workers(), max_capacity(), max_capacity());
 533   _global_generation->initialize_heuristics(mode());
 534 }
 535 
 536 #ifdef _MSC_VER
 537 #pragma warning( push )
 538 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 539 #endif
 540 
 541 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 542   CollectedHeap(),
 543   _gc_generation(nullptr),
 544   _active_generation(nullptr),
 545   _initial_size(0),
 546   _committed(0),
 547   _max_workers(MAX3(ConcGCThreads, ParallelGCThreads, 1U)),
 548   _workers(nullptr),
 549   _safepoint_workers(nullptr),
 550   _heap_region_special(false),
 551   _num_regions(0),
 552   _regions(nullptr),
 553   _affiliations(nullptr),
 554   _gc_state_changed(false),
 555   _gc_no_progress_count(0),
 556   _cancel_requested_time(0),
 557   _update_refs_iterator(this),
 558   _global_generation(nullptr),
 559   _control_thread(nullptr),
 560   _uncommit_thread(nullptr),
 561   _young_generation(nullptr),
 562   _old_generation(nullptr),
 563   _shenandoah_policy(policy),
 564   _gc_mode(nullptr),
 565   _free_set(nullptr),
 566   _pacer(nullptr),
 567   _verifier(nullptr),
 568   _phase_timings(nullptr),
 569   _monitoring_support(nullptr),
 570   _memory_pool(nullptr),
 571   _stw_memory_manager("Shenandoah Pauses"),
 572   _cycle_memory_manager("Shenandoah Cycles"),
 573   _gc_timer(new ConcurrentGCTimer()),
 574   _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
 575   _marking_context(nullptr),
 576   _bitmap_size(0),
 577   _bitmap_regions_per_slice(0),
 578   _bitmap_bytes_per_slice(0),
 579   _bitmap_region_special(false),
 580   _aux_bitmap_region_special(false),
 581   _liveness_cache(nullptr),
 582   _collection_set(nullptr)
 583 {
  // Initialize GC mode early, as many subsequent initialization procedures depend on it
 585   initialize_mode();
 586   _cancelled_gc.set(GCCause::_no_gc);
 587 }
 588 
 589 #ifdef _MSC_VER
 590 #pragma warning( pop )
 591 #endif
 592 
 593 void ShenandoahHeap::print_on(outputStream* st) const {
 594   st->print_cr("Shenandoah Heap");
 595   st->print_cr(" %zu%s max, %zu%s soft max, %zu%s committed, %zu%s used",
 596                byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
 597                byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
 598                byte_size_in_proper_unit(committed()),    proper_unit_for_byte_size(committed()),
 599                byte_size_in_proper_unit(used()),         proper_unit_for_byte_size(used()));
 600   st->print_cr(" %zu x %zu %s regions",
 601                num_regions(),
 602                byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
 603                proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));
 604 
 605   st->print("Status: ");
 606   if (has_forwarded_objects())                 st->print("has forwarded objects, ");
 607   if (!mode()->is_generational()) {
    if (is_concurrent_mark_in_progress())      st->print("marking, ");
 609   } else {
 610     if (is_concurrent_old_mark_in_progress())    st->print("old marking, ");
 611     if (is_concurrent_young_mark_in_progress())  st->print("young marking, ");
 612   }
 613   if (is_evacuation_in_progress())             st->print("evacuating, ");
 614   if (is_update_refs_in_progress())            st->print("updating refs, ");
 615   if (is_degenerated_gc_in_progress())         st->print("degenerated gc, ");
 616   if (is_full_gc_in_progress())                st->print("full gc, ");
 617   if (is_full_gc_move_in_progress())           st->print("full gc move, ");
 618   if (is_concurrent_weak_root_in_progress())   st->print("concurrent weak roots, ");
 619   if (is_concurrent_strong_root_in_progress() &&
 620       !is_concurrent_weak_root_in_progress())  st->print("concurrent strong roots, ");
 621 
 622   if (cancelled_gc()) {
 623     st->print("cancelled");
 624   } else {
 625     st->print("not cancelled");
 626   }
 627   st->cr();
 628 
 629   st->print_cr("Reserved region:");
 630   st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
 631                p2i(reserved_region().start()),
 632                p2i(reserved_region().end()));
 633 
 634   ShenandoahCollectionSet* cset = collection_set();
 635   st->print_cr("Collection set:");
 636   if (cset != nullptr) {
 637     st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address()));
 638     st->print_cr(" - map (biased):  " PTR_FORMAT, p2i(cset->biased_map_address()));
 639   } else {
 640     st->print_cr(" (null)");
 641   }
 642 
 643   st->cr();
 644   MetaspaceUtils::print_on(st);
 645 
 646   if (Verbose) {
 647     st->cr();
 648     print_heap_regions_on(st);
 649   }
 650 }
 651 
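// Initializes the thread-local GCLAB for each GC worker thread it visits.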
 652 class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
 653 public:
 654   void do_thread(Thread* thread) {
 655     assert(thread != nullptr, "Sanity");
 656     ShenandoahThreadLocalData::initialize_gclab(thread);
 657   }
 658 };
 659 
 660 void ShenandoahHeap::post_initialize() {
 661   CollectedHeap::post_initialize();
 662 
 663   // Schedule periodic task to report on gc thread CPU utilization
 664   _mmu_tracker.initialize();
 665 
 666   MutexLocker ml(Threads_lock);
 667 
 668   ShenandoahInitWorkerGCLABClosure init_gclabs;
 669   _workers->threads_do(&init_gclabs);
 670 
  // The gclab cannot be initialized early during VM startup, as it cannot determine its max_size.
  // Instead, we let WorkerThreads initialize the gclab whenever a new worker is created.
 673   _workers->set_initialize_gclab();
 674 
 675   // Note that the safepoint workers may require gclabs if the threads are used to create a heap dump
 676   // during a concurrent evacuation phase.
 677   if (_safepoint_workers != nullptr) {
 678     _safepoint_workers->threads_do(&init_gclabs);
 679     _safepoint_workers->set_initialize_gclab();
 680   }
 681 
 682   JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers();)
 683 }
 684 
 685 ShenandoahHeuristics* ShenandoahHeap::heuristics() {
 686   return _global_generation->heuristics();
 687 }
 688 
 689 size_t ShenandoahHeap::used() const {
 690   return global_generation()->used();
 691 }
 692 
 693 size_t ShenandoahHeap::committed() const {
 694   return Atomic::load(&_committed);
 695 }
 696 
 697 void ShenandoahHeap::increase_committed(size_t bytes) {
 698   shenandoah_assert_heaplocked_or_safepoint();
 699   _committed += bytes;
 700 }
 701 
 702 void ShenandoahHeap::decrease_committed(size_t bytes) {
 703   shenandoah_assert_heaplocked_or_safepoint();
 704   _committed -= bytes;
 705 }
 706 
 707 // For tracking usage based on allocations, it should be the case that:
 708 // * The sum of regions::used == heap::used
 709 // * The sum of a generation's regions::used == generation::used
 710 // * The sum of a generation's humongous regions::free == generation::humongous_waste
 711 // These invariants are checked by the verifier on GC safepoints.
 712 //
 713 // Additional notes:
 714 // * When a mutator's allocation request causes a region to be retired, the
 715 //   free memory left in that region is considered waste. It does not contribute
 716 //   to the usage, but it _does_ contribute to allocation rate.
 717 // * The bottom of a PLAB must be aligned on card size. In some cases this will
 718 //   require padding in front of the PLAB (a filler object). Because this padding
 719 //   is included in the region's used memory we include the padding in the usage
 720 //   accounting as waste.
// * Mutator allocations are used to compute an allocation rate. They are also
//   reported to the pacer for the same purpose.
// * There are three sources of waste:
//  1. The padding used to align a PLAB on card size
//  2. A region whose free memory is less than the minimum TLAB size and is therefore retired
//  3. The unused portion of memory in the last region of a humongous object
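// For example, if a mutator TLAB request retires a region that still has a small
// amount of free memory left, that remainder counts toward the allocation rate
// (and is reported to the pacer) but not toward the generation's usage.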
 727 void ShenandoahHeap::increase_used(const ShenandoahAllocRequest& req) {
 728   size_t actual_bytes = req.actual_size() * HeapWordSize;
 729   size_t wasted_bytes = req.waste() * HeapWordSize;
 730   ShenandoahGeneration* generation = generation_for(req.affiliation());
 731 
 732   if (req.is_gc_alloc()) {
 733     assert(wasted_bytes == 0 || req.type() == ShenandoahAllocRequest::_alloc_plab, "Only PLABs have waste");
 734     increase_used(generation, actual_bytes + wasted_bytes);
 735   } else {
 736     assert(req.is_mutator_alloc(), "Expected mutator alloc here");
 737     // padding and actual size both count towards allocation counter
 738     generation->increase_allocated(actual_bytes + wasted_bytes);
 739 
 740     // only actual size counts toward usage for mutator allocations
 741     increase_used(generation, actual_bytes);
 742 
 743     // notify pacer of both actual size and waste
 744     notify_mutator_alloc_words(req.actual_size(), req.waste());
 745 
 746     if (wasted_bytes > 0 && ShenandoahHeapRegion::requires_humongous(req.actual_size())) {
      increase_humongous_waste(generation, wasted_bytes);
 748     }
 749   }
 750 }
 751 
 752 void ShenandoahHeap::increase_humongous_waste(ShenandoahGeneration* generation, size_t bytes) {
 753   generation->increase_humongous_waste(bytes);
 754   if (!generation->is_global()) {
 755     global_generation()->increase_humongous_waste(bytes);
 756   }
 757 }
 758 
 759 void ShenandoahHeap::decrease_humongous_waste(ShenandoahGeneration* generation, size_t bytes) {
 760   generation->decrease_humongous_waste(bytes);
 761   if (!generation->is_global()) {
 762     global_generation()->decrease_humongous_waste(bytes);
 763   }
 764 }
 765 
 766 void ShenandoahHeap::increase_used(ShenandoahGeneration* generation, size_t bytes) {
 767   generation->increase_used(bytes);
 768   if (!generation->is_global()) {
 769     global_generation()->increase_used(bytes);
 770   }
 771 }
 772 
 773 void ShenandoahHeap::decrease_used(ShenandoahGeneration* generation, size_t bytes) {
 774   generation->decrease_used(bytes);
 775   if (!generation->is_global()) {
 776     global_generation()->decrease_used(bytes);
 777   }
 778 }
 779 
 780 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, size_t waste) {
 781   if (ShenandoahPacing) {
 782     control_thread()->pacing_notify_alloc(words);
 783     if (waste > 0) {
 784       pacer()->claim_for_alloc<true>(waste);
 785     }
 786   }
 787 }
 788 
 789 size_t ShenandoahHeap::capacity() const {
 790   return committed();
 791 }
 792 
 793 size_t ShenandoahHeap::max_capacity() const {
 794   return _num_regions * ShenandoahHeapRegion::region_size_bytes();
 795 }
 796 
 797 size_t ShenandoahHeap::soft_max_capacity() const {
 798   size_t v = Atomic::load(&_soft_max_size);
 799   assert(min_capacity() <= v && v <= max_capacity(),
 800          "Should be in bounds: %zu <= %zu <= %zu",
 801          min_capacity(), v, max_capacity());
 802   return v;
 803 }
 804 
 805 void ShenandoahHeap::set_soft_max_capacity(size_t v) {
 806   assert(min_capacity() <= v && v <= max_capacity(),
 807          "Should be in bounds: %zu <= %zu <= %zu",
 808          min_capacity(), v, max_capacity());
 809   Atomic::store(&_soft_max_size, v);
 810 }
 811 
 812 size_t ShenandoahHeap::min_capacity() const {
 813   return _minimum_size;
 814 }
 815 
 816 size_t ShenandoahHeap::initial_capacity() const {
 817   return _initial_size;
 818 }
 819 
 820 bool ShenandoahHeap::is_in(const void* p) const {
 821   if (!is_in_reserved(p)) {
 822     return false;
 823   }
 824 
 825   if (is_full_gc_move_in_progress()) {
    // Full GC move is running, and we do not have consistent region
    // information yet. But we know the pointer is in the heap.
 828     return true;
 829   }
 830 
  // Now check if we point to a live section in an active region.
 832   const ShenandoahHeapRegion* r = heap_region_containing(p);
 833   if (p >= r->top()) {
 834     return false;
 835   }
 836 
 837   if (r->is_active()) {
 838     return true;
 839   }
 840 
 841   // The region is trash, but won't be recycled until after concurrent weak
 842   // roots. We also don't allow mutators to allocate from trash regions
 843   // during weak roots. Concurrent class unloading may access unmarked oops
 844   // in trash regions.
 845   return r->is_trash() && is_concurrent_weak_root_in_progress();
 846 }
 847 
 848 void ShenandoahHeap::notify_soft_max_changed() {
 849   if (_uncommit_thread != nullptr) {
 850     _uncommit_thread->notify_soft_max_changed();
 851   }
 852 }
 853 
 854 void ShenandoahHeap::notify_explicit_gc_requested() {
 855   if (_uncommit_thread != nullptr) {
 856     _uncommit_thread->notify_explicit_gc_requested();
 857   }
 858 }
 859 
 860 bool ShenandoahHeap::check_soft_max_changed() {
 861   size_t new_soft_max = Atomic::load(&SoftMaxHeapSize);
 862   size_t old_soft_max = soft_max_capacity();
 863   if (new_soft_max != old_soft_max) {
 864     new_soft_max = MAX2(min_capacity(), new_soft_max);
 865     new_soft_max = MIN2(max_capacity(), new_soft_max);
 866     if (new_soft_max != old_soft_max) {
 867       log_info(gc)("Soft Max Heap Size: %zu%s -> %zu%s",
 868                    byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
 869                    byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
 870       );
 871       set_soft_max_capacity(new_soft_max);
 872       return true;
 873     }
 874   }
 875   return false;
 876 }
 877 
 878 void ShenandoahHeap::notify_heap_changed() {
  // Update monitoring counters when we take a new region. This amortizes the
  // update costs on the slow path.
 881   monitoring_support()->notify_heap_changed();
 882   _heap_changed.try_set();
 883 }
 884 
 885 void ShenandoahHeap::set_forced_counters_update(bool value) {
 886   monitoring_support()->set_forced_counters_update(value);
 887 }
 888 
 889 void ShenandoahHeap::handle_force_counters_update() {
 890   monitoring_support()->handle_force_counters_update();
 891 }
 892 
 893 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
 894   // New object should fit the GCLAB size
 895   size_t min_size = MAX2(size, PLAB::min_size());
 896 
 897   // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
 898   size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
 899 
 900   new_size = MIN2(new_size, PLAB::max_size());
 901   new_size = MAX2(new_size, PLAB::min_size());
 902 
 903   // Record new heuristic value even if we take any shortcut. This captures
 904   // the case when moderately-sized objects always take a shortcut. At some point,
 905   // heuristics should catch up with them.
 906   log_debug(gc, free)("Set new GCLAB size: %zu", new_size);
 907   ShenandoahThreadLocalData::set_gclab_size(thread, new_size);
 908 
 909   if (new_size < size) {
 910     // New size still does not fit the object. Fall back to shared allocation.
 911     // This avoids retiring perfectly good GCLABs, when we encounter a large object.
 912     log_debug(gc, free)("New gclab size (%zu) is too small for %zu", new_size, size);
 913     return nullptr;
 914   }
 915 
 916   // Retire current GCLAB, and allocate a new one.
 917   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
 918   gclab->retire();
 919 
 920   size_t actual_size = 0;
 921   HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
 922   if (gclab_buf == nullptr) {
 923     return nullptr;
 924   }
 925 
 926   assert (size <= actual_size, "allocation should fit");
 927 
  // ...and clear or zap the just-allocated GCLAB, if needed.
 929   if (ZeroTLAB) {
 930     Copy::zero_to_words(gclab_buf, actual_size);
 931   } else if (ZapTLAB) {
 932     // Skip mangling the space corresponding to the object header to
 933     // ensure that the returned space is not considered parsable by
 934     // any concurrent GC thread.
 935     size_t hdr_size = oopDesc::header_size();
 936     Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
 937   }
 938   gclab->set_buf(gclab_buf, actual_size);
 939   return gclab->allocate(size);
 940 }
 941 
 942 // Called from stubs in JIT code or interpreter
 943 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
 944                                             size_t requested_size,
 945                                             size_t* actual_size) {
 946   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
 947   HeapWord* res = allocate_memory(req);
 948   if (res != nullptr) {
 949     *actual_size = req.actual_size();
 950   } else {
 951     *actual_size = 0;
 952   }
 953   return res;
 954 }
 955 
 956 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
 957                                              size_t word_size,
 958                                              size_t* actual_size) {
 959   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
 960   HeapWord* res = allocate_memory(req);
 961   if (res != nullptr) {
 962     *actual_size = req.actual_size();
 963   } else {
 964     *actual_size = 0;
 965   }
 966   return res;
 967 }
 968 
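// Central allocation path for both mutator and GC allocations. Mutator allocations are
// paced and, on failure, engage the control thread and retry; GC allocations never block
// here. Usage and waste accounting for the request happens at the end, regardless of outcome.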
 969 HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
 970   intptr_t pacer_epoch = 0;
 971   bool in_new_region = false;
 972   HeapWord* result = nullptr;
 973 
 974   if (req.is_mutator_alloc()) {
 975     if (ShenandoahPacing) {
 976       pacer()->pace_for_alloc(req.size());
 977       pacer_epoch = pacer()->epoch();
 978     }
 979 
 980     if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
 981       result = allocate_memory_under_lock(req, in_new_region);
 982     }
 983 
    // Check that the GC overhead limit has not been exceeded.
    //
    // Shenandoah will grind along for quite a while allocating one object at a time
    // using shared (non-TLAB) allocations. This check notifies the collector to start
    // a cycle, but will raise an OOME to the mutator if the last Full GCs have not
    // made progress. gc_no_progress_count is incremented following each degenerated
    // or full GC that fails to achieve is_good_progress().
 992     if (result == nullptr && !req.is_lab_alloc() && get_gc_no_progress_count() > ShenandoahNoProgressThreshold) {
 993       control_thread()->handle_alloc_failure(req, false);
 994       req.set_actual_size(0);
 995       return nullptr;
 996     }
 997 
 998     if (result == nullptr) {
 999       // Block until control thread reacted, then retry allocation.
1000       //
      // It might happen that one of the threads requesting allocation unblocks
      // well after the GC happened, only to fail the second allocation, because
      // other threads have already depleted the free storage. In this case, a better
      // strategy is to try again, until at least one full GC has completed.
      //
      // Stop retrying and return nullptr to cause an OutOfMemoryError if our allocation failed even after:
      //   a) We experienced a GC that had good progress, or
      //   b) We experienced at least one Full GC (whether or not it had good progress)
1009 
1010       const size_t original_count = shenandoah_policy()->full_gc_count();
1011       while (result == nullptr && should_retry_allocation(original_count)) {
1012         control_thread()->handle_alloc_failure(req, true);
1013         result = allocate_memory_under_lock(req, in_new_region);
1014       }
1015       if (result != nullptr) {
1016         // If our allocation request has been satisfied after it initially failed, we count this as good gc progress
1017         notify_gc_progress();
1018       }
1019       if (log_develop_is_enabled(Debug, gc, alloc)) {
1020         ResourceMark rm;
1021         log_debug(gc, alloc)("Thread: %s, Result: " PTR_FORMAT ", Request: %s, Size: %zu"
1022                              ", Original: %zu, Latest: %zu",
1023                              Thread::current()->name(), p2i(result), req.type_string(), req.size(),
1024                              original_count, get_gc_no_progress_count());
1025       }
1026     }
1027   } else {
1028     assert(req.is_gc_alloc(), "Can only accept GC allocs here");
1029     result = allocate_memory_under_lock(req, in_new_region);
1030     // Do not call handle_alloc_failure() here, because we cannot block.
1031     // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
1032   }
1033 
1034   if (in_new_region) {
1035     notify_heap_changed();
1036   }
1037 
1038   if (result == nullptr) {
1039     req.set_actual_size(0);
1040   }
1041 
1042   // This is called regardless of the outcome of the allocation to account
1043   // for any waste created by retiring regions with this request.
1044   increase_used(req);
1045 
1046   if (result != nullptr) {
1047     size_t requested = req.size();
1048     size_t actual = req.actual_size();
1049 
1050     assert (req.is_lab_alloc() || (requested == actual),
1051             "Only LAB allocations are elastic: %s, requested = %zu, actual = %zu",
1052             ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);
1053 
1054     if (req.is_mutator_alloc()) {
1055       // If we requested more than we were granted, give the rest back to pacer.
1056       // This only matters if we are in the same pacing epoch: do not try to unpace
1057       // over the budget for the other phase.
1058       if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
1059         pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
1060       }
1061     }
1062   }
1063 
1064   return result;
1065 }
1066 
1067 inline bool ShenandoahHeap::should_retry_allocation(size_t original_full_gc_count) const {
1068   return shenandoah_policy()->full_gc_count() == original_full_gc_count
1069       && !shenandoah_policy()->is_at_shutdown();
1070 }
1071 
1072 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
1073   // If we are dealing with mutator allocation, then we may need to block for safepoint.
1074   // We cannot block for safepoint for GC allocations, because there is a high chance
1075   // we are already running at safepoint or from stack watermark machinery, and we cannot
1076   // block again.
1077   ShenandoahHeapLocker locker(lock(), req.is_mutator_alloc());
1078 
1079   // Make sure the old generation has room for either evacuations or promotions before trying to allocate.
1080   if (req.is_old() && !old_generation()->can_allocate(req)) {
1081     return nullptr;
1082   }
1083 
  // If the TLAB request size is greater than the memory available, allocate() will
  // attempt to downsize the request to fit within the available memory.
1086   HeapWord* result = _free_set->allocate(req, in_new_region);
1087 
1088   // Record the plab configuration for this result and register the object.
1089   if (result != nullptr && req.is_old()) {
1090     old_generation()->configure_plab_for_current_thread(req);
1091     if (req.type() == ShenandoahAllocRequest::_alloc_shared_gc) {
1092       // Register the newly allocated object while we're holding the global lock since there's no synchronization
1093       // built in to the implementation of register_object().  There are potential races when multiple independent
1094       // threads are allocating objects, some of which might span the same card region.  For example, consider
1095       // a card table's memory region within which three objects are being allocated by three different threads:
1096       //
1097       // objects being "concurrently" allocated:
1098       //    [-----a------][-----b-----][--------------c------------------]
1099       //            [---- card table memory range --------------]
1100       //
1101       // Before any objects are allocated, this card's memory range holds no objects.  Note that allocation of object a
1102       // wants to set the starts-object, first-start, and last-start attributes of the preceding card region.
1103       // Allocation of object b wants to set the starts-object, first-start, and last-start attributes of this card region.
1104       // Allocation of object c also wants to set the starts-object, first-start, and last-start attributes of this
1105       // card region.
1106       //
1107       // The thread allocating b and the thread allocating c can "race" in various ways, resulting in confusion, such as
1108       // last-start representing object b while first-start represents object c.  This is why we need to require all
1109       // register_object() invocations to be "mutually exclusive" with respect to each card's memory range.
1110       old_generation()->card_scan()->register_object(result);
1111     }
1112   }
1113 
1114   return result;
1115 }
1116 
1117 HeapWord* ShenandoahHeap::mem_allocate(size_t size,
1118                                         bool*  gc_overhead_limit_was_exceeded) {
1119   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
1120   return allocate_memory(req);
1121 }
1122 
1123 MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
1124                                                              size_t size,
1125                                                              Metaspace::MetadataType mdtype) {
1126   MetaWord* result;
1127 
1128   // Inform metaspace OOM to GC heuristics if class unloading is possible.
1129   ShenandoahHeuristics* h = global_generation()->heuristics();
1130   if (h->can_unload_classes()) {
1131     h->record_metaspace_oom();
1132   }
1133 
1134   // Expand and retry allocation
1135   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1136   if (result != nullptr) {
1137     return result;
1138   }
1139 
1140   // Start full GC
1141   collect(GCCause::_metadata_GC_clear_soft_refs);
1142 
1143   // Retry allocation
1144   result = loader_data->metaspace_non_null()->allocate(size, mdtype);
1145   if (result != nullptr) {
1146     return result;
1147   }
1148 
1149   // Expand and retry allocation
1150   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1151   if (result != nullptr) {
1152     return result;
1153   }
1154 
1155   // Out of memory
1156   return nullptr;
1157 }
1158 
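// Visits each live object in a collection set region and evacuates it, unless some
// other thread has already forwarded it.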
1159 class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
1160 private:
1161   ShenandoahHeap* const _heap;
1162   Thread* const _thread;
1163 public:
1164   ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
1165     _heap(heap), _thread(Thread::current()) {}
1166 
1167   void do_object(oop p) {
1168     shenandoah_assert_marked(nullptr, p);
1169     if (!p->is_forwarded()) {
1170       _heap->evacuate_object(p, _thread);
1171     }
1172   }
1173 };
1174 
1175 class ShenandoahEvacuationTask : public WorkerTask {
1176 private:
1177   ShenandoahHeap* const _sh;
1178   ShenandoahCollectionSet* const _cs;
1179   bool _concurrent;
1180 public:
1181   ShenandoahEvacuationTask(ShenandoahHeap* sh,
1182                            ShenandoahCollectionSet* cs,
1183                            bool concurrent) :
1184     WorkerTask("Shenandoah Evacuation"),
1185     _sh(sh),
1186     _cs(cs),
1187     _concurrent(concurrent)
1188   {}
1189 
1190   void work(uint worker_id) {
1191     if (_concurrent) {
1192       ShenandoahConcurrentWorkerSession worker_session(worker_id);
1193       ShenandoahSuspendibleThreadSetJoiner stsj;
1194       ShenandoahEvacOOMScope oom_evac_scope;
1195       do_work();
1196     } else {
1197       ShenandoahParallelWorkerSession worker_session(worker_id);
1198       ShenandoahEvacOOMScope oom_evac_scope;
1199       do_work();
1200     }
1201   }
1202 
1203 private:
1204   void do_work() {
1205     ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
1206     ShenandoahHeapRegion* r;
1207     while ((r =_cs->claim_next()) != nullptr) {
1208       assert(r->has_live(), "Region %zu should have been reclaimed early", r->index());
1209       _sh->marked_object_iterate(r, &cl);
1210 
1211       if (ShenandoahPacing) {
1212         _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
1213       }
1214 
1215       if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
1216         break;
1217       }
1218     }
1219   }
1220 };
1221 
1222 class ShenandoahRetireGCLABClosure : public ThreadClosure {
1223 private:
1224   bool const _resize;
1225 public:
1226   explicit ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
1227   void do_thread(Thread* thread) override {
1228     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1229     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1230     gclab->retire();
1231     if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1232       ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1233     }
1234 
1235     if (ShenandoahHeap::heap()->mode()->is_generational()) {
1236       PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1237       assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
1238 
1239       // There are two reasons to retire all plabs between old-gen evacuation passes.
1240       //  1. We need to make the plab memory parsable by remembered-set scanning.
1241       //  2. We need to establish a trustworthy UpdateWaterMark value within each old-gen heap region
1242       ShenandoahGenerationalHeap::heap()->retire_plab(plab, thread);
1243       if (_resize && ShenandoahThreadLocalData::plab_size(thread) > 0) {
1244         ShenandoahThreadLocalData::set_plab_size(thread, 0);
1245       }
1246     }
1247   }
1248 };
1249 
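// Publishes a new gc state value into each visited thread's thread-local copy.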
1250 class ShenandoahGCStatePropagator : public HandshakeClosure {
1251 public:
1252   explicit ShenandoahGCStatePropagator(char gc_state) :
1253     HandshakeClosure("Shenandoah GC State Change"),
1254     _gc_state(gc_state) {}
1255 
1256   void do_thread(Thread* thread) override {
1257     ShenandoahThreadLocalData::set_gc_state(thread, _gc_state);
1258   }
1259 private:
1260   char _gc_state;
1261 };
1262 
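// For each visited thread: publishes the new gc state and, if the thread has a GCLAB,
// retires its GCLAB (and PLAB in generational mode) before the update-refs phase.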
1263 class ShenandoahPrepareForUpdateRefs : public HandshakeClosure {
1264 public:
1265   explicit ShenandoahPrepareForUpdateRefs(char gc_state) :
1266     HandshakeClosure("Shenandoah Prepare for Update Refs"),
1267     _retire(ResizeTLAB), _propagator(gc_state) {}
1268 
1269   void do_thread(Thread* thread) override {
1270     _propagator.do_thread(thread);
1271     if (ShenandoahThreadLocalData::gclab(thread) != nullptr) {
1272       _retire.do_thread(thread);
1273     }
1274   }
1275 private:
1276   ShenandoahRetireGCLABClosure _retire;
1277   ShenandoahGCStatePropagator _propagator;
1278 };
1279 
1280 void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
1281   ShenandoahEvacuationTask task(this, _collection_set, concurrent);
1282   workers()->run_task(&task);
1283 }
1284 
1285 void ShenandoahHeap::concurrent_prepare_for_update_refs() {
1286   {
    // Java threads take this lock while they are being attached and added to the list of threads.
    // If another thread holds this lock before we update the gc state, it will receive a stale
    // gc state, but it will have been added to the list of Java threads and so will be corrected
    // by the following handshake.
1291     MutexLocker lock(Threads_lock);
1292 
1293     // A cancellation at this point means the degenerated cycle must resume from update-refs.
1294     set_gc_state_concurrent(EVACUATION, false);
1295     set_gc_state_concurrent(WEAK_ROOTS, false);
1296     set_gc_state_concurrent(UPDATE_REFS, true);
1297   }
1298 
1299   // This will propagate the gc state and retire gclabs and plabs for threads that require it.
1300   ShenandoahPrepareForUpdateRefs prepare_for_update_refs(_gc_state.raw_value());
1301 
1302   // The handshake won't touch worker threads (or control thread, or VM thread), so do those separately.
1303   Threads::non_java_threads_do(&prepare_for_update_refs);
1304 
1305   // Now retire gclabs and plabs, and propagate the gc state to mutator threads.
1306   Handshake::execute(&prepare_for_update_refs);
1307 
1308   _update_refs_iterator.reset();
1309 }
1310 
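// Chains two handshake closures so that both are applied to each thread within a single
// handshake. The composite reports the name of the second closure.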
1311 class ShenandoahCompositeHandshakeClosure : public HandshakeClosure {
1312   HandshakeClosure* _handshake_1;
1313   HandshakeClosure* _handshake_2;
1314 public:
1315   ShenandoahCompositeHandshakeClosure(HandshakeClosure* handshake_1, HandshakeClosure* handshake_2) :
1316     HandshakeClosure(handshake_2->name()),
1317     _handshake_1(handshake_1), _handshake_2(handshake_2) {}
1318 
1319   void do_thread(Thread* thread) override {
1320     _handshake_1->do_thread(thread);
1321     _handshake_2->do_thread(thread);
1322   }
1323 };
1324 
1325 void ShenandoahHeap::concurrent_final_roots(HandshakeClosure* handshake_closure) {
1326   {
1327     assert(!is_evacuation_in_progress(), "Should not evacuate for abbreviated or old cycles");
1328     MutexLocker lock(Threads_lock);
1329     set_gc_state_concurrent(WEAK_ROOTS, false);
1330   }
1331 
1332   ShenandoahGCStatePropagator propagator(_gc_state.raw_value());
1333   Threads::non_java_threads_do(&propagator);
1334   if (handshake_closure == nullptr) {
1335     Handshake::execute(&propagator);
1336   } else {
1337     ShenandoahCompositeHandshakeClosure composite(&propagator, handshake_closure);
1338     Handshake::execute(&composite);
1339   }
1340 }
1341 
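// Evacuate the object referenced by p on behalf of the given thread. A thread that has
// already hit OOM during evacuation must not copy anything else; it simply returns the
// forwardee that is currently published for p.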
1342 oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
1343   assert(thread == Thread::current(), "Expected thread parameter to be current thread.");
1344   if (ShenandoahThreadLocalData::is_oom_during_evac(thread)) {
1345     // This thread went through the OOM-during-evacuation protocol. It is safe to return
1346     // the forwarding pointer. It must not attempt to evacuate any other objects.
1347     return ShenandoahBarrierSet::resolve_forwarded(p);
1348   }
1349 
1350   assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
1351 
1352   ShenandoahHeapRegion* r = heap_region_containing(p);
1353   assert(!r->is_humongous(), "never evacuate humongous objects");
1354 
1355   ShenandoahAffiliation target_gen = r->affiliation();
1356   return try_evacuate_object(p, thread, r, target_gen);
1357 }
1358 
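// Copy p into the target generation and attempt to install the forwarding pointer with a
// CAS. Exactly one thread wins the race; a loser rolls back (or fills) its speculative
// copy and returns the winner's copy instead.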
1359 oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapRegion* from_region,
1360                                                ShenandoahAffiliation target_gen) {
1361   assert(target_gen == YOUNG_GENERATION, "Only expect evacuations to young in this mode");
1362   assert(from_region->is_young(), "Only expect evacuations from young in this mode");
1363   bool alloc_from_lab = true;
1364   HeapWord* copy = nullptr;
1365 
1366   markWord mark = p->mark();
1367   if (ShenandoahForwarding::is_forwarded(mark)) {
1368     return ShenandoahForwarding::get_forwardee(p);
1369   }
1370   size_t old_size = ShenandoahForwarding::size(p);
1371   size_t size = p->copy_size(old_size, mark);
1372 
1373 #ifdef ASSERT
1374   if (ShenandoahOOMDuringEvacALot &&
1375       (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
1376     copy = nullptr;
1377   } else {
1378 #endif
1379     if (UseTLAB) {
1380       copy = allocate_from_gclab(thread, size);
1381     }
1382     if (copy == nullptr) {
1383       // If we failed to allocate in LAB, we'll try a shared allocation.
1384       ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, target_gen);
1385       copy = allocate_memory(req);
1386       alloc_from_lab = false;
1387     }
1388 #ifdef ASSERT
1389   }
1390 #endif
1391 
1392   if (copy == nullptr) {
1393     control_thread()->handle_alloc_failure_evac(size);
1394 
1395     _oom_evac_handler.handle_out_of_memory_during_evacuation();
1396 
1397     return ShenandoahBarrierSet::resolve_forwarded(p);
1398   }
1399 
1400   // Copy the object:
1401   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, old_size);
1402 
1403   // Try to install the new forwarding pointer.
1404   oop copy_val = cast_to_oop(copy);
1405   oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
1406   if (result == copy_val) {
1407     // Successfully evacuated. Our copy is now the public one!
1408     copy_val->initialize_hash_if_necessary(p);
1409     ContinuationGCSupport::relativize_stack_chunk(copy_val);
1410     shenandoah_assert_correct(nullptr, copy_val);
1411     return copy_val;
1412   } else {
1413     // Failed to evacuate. We need to deal with the object that is left behind. Since this
1414     // new allocation is certainly after TAMS, it will be considered live in the next cycle.
1415     // But if it happens to contain references to evacuated regions, those references would
1416     // not get updated for this stale copy during this cycle, and we will crash while scanning
1417     // it the next cycle.
1418     if (alloc_from_lab) {
1419       // For LAB allocations, it is enough to roll back the allocation ptr. Either the next
1420       // object will overwrite this stale copy, or the filler object on LAB retirement will
1421       // do this.
1422       ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
1423     } else {
1424       // For non-LAB allocations, we have no way to retract the allocation, and
1425       // have to explicitly overwrite the copy with the filler object. With that overwrite,
1426       // we have to keep the fwdptr initialized and pointing to our (stale) copy.
1427       assert(size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
1428       fill_with_object(copy, size);
1429       shenandoah_assert_correct(nullptr, copy_val);
1430       // For non-LAB allocations, the object has already been registered
1431     }
1432     shenandoah_assert_correct(nullptr, result);
1433     return result;
1434   }
1435 }
1436 
1437 void ShenandoahHeap::trash_cset_regions() {
1438   ShenandoahHeapLocker locker(lock());
1439 
1440   ShenandoahCollectionSet* set = collection_set();
1441   ShenandoahHeapRegion* r;
1442   set->clear_current_index();
1443   while ((r = set->next()) != nullptr) {
1444     r->make_trash();
1445   }
1446   collection_set()->clear();
1447 }
1448 
1449 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
1450   st->print_cr("Heap Regions:");
1451   st->print_cr("Region state: EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HP=pinned humongous start");
1452   st->print_cr("              HC=humongous continuation, CS=collection set, TR=trash, P=pinned, CSP=pinned collection set");
1453   st->print_cr("BTE=bottom/top/end, TAMS=top-at-mark-start");
1454   st->print_cr("UWM=update watermark, U=used");
1455   st->print_cr("T=TLAB allocs, G=GCLAB allocs");
1456   st->print_cr("S=shared allocs, L=live data");
1457   st->print_cr("CP=critical pins");
1458 
1459   for (size_t i = 0; i < num_regions(); i++) {
1460     get_region(i)->print_on(st);
1461   }
1462 }
1463 
1464 size_t ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
1465   assert(start->is_humongous_start(), "reclaim regions starting with the first one");
1466 
1467   oop humongous_obj = cast_to_oop(start->bottom());
1468   size_t size = humongous_obj->size();
1469   size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
1470   size_t index = start->index() + required_regions - 1;
1471 
1472   assert(!start->has_live(), "liveness must be zero");
1473 
1474   for (size_t i = 0; i < required_regions; i++) {
1475     // Reclaim from tail. Otherwise, assertion fails when printing region to trace log,
1476     // as it expects that every region belongs to a humongous region starting with a humongous start region.
1477     ShenandoahHeapRegion* region = get_region(index--);
1478 
1479     assert(region->is_humongous(), "expect correct humongous start or continuation");
1480     assert(!region->is_cset(), "Humongous region should not be in collection set");
1481 
1482     region->make_trash_immediate();
1483   }
1484   return required_regions;
1485 }
1486 
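// Verification closure, used from debug code only: asserts that GCLABs (and PLABs in
// generational mode) have already been retired and have no words remaining.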
1487 class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
1488 public:
1489   ShenandoahCheckCleanGCLABClosure() {}
1490   void do_thread(Thread* thread) {
1491     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1492     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1493     assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
1494 
1495     if (ShenandoahHeap::heap()->mode()->is_generational()) {
1496       PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1497       assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
1498       assert(plab->words_remaining() == 0, "PLAB should not need retirement");
1499     }
1500   }
1501 };
1502 
1503 void ShenandoahHeap::labs_make_parsable() {
1504   assert(UseTLAB, "Only call with UseTLAB");
1505 
1506   ShenandoahRetireGCLABClosure cl(false);
1507 
1508   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1509     ThreadLocalAllocBuffer& tlab = t->tlab();
1510     tlab.make_parsable();
1511     cl.do_thread(t);
1512   }
1513 
1514   workers()->threads_do(&cl);
1515 
1516   if (safepoint_workers() != nullptr) {
1517     safepoint_workers()->threads_do(&cl);
1518   }
1519 }
1520 
1521 void ShenandoahHeap::tlabs_retire(bool resize) {
1522   assert(UseTLAB, "Only call with UseTLAB");
1523   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1524 
1525   ThreadLocalAllocStats stats;
1526 
1527   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1528     ThreadLocalAllocBuffer& tlab = t->tlab();
1529     tlab.retire(&stats);
1530     if (resize) {
1531       tlab.resize();
1532     }
1533   }
1534 
1535   stats.publish();
1536 
1537 #ifdef ASSERT
1538   ShenandoahCheckCleanGCLABClosure cl;
1539   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1540     cl.do_thread(t);
1541   }
1542   workers()->threads_do(&cl);
1543 #endif
1544 }
1545 
1546 void ShenandoahHeap::gclabs_retire(bool resize) {
1547   assert(UseTLAB, "Only call with UseTLAB");
1548   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1549 
1550   ShenandoahRetireGCLABClosure cl(resize);
1551   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1552     cl.do_thread(t);
1553   }
1554 
1555   workers()->threads_do(&cl);
1556 
1557   if (safepoint_workers() != nullptr) {
1558     safepoint_workers()->threads_do(&cl);
1559   }
1560 }
1561 
1562 // Returns size in bytes
1563 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1564   // Return the max allowed size, and let the allocation path
1565   // figure out the safe size for current allocation.
1566   return ShenandoahHeapRegion::max_tlab_size_bytes();
1567 }
1568 
1569 size_t ShenandoahHeap::max_tlab_size() const {
1570   // Returns size in words
1571   return ShenandoahHeapRegion::max_tlab_size_words();
1572 }
1573 
1574 void ShenandoahHeap::collect_as_vm_thread(GCCause::Cause cause) {
1575   // These requests are ignored because we can't easily have Shenandoah jump into
1576   // a synchronous (degenerated or full) cycle while it is in the middle of a concurrent
1577   // cycle. We _could_ cancel the concurrent cycle and then try to run a cycle directly
1578   // on the VM thread, but this would confuse the control thread mightily and doesn't
1579   // seem worth the trouble. Instead, we will have the caller thread run (and wait for) a
1580   // concurrent cycle in the prologue of the heap inspect/dump operation. This is how
1581   // other concurrent collectors in the JVM handle this scenario as well.
1582   assert(Thread::current()->is_VM_thread(), "Should be the VM thread");
1583   guarantee(cause == GCCause::_heap_dump || cause == GCCause::_heap_inspection, "Invalid cause");
1584 }
1585 
1586 void ShenandoahHeap::collect(GCCause::Cause cause) {
1587   control_thread()->request_gc(cause);
1588 }
1589 
1590 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
1591   //assert(false, "Shouldn't need to do full collections");
1592 }
1593 
1594 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1595   ShenandoahHeapRegion* r = heap_region_containing(addr);
1596   if (r != nullptr) {
1597     return r->block_start(addr);
1598   }
1599   return nullptr;
1600 }
1601 
1602 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1603   ShenandoahHeapRegion* r = heap_region_containing(addr);
1604   return r->block_is_obj(addr);
1605 }
1606 
1607 bool ShenandoahHeap::print_location(outputStream* st, void* addr) const {
1608   return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr);
1609 }
1610 
1611 void ShenandoahHeap::prepare_for_verify() {
1612   if (SafepointSynchronize::is_at_safepoint() && UseTLAB) {
1613     labs_make_parsable();
1614   }
1615 }
1616 
1617 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1618   if (_shenandoah_policy->is_at_shutdown()) {
1619     return;
1620   }
1621 
1622   if (_control_thread != nullptr) {
1623     tcl->do_thread(_control_thread);
1624   }
1625 
1626   if (_uncommit_thread != nullptr) {
1627     tcl->do_thread(_uncommit_thread);
1628   }
1629 
1630   workers()->threads_do(tcl);
1631   if (_safepoint_workers != nullptr) {
1632     _safepoint_workers->threads_do(tcl);
1633   }
1634 }
1635 
1636 void ShenandoahHeap::print_tracing_info() const {
1637   LogTarget(Info, gc, stats) lt;
1638   if (lt.is_enabled()) {
1639     ResourceMark rm;
1640     LogStream ls(lt);
1641 
1642     phase_timings()->print_global_on(&ls);
1643 
1644     ls.cr();
1645     ls.cr();
1646 
1647     shenandoah_policy()->print_gc_stats(&ls);
1648 
1649     ls.cr();
1650     ls.cr();
1651   }
1652 }
1653 
1654 void ShenandoahHeap::set_gc_generation(ShenandoahGeneration* generation) {
1655   shenandoah_assert_control_or_vm_thread_at_safepoint();
1656   _gc_generation = generation;
1657 }
1658 
1659 // Active generation may only be set by the VM thread at a safepoint.
1660 void ShenandoahHeap::set_active_generation() {
1661   assert(Thread::current()->is_VM_thread(), "Only the VM Thread");
1662   assert(SafepointSynchronize::is_at_safepoint(), "Only at a safepoint!");
1663   assert(_gc_generation != nullptr, "Will set _active_generation to nullptr");
1664   _active_generation = _gc_generation;
1665 }
1666 
1667 void ShenandoahHeap::on_cycle_start(GCCause::Cause cause, ShenandoahGeneration* generation) {
1668   shenandoah_policy()->record_collection_cause(cause);
1669 
1670   const GCCause::Cause current = gc_cause();
1671   assert(current == GCCause::_no_gc, "Over-writing cause: %s, with: %s",
1672          GCCause::to_string(current), GCCause::to_string(cause));
1673   assert(_gc_generation == nullptr, "Over-writing _gc_generation");
1674 
1675   set_gc_cause(cause);
1676   set_gc_generation(generation);
1677 
1678   generation->heuristics()->record_cycle_start();
1679 }
1680 
1681 void ShenandoahHeap::on_cycle_end(ShenandoahGeneration* generation) {
1682   assert(gc_cause() != GCCause::_no_gc, "cause wasn't set");
1683   assert(_gc_generation != nullptr, "_gc_generation wasn't set");
1684 
1685   generation->heuristics()->record_cycle_end();
1686   if (mode()->is_generational() && generation->is_global()) {
1687     // If we just completed a GLOBAL GC, claim credit for completion of young-gen and old-gen GC as well
1688     young_generation()->heuristics()->record_cycle_end();
1689     old_generation()->heuristics()->record_cycle_end();
1690   }
1691 
1692   set_gc_generation(nullptr);
1693   set_gc_cause(GCCause::_no_gc);
1694 }
1695 
1696 void ShenandoahHeap::verify(VerifyOption vo) {
1697   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1698     if (ShenandoahVerify) {
1699       verifier()->verify_generic(vo);
1700     } else {
1701       // TODO: Consider allocating verification bitmaps on demand,
1702       // and turn this on unconditionally.
1703     }
1704   }
1705 }
1706 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1707   return _free_set->capacity();
1708 }
1709 
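// Root-scanning closure for heap iteration: skips unmarked referents of weak roots while
// concurrent weak root processing is in progress, resolves forwardees through the load
// reference barrier, and pushes each newly marked object onto the scan stack.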
1710 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1711 private:
1712   MarkBitMap* _bitmap;
1713   ShenandoahScanObjectStack* _oop_stack;
1714   ShenandoahHeap* const _heap;
1715   ShenandoahMarkingContext* const _marking_context;
1716 
1717   template <class T>
1718   void do_oop_work(T* p) {
1719     T o = RawAccess<>::oop_load(p);
1720     if (!CompressedOops::is_null(o)) {
1721       oop obj = CompressedOops::decode_not_null(o);
1722       if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
1723         // There may be dead oops in weak roots during the concurrent root phase; do not touch them.
1724         return;
1725       }
1726       obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
1727 
1728       assert(oopDesc::is_oop(obj), "must be a valid oop");
1729       if (!_bitmap->is_marked(obj)) {
1730         _bitmap->mark(obj);
1731         _oop_stack->push(obj);
1732       }
1733     }
1734   }
1735 public:
1736   ObjectIterateScanRootClosure(MarkBitMap* bitmap, ShenandoahScanObjectStack* oop_stack) :
1737     _bitmap(bitmap), _oop_stack(oop_stack), _heap(ShenandoahHeap::heap()),
1738     _marking_context(_heap->marking_context()) {}
1739   void do_oop(oop* p)       { do_oop_work(p); }
1740   void do_oop(narrowOop* p) { do_oop_work(p); }
1741 };
1742 
1743 /*
1744  * This is public API, used in preparation for object_iterate().
1745  * Since we don't do a linear scan of the heap in object_iterate() (see comment below), we don't
1746  * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can
1747  * control, we call SH::tlabs_retire and SH::gclabs_retire.
1748  */
1749 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
1750   // No-op.
1751 }
1752 
1753 /*
1754  * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
1755  *
1756  * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
1757  * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
1758  * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
1759  * scanning therefore depends on having a valid marking bitmap to support it. However, we only
1760  * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
1761  * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
1762  * wiped the bitmap in preparation for next marking).
1763  *
1764  * For all those reasons, we implement object iteration as a single marking traversal, reporting
1765  * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1766  * is allowed to report dead objects, but is not required to do so.
1767  */
1768 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1769   // Reset bitmap
1770   if (!prepare_aux_bitmap_for_iteration())
1771     return;
1772 
1773   ShenandoahScanObjectStack oop_stack;
1774   ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1775   // Seed the stack with root scan
1776   scan_roots_for_iteration(&oop_stack, &oops);
1777 
1778   // Work through the oop stack to traverse heap
1779   while (!oop_stack.is_empty()) {
1780     oop obj = oop_stack.pop();
1781     assert(oopDesc::is_oop(obj), "must be a valid oop");
1782     cl->do_object(obj);
1783     obj->oop_iterate(&oops);
1784   }
1785 
1786   assert(oop_stack.is_empty(), "should be empty");
1787   // Reclaim bitmap
1788   reclaim_aux_bitmap_for_iteration();
1789 }
1790 
1791 bool ShenandoahHeap::prepare_aux_bitmap_for_iteration() {
1792   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1793 
1794   if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1795     log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1796     return false;
1797   }
1798   // Reset bitmap
1799   _aux_bit_map.clear();
1800   return true;
1801 }
1802 
1803 void ShenandoahHeap::scan_roots_for_iteration(ShenandoahScanObjectStack* oop_stack, ObjectIterateScanRootClosure* oops) {
1804   // Process GC roots according to the current GC cycle.
1805   // This populates the work stack with the initial objects.
1806   // It is important to relinquish the associated locks before diving
1807   // into the heap dumper.
1808   uint n_workers = safepoint_workers() != nullptr ? safepoint_workers()->active_workers() : 1;
1809   ShenandoahHeapIterationRootScanner rp(n_workers);
1810   rp.roots_do(oops);
1811 }
1812 
1813 void ShenandoahHeap::reclaim_aux_bitmap_for_iteration() {
1814   if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
1815     log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
1816   }
1817 }
1818 
1819 // Closure for parallel object iteration
1820 class ShenandoahObjectIterateParScanClosure : public BasicOopIterateClosure {
1821 private:
1822   MarkBitMap* _bitmap;
1823   ShenandoahObjToScanQueue* _queue;
1824   ShenandoahHeap* const _heap;
1825   ShenandoahMarkingContext* const _marking_context;
1826 
1827   template <class T>
1828   void do_oop_work(T* p) {
1829     T o = RawAccess<>::oop_load(p);
1830     if (!CompressedOops::is_null(o)) {
1831       oop obj = CompressedOops::decode_not_null(o);
1832       if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
1833         // There may be dead oops in weak roots during the concurrent root phase; do not touch them.
1834         return;
1835       }
1836       obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
1837 
1838       assert(oopDesc::is_oop(obj), "Must be a valid oop");
1839       if (_bitmap->par_mark(obj)) {
1840         _queue->push(ShenandoahMarkTask(obj));
1841       }
1842     }
1843   }
1844 public:
1845   ShenandoahObjectIterateParScanClosure(MarkBitMap* bitmap, ShenandoahObjToScanQueue* q) :
1846     _bitmap(bitmap), _queue(q), _heap(ShenandoahHeap::heap()),
1847     _marking_context(_heap->marking_context()) {}
1848   void do_oop(oop* p)       { do_oop_work(p); }
1849   void do_oop(narrowOop* p) { do_oop_work(p); }
1850 };
1851 
1852 // Object iterator for parallel heap iteration.
1853 // The root scanning phase happens in the constructor, as preparation for the
1854 // parallel marking queues.
1855 // Every worker processes its own marking queue. Work-stealing is used
1856 // to balance the workload.
1857 class ShenandoahParallelObjectIterator : public ParallelObjectIteratorImpl {
1858 private:
1859   uint                         _num_workers;
1860   bool                         _init_ready;
1861   MarkBitMap*                  _aux_bit_map;
1862   ShenandoahHeap*              _heap;
1863   ShenandoahScanObjectStack    _roots_stack; // global roots stack
1864   ShenandoahObjToScanQueueSet* _task_queues;
1865 public:
1866   ShenandoahParallelObjectIterator(uint num_workers, MarkBitMap* bitmap) :
1867         _num_workers(num_workers),
1868         _init_ready(false),
1869         _aux_bit_map(bitmap),
1870         _heap(ShenandoahHeap::heap()) {
1871     // Initialize bitmap
1872     _init_ready = _heap->prepare_aux_bitmap_for_iteration();
1873     if (!_init_ready) {
1874       return;
1875     }
1876 
1877     ObjectIterateScanRootClosure oops(_aux_bit_map, &_roots_stack);
1878     _heap->scan_roots_for_iteration(&_roots_stack, &oops);
1879 
1880     _init_ready = prepare_worker_queues();
1881   }
1882 
1883   ~ShenandoahParallelObjectIterator() {
1884     // Reclaim bitmap
1885     _heap->reclaim_aux_bitmap_for_iteration();
1886     // Reclaim worker queues
1887     if (_task_queues != nullptr) {
1888       for (uint i = 0; i < _num_workers; ++i) {
1889         ShenandoahObjToScanQueue* q = _task_queues->queue(i);
1890         if (q != nullptr) {
1891           delete q;
1892           _task_queues->register_queue(i, nullptr);
1893         }
1894       }
1895       delete _task_queues;
1896       _task_queues = nullptr;
1897     }
1898   }
1899 
1900   virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
1901     if (_init_ready) {
1902       object_iterate_parallel(cl, worker_id, _task_queues);
1903     }
1904   }
1905 
1906 private:
1907   // Divide global root_stack into worker queues
1908   bool prepare_worker_queues() {
1909     _task_queues = new ShenandoahObjToScanQueueSet((int) _num_workers);
1910     // Initialize a queue for every worker
1911     for (uint i = 0; i < _num_workers; ++i) {
1912       ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
1913       _task_queues->register_queue(i, task_queue);
1914     }
1915     // Divide roots among the workers. Assuming that the distribution of object references
1916     // is related to root kind, use round-robin so that every worker has the same chance
1917     // to process every kind of root.
1918     size_t roots_num = _roots_stack.size();
1919     if (roots_num == 0) {
1920       // No work to do
1921       return false;
1922     }
1923 
1924     for (uint j = 0; j < roots_num; j++) {
1925       uint stack_id = j % _num_workers;
1926       oop obj = _roots_stack.pop();
1927       _task_queues->queue(stack_id)->push(ShenandoahMarkTask(obj));
1928     }
1929     return true;
1930   }
1931 
1932   void object_iterate_parallel(ObjectClosure* cl,
1933                                uint worker_id,
1934                                ShenandoahObjToScanQueueSet* queue_set) {
1935     assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1936     assert(queue_set != nullptr, "task queue must not be null");
1937 
1938     ShenandoahObjToScanQueue* q = queue_set->queue(worker_id);
1939     assert(q != nullptr, "object iterate queue must not be null");
1940 
1941     ShenandoahMarkTask t;
1942     ShenandoahObjectIterateParScanClosure oops(_aux_bit_map, q);
1943 
1944     // Work through the queue to traverse heap.
1945     // Steal when there is no task in queue.
1946     while (q->pop(t) || queue_set->steal(worker_id, t)) {
1947       oop obj = t.obj();
1948       assert(oopDesc::is_oop(obj), "must be a valid oop");
1949       cl->do_object(obj);
1950       obj->oop_iterate(&oops);
1951     }
1952     assert(q->is_empty(), "should be empty");
1953   }
1954 };
1955 
1956 ParallelObjectIteratorImpl* ShenandoahHeap::parallel_object_iterator(uint workers) {
1957   return new ShenandoahParallelObjectIterator(workers, &_aux_bit_map);
1958 }
1959 
1960 // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
1961 void ShenandoahHeap::keep_alive(oop obj) {
1962   if (is_concurrent_mark_in_progress() && (obj != nullptr)) {
1963     ShenandoahBarrierSet::barrier_set()->enqueue(obj);
1964   }
1965 }
1966 
1967 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1968   for (size_t i = 0; i < num_regions(); i++) {
1969     ShenandoahHeapRegion* current = get_region(i);
1970     blk->heap_region_do(current);
1971   }
1972 }
1973 
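// Worker task that splits the region index space into strides and applies the given
// region closure to each claimed stride. Claiming uses a single atomic fetch-and-add on a
// padded shared index to keep contention low.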
1974 class ShenandoahParallelHeapRegionTask : public WorkerTask {
1975 private:
1976   ShenandoahHeap* const _heap;
1977   ShenandoahHeapRegionClosure* const _blk;
1978   size_t const _stride;
1979 
1980   shenandoah_padding(0);
1981   volatile size_t _index;
1982   shenandoah_padding(1);
1983 
1984 public:
1985   ShenandoahParallelHeapRegionTask(ShenandoahHeapRegionClosure* blk, size_t stride) :
1986           WorkerTask("Shenandoah Parallel Region Operation"),
1987           _heap(ShenandoahHeap::heap()), _blk(blk), _stride(stride), _index(0) {}
1988 
1989   void work(uint worker_id) {
1990     ShenandoahParallelWorkerSession worker_session(worker_id);
1991     size_t stride = _stride;
1992 
1993     size_t max = _heap->num_regions();
1994     while (Atomic::load(&_index) < max) {
1995       size_t cur = Atomic::fetch_then_add(&_index, stride, memory_order_relaxed);
1996       size_t start = cur;
1997       size_t end = MIN2(cur + stride, max);
1998       if (start >= max) break;
1999 
2000       for (size_t i = cur; i < end; i++) {
2001         ShenandoahHeapRegion* current = _heap->get_region(i);
2002         _blk->heap_region_do(current);
2003       }
2004     }
2005   }
2006 };
2007 
2008 void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
2009   assert(blk->is_thread_safe(), "Only thread-safe closures here");
2010   const uint active_workers = workers()->active_workers();
2011   const size_t n_regions = num_regions();
2012   size_t stride = ShenandoahParallelRegionStride;
2013   if (stride == 0 && active_workers > 1) {
2014     // Automatically derive the stride to balance the work between threads
2015     // evenly. Do not try to split work if below the reasonable threshold.
2016     constexpr size_t threshold = 4096;
2017     stride = n_regions <= threshold ?
2018             threshold :
2019             (n_regions + active_workers - 1) / active_workers;
2020   }
2021 
2022   if (n_regions > stride && active_workers > 1) {
2023     ShenandoahParallelHeapRegionTask task(blk, stride);
2024     workers()->run_task(&task);
2025   } else {
2026     heap_region_iterate(blk);
2027   }
2028 }
2029 
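// Empty handshake closure used by rendezvous_threads() to force a handshake with every
// Java thread without performing any per-thread work.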
2030 class ShenandoahRendezvousClosure : public HandshakeClosure {
2031 public:
2032   inline ShenandoahRendezvousClosure(const char* name) : HandshakeClosure(name) {}
2033   inline void do_thread(Thread* thread) {}
2034 };
2035 
2036 void ShenandoahHeap::rendezvous_threads(const char* name) {
2037   ShenandoahRendezvousClosure cl(name);
2038   Handshake::execute(&cl);
2039 }
2040 
2041 void ShenandoahHeap::recycle_trash() {
2042   free_set()->recycle_trash();
2043 }
2044 
2045 void ShenandoahHeap::do_class_unloading() {
2046   _unloader.unload();
2047   if (mode()->is_generational()) {
2048     old_generation()->set_parsable(false);
2049   }
2050 }
2051 
2052 void ShenandoahHeap::stw_weak_refs(bool full_gc) {
2053   // Weak refs processing
2054   ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
2055                                                 : ShenandoahPhaseTimings::degen_gc_weakrefs;
2056   ShenandoahTimingsTracker t(phase);
2057   ShenandoahGCWorkerPhase worker_phase(phase);
2058   shenandoah_assert_generations_reconciled();
2059   gc_generation()->ref_processor()->process_references(phase, workers(), false /* concurrent */);
2060 }
2061 
2062 void ShenandoahHeap::prepare_update_heap_references() {
2063   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2064 
2065   // Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to
2066   // make them parsable for update code to work correctly. Plus, we can compute new sizes
2067   // for future GCLABs here.
2068   if (UseTLAB) {
2069     ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
2070     gclabs_retire(ResizeTLAB);
2071   }
2072 
2073   _update_refs_iterator.reset();
2074 }
2075 
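// At a safepoint, push a pending gc-state change into every thread's thread-local copy.
// No handshake is needed because all Java threads are stopped.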
2076 void ShenandoahHeap::propagate_gc_state_to_all_threads() {
2077   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
2078   if (_gc_state_changed) {
2079     ShenandoahGCStatePropagator propagator(_gc_state.raw_value());
2080     Threads::threads_do(&propagator);
2081     _gc_state_changed = false;
2082   }
2083 }
2084 
2085 void ShenandoahHeap::set_gc_state_at_safepoint(uint mask, bool value) {
2086   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
2087   _gc_state.set_cond(mask, value);
2088   _gc_state_changed = true;
2089 }
2090 
2091 void ShenandoahHeap::set_gc_state_concurrent(uint mask, bool value) {
2092   // Holding the thread lock here ensures that any thread created after we change the gc
2093   // state will have the correct state. It also prevents attaching threads from seeing
2094   // an inconsistent state. See ShenandoahBarrierSet::on_thread_attach for reference. Established
2095   // threads use their thread-local copy of the gc state (changed by a handshake, or at a
2096   // safepoint).
2097   assert(Threads_lock->is_locked(), "Must hold thread lock for concurrent gc state change");
2098   _gc_state.set_cond(mask, value);
2099 }
2100 
2101 void ShenandoahHeap::set_concurrent_young_mark_in_progress(bool in_progress) {
2102   uint mask;
2103   assert(!has_forwarded_objects(), "Young marking is not concurrent with evacuation");
2104   if (!in_progress && is_concurrent_old_mark_in_progress()) {
2105     assert(mode()->is_generational(), "Only generational GC has old marking");
2106     assert(_gc_state.is_set(MARKING), "concurrent_old_marking_in_progress implies MARKING");
2107     // If old-marking is in progress when we turn off YOUNG_MARKING, leave MARKING (and OLD_MARKING) on
2108     mask = YOUNG_MARKING;
2109   } else {
2110     mask = MARKING | YOUNG_MARKING;
2111   }
2112   set_gc_state_at_safepoint(mask, in_progress);
2113   manage_satb_barrier(in_progress);
2114 }
2115 
2116 void ShenandoahHeap::set_concurrent_old_mark_in_progress(bool in_progress) {
2117 #ifdef ASSERT
2118   // has_forwarded_objects() iff UPDATE_REFS or EVACUATION
2119   bool has_forwarded = has_forwarded_objects();
2120   bool updating_or_evacuating = _gc_state.is_set(UPDATE_REFS | EVACUATION);
2121   bool evacuating = _gc_state.is_set(EVACUATION);
2122   assert ((has_forwarded == updating_or_evacuating) || (evacuating && !has_forwarded && collection_set()->is_empty()),
2123           "Updating or evacuating iff has forwarded objects, or if evacuation phase is promoting in place without forwarding");
2124 #endif
2125   if (!in_progress && is_concurrent_young_mark_in_progress()) {
2126     // If young-marking is in progress when we turn off OLD_MARKING, leave MARKING (and YOUNG_MARKING) on
2127     assert(_gc_state.is_set(MARKING), "concurrent_young_marking_in_progress implies MARKING");
2128     set_gc_state_at_safepoint(OLD_MARKING, in_progress);
2129   } else {
2130     set_gc_state_at_safepoint(MARKING | OLD_MARKING, in_progress);
2131   }
2132   manage_satb_barrier(in_progress);
2133 }
2134 
2135 bool ShenandoahHeap::is_prepare_for_old_mark_in_progress() const {
2136   return old_generation()->is_preparing_for_mark();
2137 }
2138 
2139 void ShenandoahHeap::manage_satb_barrier(bool active) {
2140   if (is_concurrent_mark_in_progress()) {
2141     // Ignore request to deactivate barrier while concurrent mark is in progress.
2142     // Do not attempt to re-activate the barrier if it is already active.
2143     if (active && !ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
2144       ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
2145     }
2146   } else {
2147     // No concurrent marking is in progress so honor request to deactivate,
2148     // but only if the barrier is already active.
2149     if (!active && ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
2150       ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
2151     }
2152   }
2153 }
2154 
2155 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
2156   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
2157   set_gc_state_at_safepoint(EVACUATION, in_progress);
2158 }
2159 
2160 void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
2161   if (in_progress) {
2162     _concurrent_strong_root_in_progress.set();
2163   } else {
2164     _concurrent_strong_root_in_progress.unset();
2165   }
2166 }
2167 
2168 void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
2169   set_gc_state_at_safepoint(WEAK_ROOTS, cond);
2170 }
2171 
2172 GCTracer* ShenandoahHeap::tracer() {
2173   return shenandoah_policy()->tracer();
2174 }
2175 
2176 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
2177   return _free_set->used();
2178 }
2179 
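// Atomically record the cancellation cause. Cancellation succeeds only if no cancellation
// was pending, or if the pending cause was a routine concurrent GC request.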
2180 bool ShenandoahHeap::try_cancel_gc(GCCause::Cause cause) {
2181   const GCCause::Cause prev = _cancelled_gc.xchg(cause);
2182   return prev == GCCause::_no_gc || prev == GCCause::_shenandoah_concurrent_gc;
2183 }
2184 
2185 void ShenandoahHeap::cancel_concurrent_mark() {
2186   if (mode()->is_generational()) {
2187     young_generation()->cancel_marking();
2188     old_generation()->cancel_marking();
2189   }
2190 
2191   global_generation()->cancel_marking();
2192 
2193   ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
2194 }
2195 
2196 bool ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
2197   if (try_cancel_gc(cause)) {
2198     FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
2199     log_info(gc,thread)("%s", msg.buffer());
2200     Events::log(Thread::current(), "%s", msg.buffer());
2201     _cancel_requested_time = os::elapsedTime();
2202     return true;
2203   }
2204   return false;
2205 }
2206 
2207 uint ShenandoahHeap::max_workers() {
2208   return _max_workers;
2209 }
2210 
2211 void ShenandoahHeap::stop() {
2212   // The shutdown sequence should be able to terminate when GC is running.
2213 
2214   // Step 0. Notify policy to disable event recording and prevent visiting gc threads during shutdown
2215   _shenandoah_policy->record_shutdown();
2216 
2217   // Step 1. Stop reporting on gc thread cpu utilization
2218   mmu_tracker()->stop();
2219 
2220   // Step 2. Wait until the GC control thread exits normally (this will cancel any ongoing GC).
2221   control_thread()->stop();
2222 
2223   // Step 3. Shut down the uncommit thread.
2224   if (_uncommit_thread != nullptr) {
2225     _uncommit_thread->stop();
2226   }
2227 }
2228 
2229 void ShenandoahHeap::stw_unload_classes(bool full_gc) {
2230   if (!unload_classes()) return;
2231   ClassUnloadingContext ctx(_workers->active_workers(),
2232                             true /* unregister_nmethods_during_purge */,
2233                             false /* lock_nmethod_free_separately */);
2234 
2235   // Unload classes and purge SystemDictionary.
2236   {
2237     ShenandoahPhaseTimings::Phase phase = full_gc ?
2238                                           ShenandoahPhaseTimings::full_gc_purge_class_unload :
2239                                           ShenandoahPhaseTimings::degen_gc_purge_class_unload;
2240     ShenandoahIsAliveSelector is_alive;
2241     {
2242       CodeCache::UnlinkingScope scope(is_alive.is_alive_closure());
2243       ShenandoahGCPhase gc_phase(phase);
2244       ShenandoahGCWorkerPhase worker_phase(phase);
2245       bool unloading_occurred = SystemDictionary::do_unloading(gc_timer());
2246 
2247       uint num_workers = _workers->active_workers();
2248       ShenandoahClassUnloadingTask unlink_task(phase, num_workers, unloading_occurred);
2249       _workers->run_task(&unlink_task);
2250     }
2251     // Release unloaded nmethods' memory.
2252     ClassUnloadingContext::context()->purge_and_free_nmethods();
2253   }
2254 
2255   {
2256     ShenandoahGCPhase phase(full_gc ?
2257                             ShenandoahPhaseTimings::full_gc_purge_cldg :
2258                             ShenandoahPhaseTimings::degen_gc_purge_cldg);
2259     ClassLoaderDataGraph::purge(true /* at_safepoint */);
2260   }
2261   // Resize and verify metaspace
2262   MetaspaceGC::compute_new_size();
2263   DEBUG_ONLY(MetaspaceUtils::verify();)
2264 }
2265 
2266 // Weak roots are either pre-evacuated (final mark) or updated (final update refs),
2267 // so they should not have forwarded oops.
2268 // However, we do need to "null" dead oops in the roots, as this cannot be done
2269 // in concurrent cycles.
2270 void ShenandoahHeap::stw_process_weak_roots(bool full_gc) {
2271   uint num_workers = _workers->active_workers();
2272   ShenandoahPhaseTimings::Phase timing_phase = full_gc ?
2273                                                ShenandoahPhaseTimings::full_gc_purge_weak_par :
2274                                                ShenandoahPhaseTimings::degen_gc_purge_weak_par;
2275   ShenandoahGCPhase phase(timing_phase);
2276   ShenandoahGCWorkerPhase worker_phase(timing_phase);
2277   // Cleanup weak roots
2278   if (has_forwarded_objects()) {
2279     ShenandoahForwardedIsAliveClosure is_alive;
2280     ShenandoahNonConcUpdateRefsClosure keep_alive;
2281     ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveClosure, ShenandoahNonConcUpdateRefsClosure>
2282       cleaning_task(timing_phase, &is_alive, &keep_alive, num_workers);
2283     _workers->run_task(&cleaning_task);
2284   } else {
2285     ShenandoahIsAliveClosure is_alive;
2286 #ifdef ASSERT
2287     ShenandoahAssertNotForwardedClosure verify_cl;
2288     ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, ShenandoahAssertNotForwardedClosure>
2289       cleaning_task(timing_phase, &is_alive, &verify_cl, num_workers);
2290 #else
2291     ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, DoNothingClosure>
2292       cleaning_task(timing_phase, &is_alive, &do_nothing_cl, num_workers);
2293 #endif
2294     _workers->run_task(&cleaning_task);
2295   }
2296 }
2297 
2298 void ShenandoahHeap::parallel_cleaning(bool full_gc) {
2299   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2300   assert(is_stw_gc_in_progress(), "Only for Degenerated and Full GC");
2301   ShenandoahGCPhase phase(full_gc ?
2302                           ShenandoahPhaseTimings::full_gc_purge :
2303                           ShenandoahPhaseTimings::degen_gc_purge);
2304   stw_weak_refs(full_gc);
2305   stw_process_weak_roots(full_gc);
2306   stw_unload_classes(full_gc);
2307 }
2308 
2309 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
2310   set_gc_state_at_safepoint(HAS_FORWARDED, cond);
2311 }
2312 
2313 void ShenandoahHeap::set_unload_classes(bool uc) {
2314   _unload_classes.set_cond(uc);
2315 }
2316 
2317 bool ShenandoahHeap::unload_classes() const {
2318   return _unload_classes.is_set();
2319 }
2320 
2321 address ShenandoahHeap::in_cset_fast_test_addr() {
2322   ShenandoahHeap* heap = ShenandoahHeap::heap();
2323   assert(heap->collection_set() != nullptr, "Sanity");
2324   return (address) heap->collection_set()->biased_map_address();
2325 }
2326 
2327 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
2328   if (mode()->is_generational()) {
2329     young_generation()->reset_bytes_allocated_since_gc_start();
2330     old_generation()->reset_bytes_allocated_since_gc_start();
2331   }
2332 
2333   global_generation()->reset_bytes_allocated_since_gc_start();
2334 }
2335 
2336 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
2337   _degenerated_gc_in_progress.set_cond(in_progress);
2338 }
2339 
2340 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2341   _full_gc_in_progress.set_cond(in_progress);
2342 }
2343 
2344 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2345   assert (is_full_gc_in_progress(), "should be");
2346   _full_gc_move_in_progress.set_cond(in_progress);
2347 }
2348 
2349 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2350   set_gc_state_at_safepoint(UPDATE_REFS, in_progress);
2351 }
2352 
2353 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2354   ShenandoahCodeRoots::register_nmethod(nm);
2355 }
2356 
2357 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
2358   ShenandoahCodeRoots::unregister_nmethod(nm);
2359 }
2360 
2361 void ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
2362   heap_region_containing(o)->record_pin();
2363 }
2364 
2365 void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
2366   ShenandoahHeapRegion* r = heap_region_containing(o);
2367   assert(r != nullptr, "Sanity");
2368   assert(r->pin_count() > 0, "Region %zu should have non-zero pins", r->index());
2369   r->record_unpin();
2370 }
2371 
2372 void ShenandoahHeap::sync_pinned_region_status() {
2373   ShenandoahHeapLocker locker(lock());
2374 
2375   for (size_t i = 0; i < num_regions(); i++) {
2376     ShenandoahHeapRegion *r = get_region(i);
2377     if (r->is_active()) {
2378       if (r->is_pinned()) {
2379         if (r->pin_count() == 0) {
2380           r->make_unpinned();
2381         }
2382       } else {
2383         if (r->pin_count() > 0) {
2384           r->make_pinned();
2385         }
2386       }
2387     }
2388   }
2389 
2390   assert_pinned_region_status();
2391 }
2392 
2393 #ifdef ASSERT
2394 void ShenandoahHeap::assert_pinned_region_status() {
2395   for (size_t i = 0; i < num_regions(); i++) {
2396     ShenandoahHeapRegion* r = get_region(i);
2397     shenandoah_assert_generations_reconciled();
2398     if (gc_generation()->contains(r)) {
2399       assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
2400              "Region %zu pinning status is inconsistent", i);
2401     }
2402   }
2403 }
2404 #endif
2405 
2406 ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
2407   return _gc_timer;
2408 }
2409 
2410 void ShenandoahHeap::prepare_concurrent_roots() {
2411   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2412   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2413   set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
2414   set_concurrent_weak_root_in_progress(true);
2415   if (unload_classes()) {
2416     _unloader.prepare();
2417   }
2418 }
2419 
2420 void ShenandoahHeap::finish_concurrent_roots() {
2421   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2422   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2423   if (unload_classes()) {
2424     _unloader.finish();
2425   }
2426 }
2427 
2428 #ifdef ASSERT
2429 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
2430   assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
2431 
2432   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
2433     // Use ParallelGCThreads inside safepoints
2434     assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads (%u) within safepoint, not %u",
2435            ParallelGCThreads, nworkers);
2436   } else {
2437     // Use ConcGCThreads outside safepoints
2438     assert(nworkers == ConcGCThreads, "Use ConcGCThreads (%u) outside safepoints, %u",
2439            ConcGCThreads, nworkers);
2440   }
2441 }
2442 #endif
2443 
2444 ShenandoahVerifier* ShenandoahHeap::verifier() {
2445   guarantee(ShenandoahVerify, "Should be enabled");
2446   assert (_verifier != nullptr, "sanity");
2447   return _verifier;
2448 }
2449 
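// Worker task for the update-refs phase: walks regions from the shared region iterator
// and updates references below each region's update watermark. The CONCURRENT template
// parameter selects the closure type and whether workers join the suspendible thread set.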
2450 template<bool CONCURRENT>
2451 class ShenandoahUpdateHeapRefsTask : public WorkerTask {
2452 private:
2453   ShenandoahHeap* _heap;
2454   ShenandoahRegionIterator* _regions;
2455 public:
2456   explicit ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions) :
2457     WorkerTask("Shenandoah Update References"),
2458     _heap(ShenandoahHeap::heap()),
2459     _regions(regions) {
2460   }
2461 
2462   void work(uint worker_id) {
2463     if (CONCURRENT) {
2464       ShenandoahConcurrentWorkerSession worker_session(worker_id);
2465       ShenandoahSuspendibleThreadSetJoiner stsj;
2466       do_work<ShenandoahConcUpdateRefsClosure>(worker_id);
2467     } else {
2468       ShenandoahParallelWorkerSession worker_session(worker_id);
2469       do_work<ShenandoahNonConcUpdateRefsClosure>(worker_id);
2470     }
2471   }
2472 
2473 private:
2474   template<class T>
2475   void do_work(uint worker_id) {
2476     if (CONCURRENT && (worker_id == 0)) {
2477       // We ask the first worker to replenish the Mutator free set by moving regions previously reserved to hold the
2478       // results of evacuation.  These reserves are no longer necessary because evacuation has completed.
2479       size_t cset_regions = _heap->collection_set()->count();
2480 
2481       // Now that evacuation is done, we can reassign any regions that had been reserved to hold the results of evacuation
2482       // to the mutator free set.  At the end of GC, we will have cset_regions newly evacuated fully empty regions from
2483       // which we will be able to replenish the Collector free set and the OldCollector free set in preparation for the
2484       // next GC cycle.
2485       _heap->free_set()->move_regions_from_collector_to_mutator(cset_regions);
2486     }
2487     // If !CONCURRENT, there's no value in expanding the Mutator free set.
2488     T cl;
2489     ShenandoahHeapRegion* r = _regions->next();
2490     while (r != nullptr) {
2491       HeapWord* update_watermark = r->get_update_watermark();
2492       assert (update_watermark >= r->bottom(), "sanity");
2493       if (r->is_active() && !r->is_cset()) {
2494         _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2495         if (ShenandoahPacing) {
2496           _heap->pacer()->report_update_refs(pointer_delta(update_watermark, r->bottom()));
2497         }
2498       }
2499       if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
2500         return;
2501       }
2502       r = _regions->next();
2503     }
2504   }
2505 };
2506 
2507 void ShenandoahHeap::update_heap_references(bool concurrent) {
2508   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2509 
2510   if (concurrent) {
2511     ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator);
2512     workers()->run_task(&task);
2513   } else {
2514     ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator);
2515     workers()->run_task(&task);
2516   }
2517 }
2518 
2519 void ShenandoahHeap::update_heap_region_states(bool concurrent) {
2520   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2521   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2522 
2523   {
2524     ShenandoahGCPhase phase(concurrent ?
2525                             ShenandoahPhaseTimings::final_update_refs_update_region_states :
2526                             ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
2527 
2528     final_update_refs_update_region_states();
2529 
2530     assert_pinned_region_status();
2531   }
2532 
2533   {
2534     ShenandoahGCPhase phase(concurrent ?
2535                             ShenandoahPhaseTimings::final_update_refs_trash_cset :
2536                             ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
2537     trash_cset_regions();
2538   }
2539 }
2540 
2541 void ShenandoahHeap::final_update_refs_update_region_states() {
2542   ShenandoahSynchronizePinnedRegionStates cl;
2543   parallel_heap_region_iterate(&cl);
2544 }
2545 
2546 void ShenandoahHeap::rebuild_free_set(bool concurrent) {
2547   ShenandoahGCPhase phase(concurrent ?
2548                           ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
2549                           ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
2550   ShenandoahHeapLocker locker(lock());
2551   size_t young_cset_regions, old_cset_regions;
2552   size_t first_old_region, last_old_region, old_region_count;
2553   _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count);
2554   // If there are no old regions, first_old_region will be greater than last_old_region
2555   assert((first_old_region > last_old_region) ||
2556          ((last_old_region + 1 - first_old_region >= old_region_count) &&
2557           get_region(first_old_region)->is_old() && get_region(last_old_region)->is_old()),
2558          "sanity: old_region_count: %zu, first_old_region: %zu, last_old_region: %zu",
2559          old_region_count, first_old_region, last_old_region);
2560 
2561   if (mode()->is_generational()) {
2562 #ifdef ASSERT
2563     if (ShenandoahVerify) {
2564       verifier()->verify_before_rebuilding_free_set();
2565     }
2566 #endif
2567 
2568     // The computation of bytes_of_allocation_runway_before_gc_trigger is quite conservative so consider all of this
2569     // available for transfer to old. Note that transfer of humongous regions does not impact available.
2570     ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
2571     size_t allocation_runway = gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions);
2572     gen_heap->compute_old_generation_balance(allocation_runway, old_cset_regions);
2573 
2574     // Total old_available may have been expanded to hold anticipated promotions.  We trigger if the fragmented available
2575     // memory represents more than 16 regions worth of data.  Note that fragmentation may increase when we promote regular
2576     // regions in place when many of these regular regions have an abundant amount of available memory within them.  Fragmentation
2577     // will decrease as promote-by-copy consumes the available memory within these partially consumed regions.
2578     //
2579     // We consider old-gen to have excessive fragmentation if more than 12.5% of old-gen is free memory that resides
2580     // within partially consumed regions of memory.
2581   }
2582   // Rebuild free set based on adjusted generation sizes.
2583   _free_set->finish_rebuild(young_cset_regions, old_cset_regions, old_region_count);
2584 
2585   if (mode()->is_generational()) {
2586     ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
2587     ShenandoahOldGeneration* old_gen = gen_heap->old_generation();
2588     old_gen->heuristics()->evaluate_triggers(first_old_region, last_old_region, old_region_count, num_regions());
2589   }
2590 }
2591 
2592 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2593   print_on(st);
2594   st->cr();
2595   print_heap_regions_on(st);
2596 }
2597 
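// The marking bitmap is committed in slices, each covering _bitmap_regions_per_slice heap
// regions. A slice may be uncommitted only when none of the regions it covers remain
// committed; the helpers below check and maintain that invariant.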
2598 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2599   size_t slice = r->index() / _bitmap_regions_per_slice;
2600 
2601   size_t regions_from = _bitmap_regions_per_slice * slice;
2602   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2603   for (size_t g = regions_from; g < regions_to; g++) {
2604     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2605     if (skip_self && g == r->index()) continue;
2606     if (get_region(g)->is_committed()) {
2607       return true;
2608     }
2609   }
2610   return false;
2611 }
2612 
2613 bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2614   shenandoah_assert_heaplocked();
2615 
2616   // Bitmaps in special regions do not need commits
2617   if (_bitmap_region_special) {
2618     return true;
2619   }
2620 
2621   if (is_bitmap_slice_committed(r, true)) {
2622     // Some other region from the group is already committed, meaning the bitmap
2623     // slice is already committed, so we exit right away.
2624     return true;
2625   }
2626 
2627   // Commit the bitmap slice:
2628   size_t slice = r->index() / _bitmap_regions_per_slice;
2629   size_t off = _bitmap_bytes_per_slice * slice;
2630   size_t len = _bitmap_bytes_per_slice;
2631   char* start = (char*) _bitmap_region.start() + off;
2632 
2633   if (!os::commit_memory(start, len, false)) {
2634     return false;
2635   }
2636 
2637   if (AlwaysPreTouch) {
2638     os::pretouch_memory(start, start + len, _pretouch_bitmap_page_size);
2639   }
2640 
2641   return true;
2642 }
2643 
2644 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2645   shenandoah_assert_heaplocked();
2646 
2647   // Bitmaps in special regions do not need uncommits
2648   if (_bitmap_region_special) {
2649     return true;
2650   }
2651 
2652   if (is_bitmap_slice_committed(r, true)) {
2653     // Some other region from the group is still committed, meaning the bitmap
2654     // slice should stay committed; exit right away.
2655     return true;
2656   }
2657 
2658   // Uncommit the bitmap slice:
2659   size_t slice = r->index() / _bitmap_regions_per_slice;
2660   size_t off = _bitmap_bytes_per_slice * slice;
2661   size_t len = _bitmap_bytes_per_slice;
2662   if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
2663     return false;
2664   }
2665   return true;
2666 }
2667 
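     // The two methods below delegate to the uncommit thread, if one is running, to temporarily block
     // and later re-allow uncommitting of heap memory.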
2668 void ShenandoahHeap::forbid_uncommit() {
2669   if (_uncommit_thread != nullptr) {
2670     _uncommit_thread->forbid_uncommit();
2671   }
2672 }
2673 
2674 void ShenandoahHeap::allow_uncommit() {
2675   if (_uncommit_thread != nullptr) {
2676     _uncommit_thread->allow_uncommit();
2677   }
2678 }
2679 
2680 #ifdef ASSERT
2681 bool ShenandoahHeap::is_uncommit_in_progress() {
2682   if (_uncommit_thread != nullptr) {
2683     return _uncommit_thread->is_uncommit_in_progress();
2684   }
2685   return false;
2686 }
2687 #endif
2688 
2689 void ShenandoahHeap::safepoint_synchronize_begin() {
2690   StackWatermarkSet::safepoint_synchronize_begin();
2691   SuspendibleThreadSet::synchronize();
2692 }
2693 
2694 void ShenandoahHeap::safepoint_synchronize_end() {
2695   SuspendibleThreadSet::desynchronize();
2696 }
2697 
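     // Diagnostic support: when ShenandoahAllocFailureALot is enabled, roughly 5% of calls
     // ((os::random() % 1000) > 950) arm the failure-injection flag, then sleep briefly so a GC
     // cancellation triggered by the injected failure can be observed and logged.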
2698 void ShenandoahHeap::try_inject_alloc_failure() {
2699   if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2700     _inject_alloc_failure.set();
2701     os::naked_short_sleep(1);
2702     if (cancelled_gc()) {
2703       log_info(gc)("Allocation failure was successfully injected");
2704     }
2705   }
2706 }
2707 
2708 bool ShenandoahHeap::should_inject_alloc_failure() {
2709   return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2710 }
2711 
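     // Serviceability support: Shenandoah exposes a single memory pool, registered with both the
     // concurrent-cycle and STW memory managers.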
2712 void ShenandoahHeap::initialize_serviceability() {
2713   _memory_pool = new ShenandoahMemoryPool(this);
2714   _cycle_memory_manager.add_pool(_memory_pool);
2715   _stw_memory_manager.add_pool(_memory_pool);
2716 }
2717 
2718 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2719   GrowableArray<GCMemoryManager*> memory_managers(2);
2720   memory_managers.append(&_cycle_memory_manager);
2721   memory_managers.append(&_stw_memory_manager);
2722   return memory_managers;
2723 }
2724 
2725 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2726   GrowableArray<MemoryPool*> memory_pools(1);
2727   memory_pools.append(_memory_pool);
2728   return memory_pools;
2729 }
2730 
2731 MemoryUsage ShenandoahHeap::memory_usage() {
2732   return MemoryUsage(_initial_size, used(), committed(), max_capacity());
2733 }
2734 
2735 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2736   _heap(ShenandoahHeap::heap()),
2737   _index(0) {}
2738 
2739 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2740   _heap(heap),
2741   _index(0) {}
2742 
2743 void ShenandoahRegionIterator::reset() {
2744   _index = 0;
2745 }
2746 
2747 bool ShenandoahRegionIterator::has_next() const {
2748   return _index < _heap->num_regions();
2749 }
2750 
2751 char ShenandoahHeap::gc_state() const {
2752   return _gc_state.raw_value();
2753 }
2754 
2755 bool ShenandoahHeap::is_gc_state(GCState state) const {
2756   // If the global gc state has been changed but has not yet been propagated to all threads, then
2757   // the global gc state is the correct value. Once the gc state has been synchronized with all threads,
2758   // _gc_state_changed is reset to false and we must use the thread-local state instead.
2759   return _gc_state_changed ? _gc_state.is_set(state) : ShenandoahThreadLocalData::is_gc_state(state);
2760 }
2761 
2762 
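     // Hands out the per-worker liveness cache, one counter per region. In debug builds, verifies the
     // cache was left zeroed by the previous flush before it is reused.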
2763 ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
2764 #ifdef ASSERT
2765   assert(_liveness_cache != nullptr, "sanity");
2766   assert(worker_id < _max_workers, "sanity");
2767   for (uint i = 0; i < num_regions(); i++) {
2768     assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
2769   }
2770 #endif
2771   return _liveness_cache[worker_id];
2772 }
2773 
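     // Folds the worker's accumulated live data (in GC words) into the owning regions and zeroes the
     // cache entries for the next cycle.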
2774 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
2775   assert(worker_id < _max_workers, "sanity");
2776   assert(_liveness_cache != nullptr, "sanity");
2777   ShenandoahLiveData* ld = _liveness_cache[worker_id];
2778   for (uint i = 0; i < num_regions(); i++) {
2779     ShenandoahLiveData live = ld[i];
2780     if (live > 0) {
2781       ShenandoahHeapRegion* r = get_region(i);
2782       r->increase_live_data_gc_words(live);
2783       ld[i] = 0;
2784     }
2785   }
2786 }
2787 
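     // A stack chunk needs GC barriers unless the heap is idle: during concurrent marking if the chunk
     // was not allocated after mark start (and thus is not implicitly alive), and whenever forwarded
     // objects may still exist.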
2788 bool ShenandoahHeap::requires_barriers(stackChunkOop obj) const {
2789   if (is_idle()) return false;
2790 
2791   // Objects allocated after marking start are implicitly alive, so they don't need any barriers
2792   // during the marking phase.
2793   if (is_concurrent_mark_in_progress() &&
2794      !marking_context()->allocated_after_mark_start(obj)) {
2795     return true;
2796   }
2797 
2798   // Cannot guarantee obj is deeply good (its references may point to forwarded objects).
2799   if (has_forwarded_objects()) {
2800     return true;
2801   }
2802 
2803   return false;
2804 }
2805 
2806 HeapWord* ShenandoahHeap::allocate_loaded_archive_space(size_t size) {
2807 #if INCLUDE_CDS_JAVA_HEAP
2808   // CDS wants a contiguous memory range to load a bunch of objects.
2809   // This effectively bypasses normal allocation paths, and requires
2810   // a bit of massaging to restore GC invariants.
2811 
2812   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
2813 
2814   // Easy case: a single regular region, no further adjustments needed.
2815   if (!ShenandoahHeapRegion::requires_humongous(size)) {
2816     return allocate_memory(req);
2817   }
2818 
2819   // Hard case: the requested size would cause a humongous allocation.
2820   // We need to make sure it looks like regular allocation to the rest of GC.
2821 
2822   // CDS code guarantees that no objects straddle multiple regions, as long as
2823   // regions are at least as large as MIN_GC_REGION_ALIGNMENT. It is impractical at this
2824   // point to deal with the case where Shenandoah runs with smaller regions.
2825   // TODO: This check can be dropped once MIN_GC_REGION_ALIGNMENT agrees more with Shenandoah.
2826   if (ShenandoahHeapRegion::region_size_bytes() < ArchiveHeapWriter::MIN_GC_REGION_ALIGNMENT) {
2827     return nullptr;
2828   }
2829 
2830   HeapWord* mem = allocate_memory(req);
2831   size_t start_idx = heap_region_index_containing(mem);
2832   size_t num_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
2833 
2834   // Flip humongous -> regular.
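       // Each region in the allocated range is switched to a regular region (bypassing the usual state
       // transitions) so the CDS-initialized objects it holds look like ordinary allocations to the rest
       // of GC, per the note above.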
2835   {
2836     ShenandoahHeapLocker locker(lock(), false);
2837     for (size_t c = start_idx; c < start_idx + num_regions; c++) {
2838       get_region(c)->make_regular_bypass();
2839     }
2840   }
2841 
2842   return mem;
2843 #else
2844   assert(false, "Archive heap loader should not be available; should not reach here");
2845   return nullptr;
2846 #endif // INCLUDE_CDS_JAVA_HEAP
2847 }
2848 
2849 void ShenandoahHeap::complete_loaded_archive_space(MemRegion archive_space) {
2850   // Nothing to do here, except checking that the heap looks fine.
2851 #ifdef ASSERT
2852   HeapWord* start = archive_space.start();
2853   HeapWord* end = archive_space.end();
2854 
2855   // No unclaimed space between the objects.
2856   // Objects are properly allocated in correct regions.
2857   HeapWord* cur = start;
2858   while (cur < end) {
2859     oop oop = cast_to_oop(cur);
2860     shenandoah_assert_in_correct_region(nullptr, oop);
2861     cur += oop->size();
2862   }
2863 
2864   // No unclaimed tail at the end of archive space.
2865   assert(cur == end,
2866          "Archive space should be fully used: " PTR_FORMAT " " PTR_FORMAT,
2867          p2i(cur), p2i(end));
2868 
2869   // Region bounds are good.
2870   ShenandoahHeapRegion* begin_reg = heap_region_containing(start);
2871   ShenandoahHeapRegion* end_reg = heap_region_containing(end);
2872   assert(begin_reg->is_regular(), "Must be");
2873   assert(end_reg->is_regular(), "Must be");
2874   assert(begin_reg->bottom() == start,
2875          "Must agree: archive-space-start: " PTR_FORMAT ", begin-region-bottom: " PTR_FORMAT,
2876          p2i(start), p2i(begin_reg->bottom()));
2877   assert(end_reg->top() == end,
2878          "Must agree: archive-space-end: " PTR_FORMAT ", end-region-top: " PTR_FORMAT,
2879          p2i(end), p2i(end_reg->top()));
2880 #endif
2881 }
2882 
2883 ShenandoahGeneration* ShenandoahHeap::generation_for(ShenandoahAffiliation affiliation) const {
2884   if (!mode()->is_generational()) {
2885     return global_generation();
2886   } else if (affiliation == YOUNG_GENERATION) {
2887     return young_generation();
2888   } else if (affiliation == OLD_GENERATION) {
2889     return old_generation();
2890   }
2891 
2892   ShouldNotReachHere();
2893   return nullptr;
2894 }
2895 
2896 void ShenandoahHeap::log_heap_status(const char* msg) const {
2897   if (mode()->is_generational()) {
2898     young_generation()->log_status(msg);
2899     old_generation()->log_status(msg);
2900   } else {
2901     global_generation()->log_status(msg);
2902   }
2903 }