/*
 * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/universe.hpp"

#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/plab.hpp"
#include "gc/shared/slidingForwarding.hpp"
#include "gc/shared/tlab_globals.hpp"

#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahInitLogger.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahMemoryPool.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPacer.inline.hpp"
#include "gc/shenandoah/shenandoahPadding.hpp"
#include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/mode/shenandoahIUMode.hpp"
#include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
#include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
#if INCLUDE_JFR
#include "gc/shenandoah/shenandoahJfrSupport.hpp"
#endif

#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "memory/classLoaderMetaspace.hpp"
#include "memory/metaspaceUtils.hpp"
#include "nmt/mallocTracker.hpp"
#include "nmt/memTracker.hpp"
#include "oops/compressedOops.inline.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/stackWatermarkSet.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/events.hpp"
#include "utilities/powerOfTwo.hpp"

class ShenandoahPretouchHeapTask : public WorkerTask {
private:
  ShenandoahRegionIterator _regions;
  const size_t _page_size;
public:
  ShenandoahPretouchHeapTask(size_t page_size) :
    WorkerTask("Shenandoah Pretouch Heap"),
    _page_size(page_size) {}

  virtual void work(uint worker_id) {
    ShenandoahHeapRegion* r = _regions.next();
    while (r != nullptr) {
      if (r->is_committed()) {
        os::pretouch_memory(r->bottom(), r->end(), _page_size);
      }
      r = _regions.next();
    }
  }
};

class ShenandoahPretouchBitmapTask : public WorkerTask {
private:
  ShenandoahRegionIterator _regions;
  char* _bitmap_base;
  const size_t _bitmap_size;
  const size_t _page_size;
public:
  ShenandoahPretouchBitmapTask(char* bitmap_base, size_t bitmap_size, size_t page_size) :
    WorkerTask("Shenandoah Pretouch Bitmap"),
    _bitmap_base(bitmap_base),
    _bitmap_size(bitmap_size),
    _page_size(page_size) {}

  virtual void work(uint worker_id) {
    ShenandoahHeapRegion* r = _regions.next();
    while (r != nullptr) {
      size_t start = r->index()       * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      size_t end   = (r->index() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " <= " SIZE_FORMAT, end, _bitmap_size);

      if (r->is_committed()) {
        os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);
      }

      r = _regions.next();
    }
  }
};
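
// Note on the pretouch tasks above: ShenandoahRegionIterator hands out regions to
// workers with an atomic claim, so each region (and its slice of the bitmap) is
// pretouched by exactly one worker. With a first-touch NUMA placement policy, this
// spreads the backing pages over the nodes of the touching threads instead of
// concentrating them on the single initializing thread.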

jint ShenandoahHeap::initialize() {
  //
  // Figure out heap sizing
  //

  size_t init_byte_size = InitialHeapSize;
  size_t min_byte_size  = MinHeapSize;
  size_t max_byte_size  = MaxHeapSize;
  size_t heap_alignment = HeapAlignment;

  size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();

  Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
  Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");

  _num_regions = ShenandoahHeapRegion::region_count();
  assert(_num_regions == (max_byte_size / reg_size_bytes),
         "Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT,
         _num_regions, max_byte_size, reg_size_bytes);

  // Now we know the number of regions, initialize the heuristics.
  initialize_heuristics();

  size_t num_committed_regions = init_byte_size / reg_size_bytes;
  num_committed_regions = MIN2(num_committed_regions, _num_regions);
  assert(num_committed_regions <= _num_regions, "sanity");
  _initial_size = num_committed_regions * reg_size_bytes;

  size_t num_min_regions = min_byte_size / reg_size_bytes;
  num_min_regions = MIN2(num_min_regions, _num_regions);
  assert(num_min_regions <= _num_regions, "sanity");
  _minimum_size = num_min_regions * reg_size_bytes;

  // Default to max heap size.
  _soft_max_size = _num_regions * reg_size_bytes;

  _committed = _initial_size;

  size_t heap_page_size   = UseLargePages ? os::large_page_size() : os::vm_page_size();
  size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
  size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();

  //
  // Reserve and commit memory for heap
  //

  ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
  initialize_reserved_region(heap_rs);
  _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
  _heap_region_special = heap_rs.special();

  assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
         "Misaligned heap: " PTR_FORMAT, p2i(base()));
  os::trace_page_sizes_for_requested_size("Heap",
                                          max_byte_size, heap_alignment,
                                          heap_rs.base(),
                                          heap_rs.size(), heap_rs.page_size());

#if SHENANDOAH_OPTIMIZED_MARKTASK
  // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
  // Fail if we ever attempt to address more than we can.
  if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
    FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
                          "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
                          "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
                p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
    vm_exit_during_initialization("Fatal Error", buf);
  }
#endif

  ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
  if (!_heap_region_special) {
    os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
                              "Cannot commit heap memory");
  }

  //
  // Reserve and commit memory for bitmap(s)
  //

  size_t bitmap_size_orig = ShenandoahMarkBitMap::compute_size(heap_rs.size());
  _bitmap_size = align_up(bitmap_size_orig, bitmap_page_size);

  size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();

  guarantee(bitmap_bytes_per_region != 0,
            "Bitmap bytes per region should not be zero");
  guarantee(is_power_of_2(bitmap_bytes_per_region),
            "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);

  if (bitmap_page_size > bitmap_bytes_per_region) {
    _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
    _bitmap_bytes_per_slice = bitmap_page_size;
  } else {
    _bitmap_regions_per_slice = 1;
    _bitmap_bytes_per_slice = bitmap_bytes_per_region;
  }
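
  // Illustrative example (assuming, for this sketch, a heap map factor of 64, i.e.
  // 64 heap bytes per bitmap byte): 2 MB regions then need 32 KB of mark bitmap each.
  // With 4 KB pages, a slice is a single region's 32 KB of bitmap; with 2 MB large
  // pages, one page covers the bitmaps of 64 regions, so commits happen in 2 MB
  // slices spanning 64 regions.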

  guarantee(_bitmap_regions_per_slice >= 1,
            "Should have at least one region per slice: " SIZE_FORMAT,
            _bitmap_regions_per_slice);

  guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
            "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
            _bitmap_bytes_per_slice, bitmap_page_size);

  ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
  os::trace_page_sizes_for_requested_size("Mark Bitmap",
                                          bitmap_size_orig, bitmap_page_size,
                                          bitmap.base(),
                                          bitmap.size(), bitmap.page_size());
  MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
  _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
  _bitmap_region_special = bitmap.special();

  size_t bitmap_init_commit = _bitmap_bytes_per_slice *
                              align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
  bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
  if (!_bitmap_region_special) {
    os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
                              "Cannot commit bitmap memory");
  }

  _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions, _max_workers);

  if (ShenandoahVerify) {
    ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
    os::trace_page_sizes_for_requested_size("Verify Bitmap",
                                            bitmap_size_orig, bitmap_page_size,
                                            verify_bitmap.base(),
                                            verify_bitmap.size(), verify_bitmap.page_size());
    if (!verify_bitmap.special()) {
      os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
                                "Cannot commit verification bitmap memory");
    }
    MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
    MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
    _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
    _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
  }

  // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
  size_t aux_bitmap_page_size = bitmap_page_size;
#ifdef LINUX
  // In THP "advise" mode, we refrain from advising the system to use large pages,
  // since we know these commits will be short-lived, and there is no reason to
  // trash the THP area with this bitmap.
  if (UseTransparentHugePages) {
    aux_bitmap_page_size = os::vm_page_size();
  }
#endif
  ReservedSpace aux_bitmap(_bitmap_size, aux_bitmap_page_size);
  os::trace_page_sizes_for_requested_size("Aux Bitmap",
                                          bitmap_size_orig, aux_bitmap_page_size,
                                          aux_bitmap.base(),
                                          aux_bitmap.size(), aux_bitmap.page_size());
  MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
  _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
  _aux_bitmap_region_special = aux_bitmap.special();
  _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);

  //
  // Create regions and region sets
  //
  size_t region_align = align_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE);
  size_t region_storage_size_orig = region_align * _num_regions;
  size_t region_storage_size = align_up(region_storage_size_orig,
                                        MAX2(region_page_size, os::vm_allocation_granularity()));

  ReservedSpace region_storage(region_storage_size, region_page_size);
  os::trace_page_sizes_for_requested_size("Region Storage",
                                          region_storage_size_orig, region_page_size,
                                          region_storage.base(),
                                          region_storage.size(), region_storage.page_size());
  MemTracker::record_virtual_memory_type(region_storage.base(), mtGC);
  if (!region_storage.special()) {
    os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false,
                              "Cannot commit region memory");
  }

  // Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks.
  // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there.
  // If not successful, bite the bullet and allocate at whatever address.
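  //
  // Sketch of the addressing trick (see ShenandoahCollectionSet for the real code):
  // the cset map holds one byte per region, so membership is, roughly,
  //   in_cset(addr) = map[(uintptr_t)addr >> region_size_bytes_shift()] != 0
  // and with a biased base, biased_map = map - (heap_base >> shift), the check folds
  // into a single byte load off the shifted address. Reserving the map at a low,
  // predictable address keeps that base cheap to encode in generated code.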
  {
    const size_t cset_align = MAX2<size_t>(os::vm_page_size(), os::vm_allocation_granularity());
    const size_t cset_size = align_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align);
    const size_t cset_page_size = os::vm_page_size();

    uintptr_t min = round_up_power_of_2(cset_align);
    uintptr_t max = (1u << 30u);
    ReservedSpace cset_rs;

    for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
      char* req_addr = (char*)addr;
      assert(is_aligned(req_addr, cset_align), "Should be aligned");
      cset_rs = ReservedSpace(cset_size, cset_align, cset_page_size, req_addr);
      if (cset_rs.is_reserved()) {
        assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
        _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
        break;
      }
    }

    if (_collection_set == nullptr) {
      cset_rs = ReservedSpace(cset_size, cset_align, os::vm_page_size());
      _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
    }
    os::trace_page_sizes_for_requested_size("Collection Set",
                                            cset_size, cset_page_size,
                                            cset_rs.base(),
                                            cset_rs.size(), cset_rs.page_size());
  }

  _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
  _free_set = new ShenandoahFreeSet(this, _num_regions);

  {
    ShenandoahHeapLocker locker(lock());

    for (size_t i = 0; i < _num_regions; i++) {
      HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
      bool is_committed = i < num_committed_regions;
      void* loc = region_storage.base() + i * region_align;

      ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
      assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");

      _marking_context->initialize_top_at_mark_start(r);
      _regions[i] = r;
      assert(!collection_set()->is_in(i), "New region should not be in collection set");
    }

    // Initialize to complete
    _marking_context->mark_complete();

    _free_set->rebuild();
  }

  if (AlwaysPreTouch) {
    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before initialize() below zeroes it with the initializing thread. For any given region,
    // we touch the region and the corresponding bitmaps from the same thread.
    ShenandoahPushWorkerScope scope(workers(), _max_workers, false);

    _pretouch_heap_page_size = heap_page_size;
    _pretouch_bitmap_page_size = bitmap_page_size;

#ifdef LINUX
    // UseTransparentHugePages would madvise that backing memory can be coalesced into huge
    // pages. But the kernel needs to know that every small page is used, in order to coalesce
    // them into a huge one. Therefore, we need to pretouch with smaller pages.
    if (UseTransparentHugePages) {
      _pretouch_heap_page_size = (size_t)os::vm_page_size();
      _pretouch_bitmap_page_size = (size_t)os::vm_page_size();
    }
#endif

    // OS memory managers may want to coalesce back-to-back pages. Make their jobs
    // simpler by pre-touching contiguous spaces (heap and bitmap) separately.

    ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, _pretouch_bitmap_page_size);
    _workers->run_task(&bcl);

    ShenandoahPretouchHeapTask hcl(_pretouch_heap_page_size);
    _workers->run_task(&hcl);
  }

  //
  // Initialize the rest of GC subsystems
  //

  _liveness_cache = NEW_C_HEAP_ARRAY(ShenandoahLiveData*, _max_workers, mtGC);
  for (uint worker = 0; worker < _max_workers; worker++) {
    _liveness_cache[worker] = NEW_C_HEAP_ARRAY(ShenandoahLiveData, _num_regions, mtGC);
    Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
  }

  // There should probably be Shenandoah-specific options for these,
  // just as there are G1-specific options.
  {
    ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
    satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
    satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
  }

  _monitoring_support = new ShenandoahMonitoringSupport(this);
  _phase_timings = new ShenandoahPhaseTimings(max_workers());
  ShenandoahCodeRoots::initialize();

  if (ShenandoahPacing) {
    _pacer = new ShenandoahPacer(this);
    _pacer->setup_for_idle();
  }

  _control_thread = new ShenandoahControlThread();

  ShenandoahInitLogger::print();

  SlidingForwarding::initialize(_heap_region, ShenandoahHeapRegion::region_size_words());

  return JNI_OK;
}

void ShenandoahHeap::initialize_mode() {
  if (ShenandoahGCMode != nullptr) {
    if (strcmp(ShenandoahGCMode, "satb") == 0) {
      _gc_mode = new ShenandoahSATBMode();
    } else if (strcmp(ShenandoahGCMode, "iu") == 0) {
      _gc_mode = new ShenandoahIUMode();
    } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
      _gc_mode = new ShenandoahPassiveMode();
    } else {
      vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
    }
  } else {
    vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
  }
  _gc_mode->initialize_flags();
  if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
    vm_exit_during_initialization(
            err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                    _gc_mode->name()));
  }
  if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
    vm_exit_during_initialization(
            err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                    _gc_mode->name()));
  }
}
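
// For reference, the mode is selected on the command line, e.g.:
//   -XX:+UseShenandoahGC -XX:ShenandoahGCMode=satb     (the default)
//   -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu       (experimental; also needs -XX:+UnlockExperimentalVMOptions)
//   -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive  (diagnostic; also needs -XX:+UnlockDiagnosticVMOptions)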

void ShenandoahHeap::initialize_heuristics() {
  assert(_gc_mode != nullptr, "Must be initialized");
  _heuristics = _gc_mode->initialize_heuristics();

  if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
    vm_exit_during_initialization(
            err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                    _heuristics->name()));
  }
  if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
    vm_exit_during_initialization(
            err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                    _heuristics->name()));
  }
}

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif

ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  CollectedHeap(),
  _initial_size(0),
  _used(0),
  _committed(0),
  _bytes_allocated_since_gc_start(0),
  _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
  _workers(nullptr),
  _safepoint_workers(nullptr),
  _heap_region_special(false),
  _num_regions(0),
  _regions(nullptr),
  _update_refs_iterator(this),
  _gc_state_changed(false),
  _gc_no_progress_count(0),
  _control_thread(nullptr),
  _shenandoah_policy(policy),
  _gc_mode(nullptr),
  _heuristics(nullptr),
  _free_set(nullptr),
  _pacer(nullptr),
  _verifier(nullptr),
  _phase_timings(nullptr),
  _monitoring_support(nullptr),
  _memory_pool(nullptr),
  _stw_memory_manager("Shenandoah Pauses"),
  _cycle_memory_manager("Shenandoah Cycles"),
  _gc_timer(new ConcurrentGCTimer()),
  _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
  _ref_processor(new ShenandoahReferenceProcessor(MAX2(_max_workers, 1U))),
  _marking_context(nullptr),
  _bitmap_size(0),
  _bitmap_regions_per_slice(0),
  _bitmap_bytes_per_slice(0),
  _bitmap_region_special(false),
  _aux_bitmap_region_special(false),
  _liveness_cache(nullptr),
  _collection_set(nullptr)
{
  // Initialize GC mode early, so we can adjust barrier support
  initialize_mode();
  BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));

  _max_workers = MAX2(_max_workers, 1U);
  _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
  if (_workers == nullptr) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
  }

  if (ParallelGCThreads > 1) {
    _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread",
                                                     ParallelGCThreads);
    _safepoint_workers->initialize_workers();
  }
}

#ifdef _MSC_VER
#pragma warning( pop )
#endif

class ShenandoahResetBitmapTask : public WorkerTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahResetBitmapTask() :
    WorkerTask("Shenandoah Reset Bitmap") {}

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahMarkingContext* const ctx = heap->marking_context();
    while (region != nullptr) {
      if (heap->is_bitmap_slice_committed(region)) {
        ctx->clear_bitmap(region);
      }
      region = _regions.next();
    }
  }
};

void ShenandoahHeap::reset_mark_bitmap() {
  assert_gc_workers(_workers->active_workers());
  mark_incomplete_marking_context();

  ShenandoahResetBitmapTask task;
  _workers->run_task(&task);
}

void ShenandoahHeap::print_on(outputStream* st) const {
  st->print_cr("Shenandoah Heap");
  st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
               byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
               byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
               byte_size_in_proper_unit(committed()),    proper_unit_for_byte_size(committed()),
               byte_size_in_proper_unit(used()),         proper_unit_for_byte_size(used()));
  st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT "%s regions",
               num_regions(),
               byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
               proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));

  st->print("Status: ");
  if (has_forwarded_objects())                 st->print("has forwarded objects, ");
  if (is_concurrent_mark_in_progress())        st->print("marking, ");
  if (is_evacuation_in_progress())             st->print("evacuating, ");
  if (is_update_refs_in_progress())            st->print("updating refs, ");
  if (is_degenerated_gc_in_progress())         st->print("degenerated gc, ");
  if (is_full_gc_in_progress())                st->print("full gc, ");
  if (is_full_gc_move_in_progress())           st->print("full gc move, ");
  if (is_concurrent_weak_root_in_progress())   st->print("concurrent weak roots, ");
  if (is_concurrent_strong_root_in_progress() &&
      !is_concurrent_weak_root_in_progress())  st->print("concurrent strong roots, ");

  if (cancelled_gc()) {
    st->print("cancelled");
  } else {
    st->print("not cancelled");
  }
  st->cr();

  st->print_cr("Reserved region:");
  st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
               p2i(reserved_region().start()),
               p2i(reserved_region().end()));

  ShenandoahCollectionSet* cset = collection_set();
  st->print_cr("Collection set:");
  if (cset != nullptr) {
    st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address()));
    st->print_cr(" - map (biased):  " PTR_FORMAT, p2i(cset->biased_map_address()));
  } else {
    st->print_cr(" (null)");
  }

  st->cr();
  MetaspaceUtils::print_on(st);

  if (Verbose) {
    st->cr();
    print_heap_regions_on(st);
  }
}

class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    assert(thread != nullptr, "Sanity");
    assert(thread->is_Worker_thread(), "Only worker thread expected");
    ShenandoahThreadLocalData::initialize_gclab(thread);
  }
};

void ShenandoahHeap::post_initialize() {
  CollectedHeap::post_initialize();
  MutexLocker ml(Threads_lock);

  ShenandoahInitWorkerGCLABClosure init_gclabs;
  _workers->threads_do(&init_gclabs);

  // The GCLAB cannot be initialized early during VM startup, because it cannot
  // determine its max_size yet. Instead, we let WorkerThreads initialize the GCLAB
  // whenever a new worker is created.
  _workers->set_initialize_gclab();
  if (_safepoint_workers != nullptr) {
    _safepoint_workers->threads_do(&init_gclabs);
    _safepoint_workers->set_initialize_gclab();
  }

  _heuristics->initialize();

  JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers();)
}

size_t ShenandoahHeap::used() const {
  return Atomic::load(&_used);
}

size_t ShenandoahHeap::committed() const {
  return Atomic::load(&_committed);
}

size_t ShenandoahHeap::available() const {
  return free_set()->available();
}

void ShenandoahHeap::increase_committed(size_t bytes) {
  shenandoah_assert_heaplocked_or_safepoint();
  _committed += bytes;
}

void ShenandoahHeap::decrease_committed(size_t bytes) {
  shenandoah_assert_heaplocked_or_safepoint();
  _committed -= bytes;
}

void ShenandoahHeap::increase_used(size_t bytes) {
  Atomic::add(&_used, bytes, memory_order_relaxed);
}

void ShenandoahHeap::set_used(size_t bytes) {
  Atomic::store(&_used, bytes);
}

void ShenandoahHeap::decrease_used(size_t bytes) {
  assert(used() >= bytes, "never decrease heap size by more than we've left");
  Atomic::sub(&_used, bytes, memory_order_relaxed);
}

void ShenandoahHeap::increase_allocated(size_t bytes) {
  Atomic::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed);
}

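// Note on the accounting below: wasted words (e.g. the discarded tail of a retired
// LAB) still count toward the allocation rate that the pacer observes, but they do
// not inflate used(); only real allocations do.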
void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
  size_t bytes = words * HeapWordSize;
  if (!waste) {
    increase_used(bytes);
  }
  increase_allocated(bytes);
  if (ShenandoahPacing) {
    control_thread()->pacing_notify_alloc(words);
    if (waste) {
      pacer()->claim_for_alloc(words, true);
    }
  }
}

size_t ShenandoahHeap::capacity() const {
  return committed();
}

size_t ShenandoahHeap::max_capacity() const {
  return _num_regions * ShenandoahHeapRegion::region_size_bytes();
}

size_t ShenandoahHeap::soft_max_capacity() const {
  size_t v = Atomic::load(&_soft_max_size);
  assert(min_capacity() <= v && v <= max_capacity(),
         "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
         min_capacity(), v, max_capacity());
  return v;
}

void ShenandoahHeap::set_soft_max_capacity(size_t v) {
  assert(min_capacity() <= v && v <= max_capacity(),
         "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
         min_capacity(), v, max_capacity());
  Atomic::store(&_soft_max_size, v);
}

size_t ShenandoahHeap::min_capacity() const {
  return _minimum_size;
}

size_t ShenandoahHeap::initial_capacity() const {
  return _initial_size;
}

bool ShenandoahHeap::is_in(const void* p) const {
  HeapWord* heap_base = (HeapWord*) base();
  HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
  return p >= heap_base && p < last_region_end;
}

void ShenandoahHeap::maybe_uncommit(double shrink_before, size_t shrink_until) {
  assert (ShenandoahUncommit, "should be enabled");

  // Determine if there is work to do. This avoids taking the heap lock if there is
  // no work available, avoids spamming logs with superfluous messages, and minimises
  // the amount of work done while locks are taken.

  if (committed() <= shrink_until) return;

  bool has_work = false;
  for (size_t i = 0; i < num_regions(); i++) {
    ShenandoahHeapRegion* r = get_region(i);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      has_work = true;
      break;
    }
  }

  if (has_work) {
    static const char* msg = "Concurrent uncommit";
    ShenandoahConcurrentPhase gcPhase(msg, ShenandoahPhaseTimings::conc_uncommit, true /* log_heap_usage */);
    EventMark em("%s", msg);

    op_uncommit(shrink_before, shrink_until);
  }
}

void ShenandoahHeap::op_uncommit(double shrink_before, size_t shrink_until) {
  assert (ShenandoahUncommit, "should be enabled");

  // The application allocates from the beginning of the heap, while GC allocates
  // at the end of it. It is more efficient to uncommit from the end, so that the
  // application can enjoy the nearby committed regions. GC allocations are much
  // less frequent, and can therefore absorb the commit costs.
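  //
  // Note the check-lock-recheck idiom below: the first is_empty_committed() test
  // is done without the heap lock and is therefore racy; it only filters candidates.
  // The state is re-checked under the heap lock before the region is actually
  // uncommitted, so a region that was reused in the meantime is left alone.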

  size_t count = 0;
  for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow
    ShenandoahHeapRegion* r = get_region(i - 1);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      ShenandoahHeapLocker locker(lock());
      if (r->is_empty_committed()) {
        if (committed() < shrink_until + ShenandoahHeapRegion::region_size_bytes()) {
          break;
        }

        r->make_uncommitted();
        count++;
      }
    }
    SpinPause(); // allow allocators to take the lock
  }

  if (count > 0) {
    notify_heap_changed();
  }
}

bool ShenandoahHeap::check_soft_max_changed() {
  size_t new_soft_max = Atomic::load(&SoftMaxHeapSize);
  size_t old_soft_max = soft_max_capacity();
  if (new_soft_max != old_soft_max) {
    new_soft_max = MAX2(min_capacity(), new_soft_max);
    new_soft_max = MIN2(max_capacity(), new_soft_max);
    if (new_soft_max != old_soft_max) {
      log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
                   byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
                   byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
      );
      set_soft_max_capacity(new_soft_max);
      return true;
    }
  }
  return false;
}

void ShenandoahHeap::notify_heap_changed() {
  // Update monitoring counters when we take a new region. This amortizes the
  // update costs on the slow path.
  monitoring_support()->notify_heap_changed();

  // This is called from the allocation path, and thus should be fast.
  _heap_changed.try_set();
}

void ShenandoahHeap::set_forced_counters_update(bool value) {
  monitoring_support()->set_forced_counters_update(value);
}

void ShenandoahHeap::handle_force_counters_update() {
  monitoring_support()->handle_force_counters_update();
}

HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // New object should fit the GCLAB size
  size_t min_size = MAX2(size, PLAB::min_size());

  // Figure out the size of the new GCLAB, looking back at heuristics. Expand aggressively.
  size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
  new_size = MIN2(new_size, PLAB::max_size());
  new_size = MAX2(new_size, PLAB::min_size());
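
  // Illustrative example (sizes hypothetical): a thread that keeps refilling its
  // GCLAB sees the size double on every refill, e.g. 64K -> 128K -> 256K words,
  // until clamped to PLAB::max_size(). The doubling is recorded before the fit
  // check below, so the heuristic grows even when this particular request falls
  // back to a shared allocation.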

  // Record the new heuristic value even if we take a shortcut. This captures
  // the case when moderately-sized objects always take the shortcut. At some point,
  // the heuristics should catch up with them.
  ShenandoahThreadLocalData::set_gclab_size(thread, new_size);

  if (new_size < size) {
    // New size still does not fit the object. Fall back to shared allocation.
    // This avoids retiring perfectly good GCLABs, when we encounter a large object.
    return nullptr;
  }

  // Retire current GCLAB, and allocate a new one.
  PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
  gclab->retire();

  size_t actual_size = 0;
  HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
  if (gclab_buf == nullptr) {
    return nullptr;
  }

  assert (size <= actual_size, "allocation should fit");

  // ...and clear or zap the just-allocated GCLAB, if needed.
  if (ZeroTLAB) {
    Copy::zero_to_words(gclab_buf, actual_size);
  } else if (ZapTLAB) {
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
  }
  gclab->set_buf(gclab_buf, actual_size);
  return gclab->allocate(size);
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
                                            size_t requested_size,
                                            size_t* actual_size) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
  HeapWord* res = allocate_memory(req);
  if (res != nullptr) {
    *actual_size = req.actual_size();
  } else {
    *actual_size = 0;
  }
  return res;
}

HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
                                             size_t word_size,
                                             size_t* actual_size) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
  HeapWord* res = allocate_memory(req);
  if (res != nullptr) {
    *actual_size = req.actual_size();
  } else {
    *actual_size = 0;
  }
  return res;
}

HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
  intptr_t pacer_epoch = 0;
  bool in_new_region = false;
  HeapWord* result = nullptr;

  if (req.is_mutator_alloc()) {
    if (ShenandoahPacing) {
      pacer()->pace_for_alloc(req.size());
      pacer_epoch = pacer()->epoch();
    }

    if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
      result = allocate_memory_under_lock(req, in_new_region);
    }

    // Check that GC overhead is not exceeded.
    //
    // Shenandoah will grind along for quite a while allocating one
    // object at a time using shared (non-TLAB) allocations. This check
    // tests whether the GC overhead limit has been exceeded. It will
    // notify the collector to start a cycle, but will raise an OOME
    // to the mutator if the last Full GCs have not made progress.
    if (result == nullptr && !req.is_lab_alloc() && get_gc_no_progress_count() > ShenandoahNoProgressThreshold) {
      control_thread()->handle_alloc_failure(req, false);
      return nullptr;
    }

    // Block until the control thread reacted, then retry allocation.
    //
    // It might happen that a thread requesting allocation unblocks way later,
    // after GC happened, only to fail the second allocation because other
    // threads have already depleted the free storage. In this case, a better
    // strategy is to try again, as long as GC makes progress (or until at least
    // one Full GC has completed).
    size_t original_count = shenandoah_policy()->full_gc_count();
    while (result == nullptr
        && (get_gc_no_progress_count() == 0 || original_count == shenandoah_policy()->full_gc_count())) {
      control_thread()->handle_alloc_failure(req, true);
      result = allocate_memory_under_lock(req, in_new_region);
    }

    if (log_is_enabled(Debug, gc, alloc)) {
      ResourceMark rm;
      log_debug(gc, alloc)("Thread: %s, Result: " PTR_FORMAT ", Request: %s, Size: " SIZE_FORMAT ", Original: " SIZE_FORMAT ", Latest: " SIZE_FORMAT,
                           Thread::current()->name(), p2i(result), req.type_string(), req.size(), original_count, get_gc_no_progress_count());
    }
  } else {
    assert(req.is_gc_alloc(), "Can only accept GC allocs here");
    result = allocate_memory_under_lock(req, in_new_region);
    // Do not call handle_alloc_failure() here, because we cannot block.
    // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
  }

  if (in_new_region) {
    notify_heap_changed();
  }

  if (result != nullptr) {
    size_t requested = req.size();
    size_t actual = req.actual_size();

    assert (req.is_lab_alloc() || (requested == actual),
            "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
            ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);

    if (req.is_mutator_alloc()) {
      notify_mutator_alloc_words(actual, false);

      // If we requested more than we were granted, give the rest back to the pacer.
      // This only matters if we are in the same pacing epoch: do not try to unpace
      // over the budget for the other phase.
      if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
        pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
      }
    } else {
      increase_used(actual * HeapWordSize);
    }
  }

  return result;
}

HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
  // If we are dealing with mutator allocation, then we may need to block for safepoint.
  // We cannot block for safepoint for GC allocations, because there is a high chance
  // we are already running at safepoint or from stack watermark machinery, and we cannot
  // block again.
  ShenandoahHeapLocker locker(lock(), req.is_mutator_alloc());
  return _free_set->allocate(req, in_new_region);
}

HeapWord* ShenandoahHeap::mem_allocate(size_t size,
                                       bool* gc_overhead_limit_was_exceeded) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
  return allocate_memory(req);
}

MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                             size_t size,
                                                             Metaspace::MetadataType mdtype) {
  MetaWord* result;

  // Inform metaspace OOM to GC heuristics if class unloading is possible.
  if (heuristics()->can_unload_classes()) {
    ShenandoahHeuristics* h = heuristics();
    h->record_metaspace_oom();
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != nullptr) {
    return result;
  }

  // Start full GC
  collect(GCCause::_metadata_GC_clear_soft_refs);

  // Retry allocation
  result = loader_data->metaspace_non_null()->allocate(size, mdtype);
  if (result != nullptr) {
    return result;
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != nullptr) {
    return result;
  }

  // Out of memory
  return nullptr;
}

class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  Thread* const _thread;
public:
  ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
    _heap(heap), _thread(Thread::current()) {}

  void do_object(oop p) {
    shenandoah_assert_marked(nullptr, p);
    if (!p->is_forwarded()) {
      _heap->evacuate_object(p, _thread);
    }
  }
};

class ShenandoahEvacuationTask : public WorkerTask {
private:
  ShenandoahHeap* const _sh;
  ShenandoahCollectionSet* const _cs;
  bool _concurrent;
public:
  ShenandoahEvacuationTask(ShenandoahHeap* sh,
                           ShenandoahCollectionSet* cs,
                           bool concurrent) :
    WorkerTask("Shenandoah Evacuation"),
    _sh(sh),
    _cs(cs),
    _concurrent(concurrent)
  {}

  void work(uint worker_id) {
    if (_concurrent) {
      ShenandoahConcurrentWorkerSession worker_session(worker_id);
      ShenandoahSuspendibleThreadSetJoiner stsj;
      ShenandoahEvacOOMScope oom_evac_scope;
      do_work();
    } else {
      ShenandoahParallelWorkerSession worker_session(worker_id);
      ShenandoahEvacOOMScope oom_evac_scope;
      do_work();
    }
  }

private:
  void do_work() {
    ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
    ShenandoahHeapRegion* r;
    while ((r = _cs->claim_next()) != nullptr) {
      assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
      _sh->marked_object_iterate(r, &cl);

      if (ShenandoahPacing) {
        _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
      }

      if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
        break;
      }
    }
  }
};

void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
  ShenandoahEvacuationTask task(this, _collection_set, concurrent);
  workers()->run_task(&task);
}

oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
  if (ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {
    // This thread went through the OOM-during-evacuation protocol, so it is safe
    // to return the forward pointer. It must not attempt to evacuate any more.
    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");

  size_t size = p->forward_safe_size();

  assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");

  bool alloc_from_gclab = true;
  HeapWord* copy = nullptr;

#ifdef ASSERT
  if (ShenandoahOOMDuringEvacALot &&
      (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
    copy = nullptr;
  } else {
#endif
    if (UseTLAB) {
      copy = allocate_from_gclab(thread, size);
    }
    if (copy == nullptr) {
      ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size);
      copy = allocate_memory(req);
      alloc_from_gclab = false;
    }
#ifdef ASSERT
  }
#endif

  if (copy == nullptr) {
    control_thread()->handle_alloc_failure_evac(size);

    _oom_evac_handler.handle_out_of_memory_during_evacuation();

    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  // Copy the object:
  Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
  oop copy_val = cast_to_oop(copy);

  if (UseCompactObjectHeaders) {
    // The copy above is not atomic. Make sure we have seen the proper mark
    // and re-install it into the copy, so that Klass* is guaranteed to be correct.
    markWord mark = copy_val->mark();
    if (!mark.is_marked()) {
      copy_val->set_mark(mark);
      ContinuationGCSupport::relativize_stack_chunk(copy_val);
    } else {
      // If we copied a mark-word that indicates 'forwarded' state, the object
      // installation would not succeed. We cannot access Klass* anymore either.
      // Skip the transformation.
    }
  } else {
    ContinuationGCSupport::relativize_stack_chunk(copy_val);
  }

  // Try to install the new forwarding pointer.
  oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
  if (result == copy_val) {
    // Successfully evacuated. Our copy is now the public one!
    shenandoah_assert_correct(nullptr, copy_val);
    return copy_val;
  } else {
    // Failed to evacuate. We need to deal with the object that is left behind. Since this
    // new allocation is certainly after TAMS, it will be considered live in the next cycle.
    // But if it happens to contain references to evacuated regions, those references would
    // not get updated for this stale copy during this cycle, and we will crash while scanning
    // it the next cycle.
    //
    // For GCLAB allocations, it is enough to rollback the allocation ptr. Either the next
    // object will overwrite this stale copy, or the filler object on LAB retirement will
    // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
    // have to explicitly overwrite the copy with the filler object. With that overwrite,
    // we have to keep the fwdptr initialized and pointing to our (stale) copy.
    if (alloc_from_gclab) {
      ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
    } else {
      fill_with_object(copy, size);
      shenandoah_assert_correct(nullptr, copy_val);
    }
    shenandoah_assert_correct(nullptr, result);
    return result;
  }
}

void ShenandoahHeap::trash_cset_regions() {
  ShenandoahHeapLocker locker(lock());

  ShenandoahCollectionSet* set = collection_set();
  ShenandoahHeapRegion* r;
  set->clear_current_index();
  while ((r = set->next()) != nullptr) {
    r->make_trash();
  }
  collection_set()->clear();
}

void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
  st->print_cr("Heap Regions:");
  st->print_cr("Region state: EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HP=pinned humongous start");
  st->print_cr("              HC=humongous continuation, CS=collection set, TR=trash, P=pinned, CSP=pinned collection set");
  st->print_cr("BTE=bottom/top/end, TAMS=top-at-mark-start");
  st->print_cr("UWM=update watermark, U=used");
  st->print_cr("T=TLAB allocs, G=GCLAB allocs");
  st->print_cr("S=shared allocs, L=live data");
  st->print_cr("CP=critical pins");

  for (size_t i = 0; i < num_regions(); i++) {
    get_region(i)->print_on(st);
  }
}

void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
  assert(start->is_humongous_start(), "reclaim regions starting with the first one");

  oop humongous_obj = cast_to_oop(start->bottom());
  size_t size = humongous_obj->size();
  size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
  size_t index = start->index() + required_regions - 1;
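
  // For example (illustrative sizes): with 2 MB regions, a 5 MB humongous object
  // covers required_regions = 3 regions, one humongous start plus two continuations;
  // "index" then points at the last continuation region.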

  assert(!start->has_live(), "liveness must be zero");

  for (size_t i = 0; i < required_regions; i++) {
    // Reclaim from the tail. Otherwise, the assertion fails when printing the region
    // to the trace log, as it expects every region to belong to a humongous chunk
    // starting with a humongous start region.
    ShenandoahHeapRegion* region = get_region(index--);

    assert(region->is_humongous(), "expect correct humongous start or continuation");
    assert(!region->is_cset(), "Humongous region should not be in collection set");

    region->make_trash_immediate();
  }
}

class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
public:
  ShenandoahCheckCleanGCLABClosure() {}
  void do_thread(Thread* thread) {
    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
    assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
    assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
  }
};

class ShenandoahRetireGCLABClosure : public ThreadClosure {
private:
  bool const _resize;
public:
  ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
  void do_thread(Thread* thread) {
    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
    assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
    gclab->retire();
    if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
      ShenandoahThreadLocalData::set_gclab_size(thread, 0);
    }
  }
};

void ShenandoahHeap::labs_make_parsable() {
  assert(UseTLAB, "Only call with UseTLAB");

  ShenandoahRetireGCLABClosure cl(false);

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread* t = jtiwh.next(); ) {
    ThreadLocalAllocBuffer& tlab = t->tlab();
    tlab.make_parsable();
    cl.do_thread(t);
  }

  workers()->threads_do(&cl);
}

void ShenandoahHeap::tlabs_retire(bool resize) {
  assert(UseTLAB, "Only call with UseTLAB");
  assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");

  ThreadLocalAllocStats stats;

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread* t = jtiwh.next(); ) {
    ThreadLocalAllocBuffer& tlab = t->tlab();
    tlab.retire(&stats);
    if (resize) {
      tlab.resize();
    }
  }

  stats.publish();

#ifdef ASSERT
  ShenandoahCheckCleanGCLABClosure cl;
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread* t = jtiwh.next(); ) {
    cl.do_thread(t);
  }
  workers()->threads_do(&cl);
#endif
}

void ShenandoahHeap::gclabs_retire(bool resize) {
  assert(UseTLAB, "Only call with UseTLAB");
  assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");

  ShenandoahRetireGCLABClosure cl(resize);
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread* t = jtiwh.next(); ) {
    cl.do_thread(t);
  }
  workers()->threads_do(&cl);

  if (safepoint_workers() != nullptr) {
    safepoint_workers()->threads_do(&cl);
  }
}
1343 
1344 // Returns size in bytes
1345 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1346   // Return the max allowed size, and let the allocation path
1347   // figure out the safe size for current allocation.
1348   return ShenandoahHeapRegion::max_tlab_size_bytes();
1349 }
1350 
1351 size_t ShenandoahHeap::max_tlab_size() const {
1352   // Returns size in words
1353   return ShenandoahHeapRegion::max_tlab_size_words();
1354 }
1355 
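// External entry point for collection requests (e.g. System.gc()). The
// request is handed off to the control thread, which drives the GC cycle
// asynchronously.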
1356 void ShenandoahHeap::collect(GCCause::Cause cause) {
1357   control_thread()->request_gc(cause);
1358 }
1359 
1360 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
1361   //assert(false, "Shouldn't need to do full collections");
1362 }
1363 
1364 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1365   ShenandoahHeapRegion* r = heap_region_containing(addr);
1366   if (r != nullptr) {
1367     return r->block_start(addr);
1368   }
1369   return nullptr;
1370 }
1371 
1372 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1373   ShenandoahHeapRegion* r = heap_region_containing(addr);
1374   return r->block_is_obj(addr);
1375 }
1376 
1377 bool ShenandoahHeap::print_location(outputStream* st, void* addr) const {
1378   return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr);
1379 }
1380 
1381 void ShenandoahHeap::prepare_for_verify() {
1382   if (SafepointSynchronize::is_at_safepoint() && UseTLAB) {
1383     labs_make_parsable();
1384   }
1385 }
1386 
1387 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1388   if (_shenandoah_policy->is_at_shutdown()) {
1389     return;
1390   }
1391 
1392   if (_control_thread != nullptr) {
1393     tcl->do_thread(_control_thread);
1394   }
1395 
1396   workers()->threads_do(tcl);
1397   if (_safepoint_workers != nullptr) {
1398     _safepoint_workers->threads_do(tcl);
1399   }
1400 }
1401 
1402 void ShenandoahHeap::print_tracing_info() const {
1403   LogTarget(Info, gc, stats) lt;
1404   if (lt.is_enabled()) {
1405     ResourceMark rm;
1406     LogStream ls(lt);
1407 
1408     phase_timings()->print_global_on(&ls);
1409 
1410     ls.cr();
1411     ls.cr();
1412 
1413     shenandoah_policy()->print_gc_stats(&ls);
1414 
1415     ls.cr();
1416     ls.cr();
1417   }
1418 }
1419 
1420 void ShenandoahHeap::verify(VerifyOption vo) {
1421   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1422     if (ShenandoahVerify) {
1423       verifier()->verify_generic(vo);
1424     } else {
1425       // TODO: Consider allocating verification bitmaps on demand,
1426       // and turn this on unconditionally.
1427     }
1428   }
1429 }

size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1431   return _free_set->capacity();
1432 }
1433 
1434 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1435 private:
1436   MarkBitMap* _bitmap;
1437   ShenandoahScanObjectStack* _oop_stack;
1438   ShenandoahHeap* const _heap;
1439   ShenandoahMarkingContext* const _marking_context;
1440 
1441   template <class T>
1442   void do_oop_work(T* p) {
1443     T o = RawAccess<>::oop_load(p);
1444     if (!CompressedOops::is_null(o)) {
1445       oop obj = CompressedOops::decode_not_null(o);
1446       if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
        // There may be dead oops in weak roots during the concurrent root phase; do not touch them.
1448         return;
1449       }
1450       obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
1451 
1452       assert(oopDesc::is_oop(obj), "must be a valid oop");
1453       if (!_bitmap->is_marked(obj)) {
1454         _bitmap->mark(obj);
1455         _oop_stack->push(obj);
1456       }
1457     }
1458   }
1459 public:
1460   ObjectIterateScanRootClosure(MarkBitMap* bitmap, ShenandoahScanObjectStack* oop_stack) :
1461     _bitmap(bitmap), _oop_stack(oop_stack), _heap(ShenandoahHeap::heap()),
1462     _marking_context(_heap->marking_context()) {}
1463   void do_oop(oop* p)       { do_oop_work(p); }
1464   void do_oop(narrowOop* p) { do_oop_work(p); }
1465 };
1466 
1467 /*
1468  * This is public API, used in preparation of object_iterate().
1469  * Since we don't do linear scan of heap in object_iterate() (see comment below), we don't
1470  * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can
1471  * control, we call SH::tlabs_retire, SH::gclabs_retire.
1472  */
1473 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
1474   // No-op.
1475 }
1476 
1477 /*
1478  * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
1479  *
1480  * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
1481  * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
1482  * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
1483  * scanning therefore depends on having a valid marking bitmap to support it. However, we only
1484  * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
1485  * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
1486  * wiped the bitmap in preparation for next marking).
1487  *
1488  * For all those reasons, we implement object iteration as a single marking traversal, reporting
1489  * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1490  * is allowed to report dead objects, but is not required to do so.
1491  */
1492 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1493   // Reset bitmap
  if (!prepare_aux_bitmap_for_iteration()) {
    return;
  }
1496 
1497   ShenandoahScanObjectStack oop_stack;
1498   ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1499   // Seed the stack with root scan
1500   scan_roots_for_iteration(&oop_stack, &oops);
1501 
1502   // Work through the oop stack to traverse heap
  while (!oop_stack.is_empty()) {
1504     oop obj = oop_stack.pop();
1505     assert(oopDesc::is_oop(obj), "must be a valid oop");
1506     cl->do_object(obj);
1507     obj->oop_iterate(&oops);
1508   }
1509 
1510   assert(oop_stack.is_empty(), "should be empty");
1511   // Reclaim bitmap
1512   reclaim_aux_bitmap_for_iteration();
1513 }
1514 
1515 bool ShenandoahHeap::prepare_aux_bitmap_for_iteration() {
1516   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1517 
1518   if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1519     log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1520     return false;
1521   }
1522   // Reset bitmap
1523   _aux_bit_map.clear();
1524   return true;
1525 }
1526 
1527 void ShenandoahHeap::scan_roots_for_iteration(ShenandoahScanObjectStack* oop_stack, ObjectIterateScanRootClosure* oops) {
  // Process GC roots according to the current GC cycle.
  // This populates the work stack with the initial objects.
  // It is important to relinquish the associated locks before diving
  // into the heap dumper.
1532   uint n_workers = safepoint_workers() != nullptr ? safepoint_workers()->active_workers() : 1;
1533   ShenandoahHeapIterationRootScanner rp(n_workers);
1534   rp.roots_do(oops);
1535 }
1536 
1537 void ShenandoahHeap::reclaim_aux_bitmap_for_iteration() {
1538   if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
1539     log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
1540   }
1541 }
1542 
// Closure for parallel object iteration
1544 class ShenandoahObjectIterateParScanClosure : public BasicOopIterateClosure {
1545 private:
1546   MarkBitMap* _bitmap;
1547   ShenandoahObjToScanQueue* _queue;
1548   ShenandoahHeap* const _heap;
1549   ShenandoahMarkingContext* const _marking_context;
1550 
1551   template <class T>
1552   void do_oop_work(T* p) {
1553     T o = RawAccess<>::oop_load(p);
1554     if (!CompressedOops::is_null(o)) {
1555       oop obj = CompressedOops::decode_not_null(o);
1556       if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
        // There may be dead oops in weak roots during the concurrent root phase; do not touch them.
1558         return;
1559       }
1560       obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
1561 
1562       assert(oopDesc::is_oop(obj), "Must be a valid oop");
1563       if (_bitmap->par_mark(obj)) {
1564         _queue->push(ShenandoahMarkTask(obj));
1565       }
1566     }
1567   }
1568 public:
1569   ShenandoahObjectIterateParScanClosure(MarkBitMap* bitmap, ShenandoahObjToScanQueue* q) :
1570     _bitmap(bitmap), _queue(q), _heap(ShenandoahHeap::heap()),
1571     _marking_context(_heap->marking_context()) {}
1572   void do_oop(oop* p)       { do_oop_work(p); }
1573   void do_oop(narrowOop* p) { do_oop_work(p); }
1574 };
1575 
// Object iterator for parallel heap iteration.
// The root scanning phase happens during construction, as a preparation of the
// parallel marking queues.
// Every worker processes its own marking queue; work stealing is used
// to balance the workload.
1581 class ShenandoahParallelObjectIterator : public ParallelObjectIteratorImpl {
1582 private:
1583   uint                         _num_workers;
1584   bool                         _init_ready;
1585   MarkBitMap*                  _aux_bit_map;
1586   ShenandoahHeap*              _heap;
1587   ShenandoahScanObjectStack    _roots_stack; // global roots stack
1588   ShenandoahObjToScanQueueSet* _task_queues;
1589 public:
  ShenandoahParallelObjectIterator(uint num_workers, MarkBitMap* bitmap) :
        _num_workers(num_workers),
        _init_ready(false),
        _aux_bit_map(bitmap),
        _heap(ShenandoahHeap::heap()),
        // Initialize eagerly: the destructor checks _task_queues even when
        // bitmap preparation fails below and the constructor returns early.
        _task_queues(nullptr) {
1595     // Initialize bitmap
1596     _init_ready = _heap->prepare_aux_bitmap_for_iteration();
1597     if (!_init_ready) {
1598       return;
1599     }
1600 
1601     ObjectIterateScanRootClosure oops(_aux_bit_map, &_roots_stack);
1602     _heap->scan_roots_for_iteration(&_roots_stack, &oops);
1603 
1604     _init_ready = prepare_worker_queues();
1605   }
1606 
1607   ~ShenandoahParallelObjectIterator() {
1608     // Reclaim bitmap
1609     _heap->reclaim_aux_bitmap_for_iteration();
1610     // Reclaim queue for workers
    if (_task_queues != nullptr) {
1612       for (uint i = 0; i < _num_workers; ++i) {
1613         ShenandoahObjToScanQueue* q = _task_queues->queue(i);
1614         if (q != nullptr) {
1615           delete q;
1616           _task_queues->register_queue(i, nullptr);
1617         }
1618       }
1619       delete _task_queues;
1620       _task_queues = nullptr;
1621     }
1622   }
1623 
1624   virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
1625     if (_init_ready) {
1626       object_iterate_parallel(cl, worker_id, _task_queues);
1627     }
1628   }
1629 
1630 private:
1631   // Divide global root_stack into worker queues
1632   bool prepare_worker_queues() {
1633     _task_queues = new ShenandoahObjToScanQueueSet((int) _num_workers);
    // Initialize a queue for each worker
1635     for (uint i = 0; i < _num_workers; ++i) {
1636       ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
1637       _task_queues->register_queue(i, task_queue);
1638     }
    // Divide roots among the workers. Assuming that the distribution of object
    // references correlates with root kind, deal the roots out round-robin, so that
    // every worker has the same chance to process every kind of root.
1642     size_t roots_num = _roots_stack.size();
1643     if (roots_num == 0) {
1644       // No work to do
1645       return false;
1646     }
1647 
1648     for (uint j = 0; j < roots_num; j++) {
1649       uint stack_id = j % _num_workers;
1650       oop obj = _roots_stack.pop();
1651       _task_queues->queue(stack_id)->push(ShenandoahMarkTask(obj));
1652     }
1653     return true;
1654   }
1655 
1656   void object_iterate_parallel(ObjectClosure* cl,
1657                                uint worker_id,
1658                                ShenandoahObjToScanQueueSet* queue_set) {
1659     assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1660     assert(queue_set != nullptr, "task queue must not be null");
1661 
1662     ShenandoahObjToScanQueue* q = queue_set->queue(worker_id);
1663     assert(q != nullptr, "object iterate queue must not be null");
1664 
1665     ShenandoahMarkTask t;
1666     ShenandoahObjectIterateParScanClosure oops(_aux_bit_map, q);
1667 
1668     // Work through the queue to traverse heap.
1669     // Steal when there is no task in queue.
1670     while (q->pop(t) || queue_set->steal(worker_id, t)) {
1671       oop obj = t.obj();
1672       assert(oopDesc::is_oop(obj), "must be a valid oop");
1673       cl->do_object(obj);
1674       obj->oop_iterate(&oops);
1675     }
1676     assert(q->is_empty(), "should be empty");
1677   }
1678 };
1679 
1680 ParallelObjectIteratorImpl* ShenandoahHeap::parallel_object_iterator(uint workers) {
1681   return new ShenandoahParallelObjectIterator(workers, &_aux_bit_map);
1682 }
1683 
1684 // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
1685 void ShenandoahHeap::keep_alive(oop obj) {
1686   if (is_concurrent_mark_in_progress() && (obj != nullptr)) {
1687     ShenandoahBarrierSet::barrier_set()->enqueue(obj);
1688   }
1689 }
1690 
1691 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1692   for (size_t i = 0; i < num_regions(); i++) {
1693     ShenandoahHeapRegion* current = get_region(i);
1694     blk->heap_region_do(current);
1695   }
1696 }
1697 
1698 class ShenandoahParallelHeapRegionTask : public WorkerTask {
1699 private:
1700   ShenandoahHeap* const _heap;
1701   ShenandoahHeapRegionClosure* const _blk;
1702 
1703   shenandoah_padding(0);
1704   volatile size_t _index;
1705   shenandoah_padding(1);
1706 
1707 public:
1708   ShenandoahParallelHeapRegionTask(ShenandoahHeapRegionClosure* blk) :
1709           WorkerTask("Shenandoah Parallel Region Operation"),
1710           _heap(ShenandoahHeap::heap()), _blk(blk), _index(0) {}
1711 
1712   void work(uint worker_id) {
1713     ShenandoahParallelWorkerSession worker_session(worker_id);
1714     size_t stride = ShenandoahParallelRegionStride;
1715 
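    // Workers claim strides of ShenandoahParallelRegionStride regions by atomically
    // bumping the shared index until all regions have been processed.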
1716     size_t max = _heap->num_regions();
1717     while (Atomic::load(&_index) < max) {
1718       size_t cur = Atomic::fetch_then_add(&_index, stride, memory_order_relaxed);
1719       size_t start = cur;
1720       size_t end = MIN2(cur + stride, max);
1721       if (start >= max) break;
1722 
1723       for (size_t i = cur; i < end; i++) {
1724         ShenandoahHeapRegion* current = _heap->get_region(i);
1725         _blk->heap_region_do(current);
1726       }
1727     }
1728   }
1729 };
1730 
1731 void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1732   assert(blk->is_thread_safe(), "Only thread-safe closures here");
1733   if (num_regions() > ShenandoahParallelRegionStride) {
1734     ShenandoahParallelHeapRegionTask task(blk);
1735     workers()->run_task(&task);
1736   } else {
1737     heap_region_iterate(blk);
1738   }
1739 }
1740 
1741 class ShenandoahRendezvousClosure : public HandshakeClosure {
1742 public:
1743   inline ShenandoahRendezvousClosure() : HandshakeClosure("ShenandoahRendezvous") {}
1744   inline void do_thread(Thread* thread) {}
1745 };
1746 
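// Handshake all Java threads with an empty closure. Once this returns, every
// Java thread has passed a handshake point, so previously published global
// state is visible to all of them.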
1747 void ShenandoahHeap::rendezvous_threads() {
1748   ShenandoahRendezvousClosure cl;
1749   Handshake::execute(&cl);
1750 }
1751 
1752 void ShenandoahHeap::recycle_trash() {
1753   free_set()->recycle_trash();
1754 }
1755 
1756 class ShenandoahResetUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1757 private:
1758   ShenandoahMarkingContext* const _ctx;
1759 public:
1760   ShenandoahResetUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
1761 
1762   void heap_region_do(ShenandoahHeapRegion* r) {
1763     if (r->is_active()) {
      // Reset live data and set TAMS optimistically. We would recheck these under the pause
      // anyway to capture any updates that happened in the meantime.
1766       r->clear_live_data();
1767       _ctx->capture_top_at_mark_start(r);
1768     }
1769   }
1770 
1771   bool is_thread_safe() { return true; }
1772 };
1773 
1774 void ShenandoahHeap::prepare_gc() {
1775   reset_mark_bitmap();
1776 
1777   ShenandoahResetUpdateRegionStateClosure cl;
1778   parallel_heap_region_iterate(&cl);
1779 }
1780 
1781 class ShenandoahFinalMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1782 private:
1783   ShenandoahMarkingContext* const _ctx;
1784   ShenandoahHeapLock* const _lock;
1785 
1786 public:
1787   ShenandoahFinalMarkUpdateRegionStateClosure() :
1788     _ctx(ShenandoahHeap::heap()->complete_marking_context()), _lock(ShenandoahHeap::heap()->lock()) {}
1789 
1790   void heap_region_do(ShenandoahHeapRegion* r) {
1791     if (r->is_active()) {
1792       // All allocations past TAMS are implicitly live, adjust the region data.
1793       // Bitmaps/TAMS are swapped at this point, so we need to poll complete bitmap.
1794       HeapWord *tams = _ctx->top_at_mark_start(r);
1795       HeapWord *top = r->top();
1796       if (top > tams) {
1797         r->increase_live_data_alloc_words(pointer_delta(top, tams));
1798       }
1799 
      // We are about to select the collection set, make sure it knows about
      // current pinning status. Also, this allows trashing more regions that
      // have since had their pinning status dropped.
1803       if (r->is_pinned()) {
1804         if (r->pin_count() == 0) {
1805           ShenandoahHeapLocker locker(_lock);
1806           r->make_unpinned();
1807         }
1808       } else {
1809         if (r->pin_count() > 0) {
1810           ShenandoahHeapLocker locker(_lock);
1811           r->make_pinned();
1812         }
1813       }
1814 
      // Remember the limit for updating refs. It is guaranteed that no
      // from-space references are written from here on.
1817       r->set_update_watermark_at_safepoint(r->top());
1818     } else {
1819       assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
1820       assert(_ctx->top_at_mark_start(r) == r->top(),
1821              "Region " SIZE_FORMAT " should have correct TAMS", r->index());
1822     }
1823   }
1824 
1825   bool is_thread_safe() { return true; }
1826 };
1827 
1828 void ShenandoahHeap::prepare_regions_and_collection_set(bool concurrent) {
1829   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
1830   {
1831     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_update_region_states :
1832                                          ShenandoahPhaseTimings::degen_gc_final_update_region_states);
1833     ShenandoahFinalMarkUpdateRegionStateClosure cl;
1834     parallel_heap_region_iterate(&cl);
1835 
1836     assert_pinned_region_status();
1837   }
1838 
1839   {
1840     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::choose_cset :
1841                                          ShenandoahPhaseTimings::degen_gc_choose_cset);
1842     ShenandoahHeapLocker locker(lock());
1843     _collection_set->clear();
1844     heuristics()->choose_collection_set(_collection_set);
1845   }
1846 
1847   {
1848     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset :
1849                                          ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
1850     ShenandoahHeapLocker locker(lock());
1851     _free_set->rebuild();
1852   }
1853 }
1854 
1855 void ShenandoahHeap::do_class_unloading() {
1856   _unloader.unload();
1857 }
1858 
1859 void ShenandoahHeap::stw_weak_refs(bool full_gc) {
1860   // Weak refs processing
1861   ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
1862                                                 : ShenandoahPhaseTimings::degen_gc_weakrefs;
1863   ShenandoahTimingsTracker t(phase);
1864   ShenandoahGCWorkerPhase worker_phase(phase);
1865   ref_processor()->process_references(phase, workers(), false /* concurrent */);
1866 }
1867 
1868 void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
1869   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
1870 
1871   // Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to
1872   // make them parsable for update code to work correctly. Plus, we can compute new sizes
1873   // for future GCLABs here.
1874   if (UseTLAB) {
1875     ShenandoahGCPhase phase(concurrent ?
1876                             ShenandoahPhaseTimings::init_update_refs_manage_gclabs :
1877                             ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
1878     gclabs_retire(ResizeTLAB);
1879   }
1880 
1881   _update_refs_iterator.reset();
1882 }
1883 
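// Push the updated global gc state into every Java thread's thread-local
// copy, so that mutator barriers observe the new state after the safepoint.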
1884 void ShenandoahHeap::propagate_gc_state_to_java_threads() {
1885   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
1886   if (_gc_state_changed) {
1887     _gc_state_changed = false;
1888     char state = gc_state();
1889     for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1890       ShenandoahThreadLocalData::set_gc_state(t, state);
1891     }
1892   }
1893 }
1894 
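// Update the global gc state bits. The change is recorded and propagated to
// the Java threads via propagate_gc_state_to_java_threads() while still at
// the safepoint.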
1895 void ShenandoahHeap::set_gc_state(uint mask, bool value) {
1896   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
1897   _gc_state.set_cond(mask, value);
1898   _gc_state_changed = true;
1899 }
1900 
1901 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1902   assert(!has_forwarded_objects(), "Not expected before/after mark phase");
1903   set_gc_state(MARKING, in_progress);
1904   ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1905 }
1906 
1907 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1908   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
1909   set_gc_state(EVACUATION, in_progress);
1910 }
1911 
1912 void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
1913   if (in_progress) {
1914     _concurrent_strong_root_in_progress.set();
1915   } else {
1916     _concurrent_strong_root_in_progress.unset();
1917   }
1918 }
1919 
1920 void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
1921   set_gc_state(WEAK_ROOTS, cond);
1922 }
1923 
1924 GCTracer* ShenandoahHeap::tracer() {
1925   return shenandoah_policy()->tracer();
1926 }
1927 
1928 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
1929   return _free_set->used();
1930 }
1931 
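// Atomically move the cancellation state from CANCELLABLE to CANCELLED.
// Returns true only for the single caller that performed the transition.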
1932 bool ShenandoahHeap::try_cancel_gc() {
1933   jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
1934   return prev == CANCELLABLE;
1935 }
1936 
1937 void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
1938   if (try_cancel_gc()) {
1939     FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
1940     log_info(gc)("%s", msg.buffer());
1941     Events::log(Thread::current(), "%s", msg.buffer());
1942   }
1943 }
1944 
1945 uint ShenandoahHeap::max_workers() {
1946   return _max_workers;
1947 }
1948 
1949 void ShenandoahHeap::stop() {
  // The shutdown sequence should be able to terminate even when GC is running.
1951 
1952   // Step 0. Notify policy to disable event recording and prevent visiting gc threads during shutdown
1953   _shenandoah_policy->record_shutdown();
1954 
1955   // Step 1. Notify control thread that we are in shutdown.
1956   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
1957   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
1958   control_thread()->prepare_for_graceful_shutdown();
1959 
1960   // Step 2. Notify GC workers that we are cancelling GC.
1961   cancel_gc(GCCause::_shenandoah_stop_vm);
1962 
  // Step 3. Wait until the GC control thread exits normally.
1964   control_thread()->stop();
1965 }
1966 
1967 void ShenandoahHeap::stw_unload_classes(bool full_gc) {
1968   if (!unload_classes()) return;
1969   ClassUnloadingContext ctx(_workers->active_workers(),
1970                             true /* unregister_nmethods_during_purge */,
1971                             false /* lock_nmethod_free_separately */);
1972 
1973   // Unload classes and purge SystemDictionary.
1974   {
1975     ShenandoahPhaseTimings::Phase phase = full_gc ?
1976                                           ShenandoahPhaseTimings::full_gc_purge_class_unload :
1977                                           ShenandoahPhaseTimings::degen_gc_purge_class_unload;
1978     ShenandoahIsAliveSelector is_alive;
1979     {
1980       CodeCache::UnlinkingScope scope(is_alive.is_alive_closure());
1981       ShenandoahGCPhase gc_phase(phase);
1982       ShenandoahGCWorkerPhase worker_phase(phase);
1983       bool unloading_occurred = SystemDictionary::do_unloading(gc_timer());
1984 
1985       uint num_workers = _workers->active_workers();
1986       ShenandoahClassUnloadingTask unlink_task(phase, num_workers, unloading_occurred);
1987       _workers->run_task(&unlink_task);
1988     }
    // Release unloaded nmethods' memory.
1990     ClassUnloadingContext::context()->purge_and_free_nmethods();
1991   }
1992 
1993   {
1994     ShenandoahGCPhase phase(full_gc ?
1995                             ShenandoahPhaseTimings::full_gc_purge_cldg :
1996                             ShenandoahPhaseTimings::degen_gc_purge_cldg);
1997     ClassLoaderDataGraph::purge(true /* at_safepoint */);
1998   }
1999   // Resize and verify metaspace
2000   MetaspaceGC::compute_new_size();
2001   DEBUG_ONLY(MetaspaceUtils::verify();)
2002 }
2003 
// Weak roots are either pre-evacuated (final mark) or updated (final updaterefs),
// so they should not have forwarded oops.
// However, we do need to "null" dead oops in the roots, if that can not be done
// in concurrent cycles.
2008 void ShenandoahHeap::stw_process_weak_roots(bool full_gc) {
2009   uint num_workers = _workers->active_workers();
2010   ShenandoahPhaseTimings::Phase timing_phase = full_gc ?
2011                                                ShenandoahPhaseTimings::full_gc_purge_weak_par :
2012                                                ShenandoahPhaseTimings::degen_gc_purge_weak_par;
2013   ShenandoahGCPhase phase(timing_phase);
2014   ShenandoahGCWorkerPhase worker_phase(timing_phase);
2015   // Cleanup weak roots
2016   if (has_forwarded_objects()) {
2017     ShenandoahForwardedIsAliveClosure is_alive;
2018     ShenandoahUpdateRefsClosure keep_alive;
2019     ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveClosure, ShenandoahUpdateRefsClosure>
2020       cleaning_task(timing_phase, &is_alive, &keep_alive, num_workers);
2021     _workers->run_task(&cleaning_task);
2022   } else {
2023     ShenandoahIsAliveClosure is_alive;
2024 #ifdef ASSERT
2025     ShenandoahAssertNotForwardedClosure verify_cl;
2026     ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, ShenandoahAssertNotForwardedClosure>
2027       cleaning_task(timing_phase, &is_alive, &verify_cl, num_workers);
2028 #else
2029     ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, DoNothingClosure>
2030       cleaning_task(timing_phase, &is_alive, &do_nothing_cl, num_workers);
2031 #endif
2032     _workers->run_task(&cleaning_task);
2033   }
2034 }
2035 
2036 void ShenandoahHeap::parallel_cleaning(bool full_gc) {
2037   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2038   assert(is_stw_gc_in_progress(), "Only for Degenerated and Full GC");
2039   ShenandoahGCPhase phase(full_gc ?
2040                           ShenandoahPhaseTimings::full_gc_purge :
2041                           ShenandoahPhaseTimings::degen_gc_purge);
2042   stw_weak_refs(full_gc);
2043   stw_process_weak_roots(full_gc);
2044   stw_unload_classes(full_gc);
2045 }
2046 
2047 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
2048   set_gc_state(HAS_FORWARDED, cond);
2049 }
2050 
2051 void ShenandoahHeap::set_unload_classes(bool uc) {
2052   _unload_classes.set_cond(uc);
2053 }
2054 
2055 bool ShenandoahHeap::unload_classes() const {
2056   return _unload_classes.is_set();
2057 }
2058 
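// Address of the biased collection set map, exposed so that barrier fast
// paths (including JIT-compiled code) can test collection set membership
// cheaply.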
2059 address ShenandoahHeap::in_cset_fast_test_addr() {
2060   ShenandoahHeap* heap = ShenandoahHeap::heap();
2061   assert(heap->collection_set() != nullptr, "Sanity");
2062   return (address) heap->collection_set()->biased_map_address();
2063 }
2064 
2065 size_t ShenandoahHeap::bytes_allocated_since_gc_start() const {
2066   return Atomic::load(&_bytes_allocated_since_gc_start);
2067 }
2068 
2069 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
2070   Atomic::store(&_bytes_allocated_since_gc_start, (size_t)0);
2071 }
2072 
2073 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
2074   _degenerated_gc_in_progress.set_cond(in_progress);
2075 }
2076 
2077 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2078   _full_gc_in_progress.set_cond(in_progress);
2079 }
2080 
2081 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2082   assert (is_full_gc_in_progress(), "should be");
2083   _full_gc_move_in_progress.set_cond(in_progress);
2084 }
2085 
2086 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2087   set_gc_state(UPDATEREFS, in_progress);
2088 }
2089 
2090 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2091   ShenandoahCodeRoots::register_nmethod(nm);
2092 }
2093 
2094 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
2095   ShenandoahCodeRoots::unregister_nmethod(nm);
2096 }
2097 
2098 void ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
2099   heap_region_containing(o)->record_pin();
2100 }
2101 
2102 void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
2103   ShenandoahHeapRegion* r = heap_region_containing(o);
2104   assert(r != nullptr, "Sanity");
2105   assert(r->pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", r->index());
2106   r->record_unpin();
2107 }
2108 
2109 void ShenandoahHeap::sync_pinned_region_status() {
2110   ShenandoahHeapLocker locker(lock());
2111 
2112   for (size_t i = 0; i < num_regions(); i++) {
2113     ShenandoahHeapRegion *r = get_region(i);
2114     if (r->is_active()) {
2115       if (r->is_pinned()) {
2116         if (r->pin_count() == 0) {
2117           r->make_unpinned();
2118         }
2119       } else {
2120         if (r->pin_count() > 0) {
2121           r->make_pinned();
2122         }
2123       }
2124     }
2125   }
2126 
2127   assert_pinned_region_status();
2128 }
2129 
2130 #ifdef ASSERT
2131 void ShenandoahHeap::assert_pinned_region_status() {
2132   for (size_t i = 0; i < num_regions(); i++) {
2133     ShenandoahHeapRegion* r = get_region(i);
2134     assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
2135            "Region " SIZE_FORMAT " pinning status is inconsistent", i);
2136   }
2137 }
2138 #endif
2139 
2140 ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
2141   return _gc_timer;
2142 }
2143 
2144 void ShenandoahHeap::prepare_concurrent_roots() {
2145   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2146   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2147   set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
2148   set_concurrent_weak_root_in_progress(true);
2149   if (unload_classes()) {
2150     _unloader.prepare();
2151   }
2152 }
2153 
2154 void ShenandoahHeap::finish_concurrent_roots() {
2155   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2156   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2157   if (unload_classes()) {
2158     _unloader.finish();
2159   }
2160 }
2161 
2162 #ifdef ASSERT
2163 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
2164   assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
2165 
2166   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
2167     // Use ParallelGCThreads inside safepoints
2168     assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads (%u) within safepoint, not %u",
2169            ParallelGCThreads, nworkers);
2170   } else {
2171     // Use ConcGCThreads outside safepoints
    assert(nworkers == ConcGCThreads, "Use ConcGCThreads (%u) outside safepoints, not %u",
2173            ConcGCThreads, nworkers);
2174   }
2175 }
2176 #endif
2177 
2178 ShenandoahVerifier* ShenandoahHeap::verifier() {
2179   guarantee(ShenandoahVerify, "Should be enabled");
2180   assert (_verifier != nullptr, "sanity");
2181   return _verifier;
2182 }
2183 
2184 template<bool CONCURRENT>
2185 class ShenandoahUpdateHeapRefsTask : public WorkerTask {
2186 private:
2187   ShenandoahHeap* _heap;
2188   ShenandoahRegionIterator* _regions;
2189 public:
2190   ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions) :
2191     WorkerTask("Shenandoah Update References"),
2192     _heap(ShenandoahHeap::heap()),
2193     _regions(regions) {
2194   }
2195 
2196   void work(uint worker_id) {
2197     if (CONCURRENT) {
2198       ShenandoahConcurrentWorkerSession worker_session(worker_id);
2199       ShenandoahSuspendibleThreadSetJoiner stsj;
2200       do_work<ShenandoahConcUpdateRefsClosure>(worker_id);
2201     } else {
2202       ShenandoahParallelWorkerSession worker_session(worker_id);
2203       do_work<ShenandoahSTWUpdateRefsClosure>(worker_id);
2204     }
2205   }
2206 
2207 private:
2208   template<class T>
2209   void do_work(uint worker_id) {
2210     T cl;
2211     if (CONCURRENT && (worker_id == 0)) {
2212       // We ask the first worker to replenish the Mutator free set by moving regions previously reserved to hold the
2213       // results of evacuation.  These reserves are no longer necessary because evacuation has completed.
2214       size_t cset_regions = _heap->collection_set()->count();
2215       // We cannot transfer any more regions than will be reclaimed when the existing collection set is recycled because
2216       // we need the reclaimed collection set regions to replenish the collector reserves
2217       _heap->free_set()->move_regions_from_collector_to_mutator(cset_regions);
2218     }
2219     // If !CONCURRENT, there's no value in expanding Mutator free set
2220 
2221     ShenandoahHeapRegion* r = _regions->next();
2222     ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
2223     while (r != nullptr) {
2224       HeapWord* update_watermark = r->get_update_watermark();
2225       assert (update_watermark >= r->bottom(), "sanity");
2226       if (r->is_active() && !r->is_cset()) {
2227         _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2228       }
2229       if (ShenandoahPacing) {
2230         _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
2231       }
2232       if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
2233         return;
2234       }
2235       r = _regions->next();
2236     }
2237   }
2238 };
2239 
2240 void ShenandoahHeap::update_heap_references(bool concurrent) {
2241   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2242 
2243   if (concurrent) {
2244     ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator);
2245     workers()->run_task(&task);
2246   } else {
2247     ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator);
2248     workers()->run_task(&task);
2249   }
2250 }
2251 
2252 
2253 class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
2254 private:
2255   ShenandoahHeapLock* const _lock;
2256 
2257 public:
2258   ShenandoahFinalUpdateRefsUpdateRegionStateClosure() : _lock(ShenandoahHeap::heap()->lock()) {}
2259 
2260   void heap_region_do(ShenandoahHeapRegion* r) {
    // Drop the unnecessary "pinned" state from regions that do not have CP marks
    // anymore, as this allows trashing them.
2263 
2264     if (r->is_active()) {
2265       if (r->is_pinned()) {
2266         if (r->pin_count() == 0) {
2267           ShenandoahHeapLocker locker(_lock);
2268           r->make_unpinned();
2269         }
2270       } else {
2271         if (r->pin_count() > 0) {
2272           ShenandoahHeapLocker locker(_lock);
2273           r->make_pinned();
2274         }
2275       }
2276     }
2277   }
2278 
2279   bool is_thread_safe() { return true; }
2280 };
2281 
2282 void ShenandoahHeap::update_heap_region_states(bool concurrent) {
2283   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2284   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2285 
2286   {
2287     ShenandoahGCPhase phase(concurrent ?
2288                             ShenandoahPhaseTimings::final_update_refs_update_region_states :
2289                             ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
2290     ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl;
2291     parallel_heap_region_iterate(&cl);
2292 
2293     assert_pinned_region_status();
2294   }
2295 
2296   {
2297     ShenandoahGCPhase phase(concurrent ?
2298                             ShenandoahPhaseTimings::final_update_refs_trash_cset :
2299                             ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
2300     trash_cset_regions();
2301   }
2302 }
2303 
2304 void ShenandoahHeap::rebuild_free_set(bool concurrent) {
2305   {
2306     ShenandoahGCPhase phase(concurrent ?
2307                             ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
2308                             ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
2309     ShenandoahHeapLocker locker(lock());
2310     _free_set->rebuild();
2311   }
2312 }
2313 
2314 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2315   print_on(st);
2316   st->cr();
2317   print_heap_regions_on(st);
2318 }
2319 
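// Several heap regions share one committable bitmap slice. Returns true if
// any region in r's slice (optionally excluding r itself) is committed, in
// which case the slice has to stay committed.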
2320 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2321   size_t slice = r->index() / _bitmap_regions_per_slice;
2322 
2323   size_t regions_from = _bitmap_regions_per_slice * slice;
2324   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2325   for (size_t g = regions_from; g < regions_to; g++) {
2326     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2327     if (skip_self && g == r->index()) continue;
2328     if (get_region(g)->is_committed()) {
2329       return true;
2330     }
2331   }
2332   return false;
2333 }
2334 
2335 bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2336   shenandoah_assert_heaplocked();
2337 
2338   // Bitmaps in special regions do not need commits
2339   if (_bitmap_region_special) {
2340     return true;
2341   }
2342 
2343   if (is_bitmap_slice_committed(r, true)) {
    // Some other region from the group is already committed, meaning the bitmap
    // slice is already committed, so we exit right away.
2346     return true;
2347   }
2348 
2349   // Commit the bitmap slice:
2350   size_t slice = r->index() / _bitmap_regions_per_slice;
2351   size_t off = _bitmap_bytes_per_slice * slice;
2352   size_t len = _bitmap_bytes_per_slice;
2353   char* start = (char*) _bitmap_region.start() + off;
2354 
2355   if (!os::commit_memory(start, len, false)) {
2356     return false;
2357   }
2358 
2359   if (AlwaysPreTouch) {
2360     os::pretouch_memory(start, start + len, _pretouch_bitmap_page_size);
2361   }
2362 
2363   return true;
2364 }
2365 
2366 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2367   shenandoah_assert_heaplocked();
2368 
2369   // Bitmaps in special regions do not need uncommits
2370   if (_bitmap_region_special) {
2371     return true;
2372   }
2373 
2374   if (is_bitmap_slice_committed(r, true)) {
    // Some other region from the group is still committed, meaning the bitmap
    // slice should stay committed; exit right away.
2377     return true;
2378   }
2379 
2380   // Uncommit the bitmap slice:
2381   size_t slice = r->index() / _bitmap_regions_per_slice;
2382   size_t off = _bitmap_bytes_per_slice * slice;
2383   size_t len = _bitmap_bytes_per_slice;
2384   if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
2385     return false;
2386   }
2387   return true;
2388 }
2389 
2390 void ShenandoahHeap::safepoint_synchronize_begin() {
2391   StackWatermarkSet::safepoint_synchronize_begin();
2392   SuspendibleThreadSet::synchronize();
2393 }
2394 
2395 void ShenandoahHeap::safepoint_synchronize_end() {
2396   SuspendibleThreadSet::desynchronize();
2397 }
2398 
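// Diagnostic support for ShenandoahAllocFailureALot: on roughly 5% of calls,
// raise the injection flag so the allocation path pretends to fail, then
// sleep briefly to see whether the GC got cancelled as a result.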
2399 void ShenandoahHeap::try_inject_alloc_failure() {
2400   if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2401     _inject_alloc_failure.set();
2402     os::naked_short_sleep(1);
2403     if (cancelled_gc()) {
2404       log_info(gc)("Allocation failure was successfully injected");
2405     }
2406   }
2407 }
2408 
2409 bool ShenandoahHeap::should_inject_alloc_failure() {
2410   return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2411 }
2412 
2413 void ShenandoahHeap::initialize_serviceability() {
2414   _memory_pool = new ShenandoahMemoryPool(this);
2415   _cycle_memory_manager.add_pool(_memory_pool);
2416   _stw_memory_manager.add_pool(_memory_pool);
2417 }
2418 
2419 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2420   GrowableArray<GCMemoryManager*> memory_managers(2);
2421   memory_managers.append(&_cycle_memory_manager);
2422   memory_managers.append(&_stw_memory_manager);
2423   return memory_managers;
2424 }
2425 
2426 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2427   GrowableArray<MemoryPool*> memory_pools(1);
2428   memory_pools.append(_memory_pool);
2429   return memory_pools;
2430 }
2431 
2432 MemoryUsage ShenandoahHeap::memory_usage() {
2433   return _memory_pool->get_memory_usage();
2434 }
2435 
2436 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2437   _heap(ShenandoahHeap::heap()),
2438   _index(0) {}
2439 
2440 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2441   _heap(heap),
2442   _index(0) {}
2443 
2444 void ShenandoahRegionIterator::reset() {
2445   _index = 0;
2446 }
2447 
2448 bool ShenandoahRegionIterator::has_next() const {
2449   return _index < _heap->num_regions();
2450 }
2451 
2452 char ShenandoahHeap::gc_state() const {
2453   return _gc_state.raw_value();
2454 }
2455 
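// Per-worker liveness caches: marking workers accumulate per-region live
// data locally and flush it into the region counters at the end, avoiding
// contended atomic updates on the regions.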
2456 ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
2457 #ifdef ASSERT
2458   assert(_liveness_cache != nullptr, "sanity");
2459   assert(worker_id < _max_workers, "sanity");
2460   for (uint i = 0; i < num_regions(); i++) {
2461     assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
2462   }
2463 #endif
2464   return _liveness_cache[worker_id];
2465 }
2466 
2467 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
2468   assert(worker_id < _max_workers, "sanity");
2469   assert(_liveness_cache != nullptr, "sanity");
2470   ShenandoahLiveData* ld = _liveness_cache[worker_id];
2471   for (uint i = 0; i < num_regions(); i++) {
2472     ShenandoahLiveData live = ld[i];
2473     if (live > 0) {
2474       ShenandoahHeapRegion* r = get_region(i);
2475       r->increase_live_data_gc_words(live);
2476       ld[i] = 0;
2477     }
2478   }
2479 }
2480 
2481 bool ShenandoahHeap::requires_barriers(stackChunkOop obj) const {
2482   if (is_idle()) return false;
2483 
2484   // Objects allocated after marking start are implicitly alive, don't need any barriers during
2485   // marking phase.
2486   if (is_concurrent_mark_in_progress() &&
2487      !marking_context()->allocated_after_mark_start(obj)) {
2488     return true;
2489   }
2490 
  // Can not guarantee that obj, and everything it references, is up to date.
2492   if (has_forwarded_objects()) {
2493     return true;
2494   }
2495 
2496   return false;
2497 }