1 /*
   2  * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "memory/allocation.hpp"
  28 #include "memory/universe.hpp"
  29 
  30 #include "gc/shared/classUnloadingContext.hpp"
  31 #include "gc/shared/fullGCForwarding.hpp"
  32 #include "gc/shared/gcArguments.hpp"
  33 #include "gc/shared/gcTimer.hpp"
  34 #include "gc/shared/gcTraceTime.inline.hpp"
  35 #include "gc/shared/locationPrinter.inline.hpp"
  36 #include "gc/shared/memAllocator.hpp"
  37 #include "gc/shared/plab.hpp"
  38 #include "gc/shared/tlab_globals.hpp"
  39 
  40 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  41 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  42 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  43 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  44 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
  46 #include "gc/shenandoah/shenandoahControlThread.hpp"
  47 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  48 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  49 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  50 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  51 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  52 #include "gc/shenandoah/shenandoahInitLogger.hpp"
  53 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  54 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
  55 #include "gc/shenandoah/shenandoahMetrics.hpp"
  56 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  57 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
  58 #include "gc/shenandoah/shenandoahPadding.hpp"
  59 #include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
  60 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  61 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  62 #include "gc/shenandoah/shenandoahSTWMark.hpp"
  63 #include "gc/shenandoah/shenandoahUtils.hpp"
  64 #include "gc/shenandoah/shenandoahVerifier.hpp"
  65 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  66 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  67 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  68 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  69 #include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
  70 #include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
  71 #if INCLUDE_JFR
  72 #include "gc/shenandoah/shenandoahJfrSupport.hpp"
  73 #endif
  74 
  75 #include "cds/archiveHeapWriter.hpp"
  76 #include "classfile/systemDictionary.hpp"
  77 #include "code/codeCache.hpp"
  78 #include "memory/classLoaderMetaspace.hpp"
  79 #include "memory/metaspaceUtils.hpp"
  80 #include "nmt/mallocTracker.hpp"
  81 #include "nmt/memTracker.hpp"
  82 #include "oops/compressedOops.inline.hpp"
  83 #include "prims/jvmtiTagMap.hpp"
  84 #include "runtime/atomic.hpp"
  85 #include "runtime/globals.hpp"
  86 #include "runtime/interfaceSupport.inline.hpp"
  87 #include "runtime/java.hpp"
  88 #include "runtime/orderAccess.hpp"
  89 #include "runtime/safepointMechanism.hpp"
  90 #include "runtime/stackWatermarkSet.hpp"
  91 #include "runtime/vmThread.hpp"
  92 #include "utilities/events.hpp"
  93 #include "utilities/powerOfTwo.hpp"
  94 
  95 class ShenandoahPretouchHeapTask : public WorkerTask {
  96 private:
  97   ShenandoahRegionIterator _regions;
  98   const size_t _page_size;
  99 public:
 100   ShenandoahPretouchHeapTask(size_t page_size) :
 101     WorkerTask("Shenandoah Pretouch Heap"),
 102     _page_size(page_size) {}
 103 
 104   virtual void work(uint worker_id) {
 105     ShenandoahHeapRegion* r = _regions.next();
 106     while (r != nullptr) {
 107       if (r->is_committed()) {
 108         os::pretouch_memory(r->bottom(), r->end(), _page_size);
 109       }
 110       r = _regions.next();
 111     }
 112   }
 113 };
 114 
 115 class ShenandoahPretouchBitmapTask : public WorkerTask {
 116 private:
 117   ShenandoahRegionIterator _regions;
 118   char* _bitmap_base;
 119   const size_t _bitmap_size;
 120   const size_t _page_size;
 121 public:
 122   ShenandoahPretouchBitmapTask(char* bitmap_base, size_t bitmap_size, size_t page_size) :
 123     WorkerTask("Shenandoah Pretouch Bitmap"),
 124     _bitmap_base(bitmap_base),
 125     _bitmap_size(bitmap_size),
 126     _page_size(page_size) {}
 127 
 128   virtual void work(uint worker_id) {
 129     ShenandoahHeapRegion* r = _regions.next();
 130     while (r != nullptr) {
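           // Each region maps to a contiguous slice of the mark bitmap; heap_map_factor() is
           // the number of heap bytes covered by a single bitmap byte.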
 131       size_t start = r->index()       * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 132       size_t end   = (r->index() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 133       assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " <= " SIZE_FORMAT, end, _bitmap_size);
 134 
 135       if (r->is_committed()) {
 136         os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);
 137       }
 138 
 139       r = _regions.next();
 140     }
 141   }
 142 };
 143 
 144 jint ShenandoahHeap::initialize() {
 145   //
 146   // Figure out heap sizing
 147   //
 148 
 149   size_t init_byte_size = InitialHeapSize;
 150   size_t min_byte_size  = MinHeapSize;
 151   size_t max_byte_size  = MaxHeapSize;
 152   size_t heap_alignment = HeapAlignment;
 153 
 154   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 155 
 156   Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
 157   Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
 158 
 159   _num_regions = ShenandoahHeapRegion::region_count();
 160   assert(_num_regions == (max_byte_size / reg_size_bytes),
 161          "Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT,
 162          _num_regions, max_byte_size, reg_size_bytes);
 163 
 164   // Now that we know the number of regions, initialize the heuristics.
 165   initialize_heuristics();
 166 
 167   size_t num_committed_regions = init_byte_size / reg_size_bytes;
 168   num_committed_regions = MIN2(num_committed_regions, _num_regions);
 169   assert(num_committed_regions <= _num_regions, "sanity");
 170   _initial_size = num_committed_regions * reg_size_bytes;
 171 
 172   size_t num_min_regions = min_byte_size / reg_size_bytes;
 173   num_min_regions = MIN2(num_min_regions, _num_regions);
 174   assert(num_min_regions <= _num_regions, "sanity");
 175   _minimum_size = num_min_regions * reg_size_bytes;
 176 
 177   // Default to max heap size.
 178   _soft_max_size = _num_regions * reg_size_bytes;
 179 
 180   _committed = _initial_size;
 181 
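       // When +UseLargePages is set, the heap, mark bitmap, and region storage all request
       // large pages; otherwise each uses the default VM page size.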
 182   size_t heap_page_size   = UseLargePages ? os::large_page_size() : os::vm_page_size();
 183   size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 184   size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 185 
 186   //
 187   // Reserve and commit memory for heap
 188   //
 189 
 190   ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
 191   initialize_reserved_region(heap_rs);
 192   _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
 193   _heap_region_special = heap_rs.special();
 194 
 195   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
 196          "Misaligned heap: " PTR_FORMAT, p2i(base()));
 197   os::trace_page_sizes_for_requested_size("Heap",
 198                                           max_byte_size, heap_alignment,
 199                                           heap_rs.base(),
 200                                           heap_rs.size(), heap_rs.page_size());
 201 
 202 #if SHENANDOAH_OPTIMIZED_MARKTASK
 203   // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
 204   // Fail if we ever attempt to address more than we can.
 205   if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
 206     FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
 207                           "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
 208                           "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
 209                 p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
 210     vm_exit_during_initialization("Fatal Error", buf);
 211   }
 212 #endif
 213 
 214   ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
 215   if (!_heap_region_special) {
 216     os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
 217                               "Cannot commit heap memory");
 218   }
 219 
 220   //
 221   // Reserve and commit memory for bitmap(s)
 222   //
 223 
 224   size_t bitmap_size_orig = ShenandoahMarkBitMap::compute_size(heap_rs.size());
 225   _bitmap_size = align_up(bitmap_size_orig, bitmap_page_size);
 226 
 227   size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();
 228 
 229   guarantee(bitmap_bytes_per_region != 0,
 230             "Bitmap bytes per region should not be zero");
 231   guarantee(is_power_of_2(bitmap_bytes_per_region),
 232             "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
 233 
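       // A bitmap "slice" is the granularity at which bitmap memory is committed and
       // uncommitted: it spans a whole number of pages and covers a whole number of regions.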
 234   if (bitmap_page_size > bitmap_bytes_per_region) {
 235     _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
 236     _bitmap_bytes_per_slice = bitmap_page_size;
 237   } else {
 238     _bitmap_regions_per_slice = 1;
 239     _bitmap_bytes_per_slice = bitmap_bytes_per_region;
 240   }
 241 
 242   guarantee(_bitmap_regions_per_slice >= 1,
 243             "Should have at least one region per slice: " SIZE_FORMAT,
 244             _bitmap_regions_per_slice);
 245 
 246   guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
 247             "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
 248             _bitmap_bytes_per_slice, bitmap_page_size);
 249 
 250   ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
 251   os::trace_page_sizes_for_requested_size("Mark Bitmap",
 252                                           bitmap_size_orig, bitmap_page_size,
 253                                           bitmap.base(),
 254                                           bitmap.size(), bitmap.page_size());
 255   MemTracker::record_virtual_memory_tag(bitmap.base(), mtGC);
 256   _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
 257   _bitmap_region_special = bitmap.special();
 258 
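       // Commit only enough of the bitmap to cover the initially committed regions, rounded up to whole slices.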
 259   size_t bitmap_init_commit = _bitmap_bytes_per_slice *
 260                               align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
 261   bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
 262   if (!_bitmap_region_special) {
 263     os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
 264                               "Cannot commit bitmap memory");
 265   }
 266 
 267   _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions, _max_workers);
 268 
 269   if (ShenandoahVerify) {
 270     ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
 271     os::trace_page_sizes_for_requested_size("Verify Bitmap",
 272                                             bitmap_size_orig, bitmap_page_size,
 273                                             verify_bitmap.base(),
 274                                             verify_bitmap.size(), verify_bitmap.page_size());
 275     if (!verify_bitmap.special()) {
 276       os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
 277                                 "Cannot commit verification bitmap memory");
 278     }
 279     MemTracker::record_virtual_memory_tag(verify_bitmap.base(), mtGC);
 280     MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
 281     _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
 282     _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
 283   }
 284 
 285   // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
 286   size_t aux_bitmap_page_size = bitmap_page_size;
 287 
 288   ReservedSpace aux_bitmap(_bitmap_size, aux_bitmap_page_size);
 289   os::trace_page_sizes_for_requested_size("Aux Bitmap",
 290                                           bitmap_size_orig, aux_bitmap_page_size,
 291                                           aux_bitmap.base(),
 292                                           aux_bitmap.size(), aux_bitmap.page_size());
 293   MemTracker::record_virtual_memory_tag(aux_bitmap.base(), mtGC);
 294   _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
 295   _aux_bitmap_region_special = aux_bitmap.special();
 296   _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
 297 
 298   //
 299   // Create regions and region sets
 300   //
 301   size_t region_align = align_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE);
 302   size_t region_storage_size_orig = region_align * _num_regions;
 303   size_t region_storage_size = align_up(region_storage_size_orig,
 304                                         MAX2(region_page_size, os::vm_allocation_granularity()));
 305 
 306   ReservedSpace region_storage(region_storage_size, region_page_size);
 307   os::trace_page_sizes_for_requested_size("Region Storage",
 308                                           region_storage_size_orig, region_page_size,
 309                                           region_storage.base(),
 310                                           region_storage.size(), region_storage.page_size());
 311   MemTracker::record_virtual_memory_tag(region_storage.base(), mtGC);
 312   if (!region_storage.special()) {
 313     os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false,
 314                               "Cannot commit region memory");
 315   }
 316 
 317   // Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks.
 318   // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there.
 319   // If that is not successful, bite the bullet and allocate at whatever address is available.
 320   {
 321     const size_t cset_align = MAX2<size_t>(os::vm_page_size(), os::vm_allocation_granularity());
 322     const size_t cset_size = align_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align);
 323     const size_t cset_page_size = os::vm_page_size();
 324 
 325     uintptr_t min = round_up_power_of_2(cset_align);
 326     uintptr_t max = (1u << 30u);
 327     ReservedSpace cset_rs;
 328 
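         // Probe power-of-two base addresses, doubling from the alignment up to 1 GB,
         // and take the first reservation that succeeds.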
 329     for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
 330       char* req_addr = (char*)addr;
 331       assert(is_aligned(req_addr, cset_align), "Should be aligned");
 332       cset_rs = ReservedSpace(cset_size, cset_align, cset_page_size, req_addr);
 333       if (cset_rs.is_reserved()) {
 334         assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
 335         _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 336         break;
 337       }
 338     }
 339 
 340     if (_collection_set == nullptr) {
 341       cset_rs = ReservedSpace(cset_size, cset_align, os::vm_page_size());
 342       _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 343     }
 344     os::trace_page_sizes_for_requested_size("Collection Set",
 345                                             cset_size, cset_page_size,
 346                                             cset_rs.base(),
 347                                             cset_rs.size(), cset_rs.page_size());
 348   }
 349 
 350   _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
 351   _free_set = new ShenandoahFreeSet(this, _num_regions);
 352 
 353   {
 354     ShenandoahHeapLocker locker(lock());
 355 
 356     for (size_t i = 0; i < _num_regions; i++) {
 357       HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
 358       bool is_committed = i < num_committed_regions;
 359       void* loc = region_storage.base() + i * region_align;
 360 
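           // Placement-new the region object into its pre-reserved, cache-line-aligned slot.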
 361       ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
 362       assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");
 363 
 364       _marking_context->initialize_top_at_mark_start(r);
 365       _regions[i] = r;
 366       assert(!collection_set()->is_in(i), "New region should not be in collection set");
 367     }
 368 
 369     // Initialize to complete
 370     _marking_context->mark_complete();
 371 
 372     _free_set->rebuild();
 373   }
 374 
 375   if (AlwaysPreTouch) {
 376     // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
 377     // before initialize() below zeroes it with the initializing thread. For any given region,
 378     // we touch the region and the corresponding bitmaps from the same thread.
 379     ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
 380 
 381     _pretouch_heap_page_size = heap_page_size;
 382     _pretouch_bitmap_page_size = bitmap_page_size;
 383 
 384     // OS memory managers may want to coalesce back-to-back pages. Make their jobs
 385     // simpler by pre-touching contiguous spaces (heap and bitmap) separately.
 386 
 387     ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, _pretouch_bitmap_page_size);
 388     _workers->run_task(&bcl);
 389 
 390     ShenandoahPretouchHeapTask hcl(_pretouch_heap_page_size);
 391     _workers->run_task(&hcl);
 392   }
 393 
 394   //
 395   // Initialize the rest of GC subsystems
 396   //
 397 
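       // Per-worker liveness caches: one ShenandoahLiveData entry per region, zero-filled up front.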
 398   _liveness_cache = NEW_C_HEAP_ARRAY(ShenandoahLiveData*, _max_workers, mtGC);
 399   for (uint worker = 0; worker < _max_workers; worker++) {
 400     _liveness_cache[worker] = NEW_C_HEAP_ARRAY(ShenandoahLiveData, _num_regions, mtGC);
 401     Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
 402   }
 403 
 404   // There should probably be Shenandoah-specific options for these,
 405   // just as there are G1-specific options.
 406   {
 407     ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
 408     satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
 409     satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
 410   }
 411 
 412   _monitoring_support = new ShenandoahMonitoringSupport(this);
 413   _phase_timings = new ShenandoahPhaseTimings(max_workers());
 414   ShenandoahCodeRoots::initialize();
 415 
 416   if (ShenandoahPacing) {
 417     _pacer = new ShenandoahPacer(this);
 418     _pacer->setup_for_idle();
 419   }
 420 
 421   _control_thread = new ShenandoahControlThread();
 422 
 423   ShenandoahInitLogger::print();
 424 
 425   FullGCForwarding::initialize(_heap_region);
 426 
 427   return JNI_OK;
 428 }
 429 
 430 void ShenandoahHeap::initialize_mode() {
 431   if (ShenandoahGCMode != nullptr) {
 432     if (strcmp(ShenandoahGCMode, "satb") == 0) {
 433       _gc_mode = new ShenandoahSATBMode();
 434     } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
 435       _gc_mode = new ShenandoahPassiveMode();
 436     } else {
 437       vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
 438     }
 439   } else {
 440     vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
 441   }
 442   _gc_mode->initialize_flags();
 443   if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 444     vm_exit_during_initialization(
 445             err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 446                     _gc_mode->name()));
 447   }
 448   if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
 449     vm_exit_during_initialization(
 450             err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 451                     _gc_mode->name()));
 452   }
 453 }
 454 
 455 void ShenandoahHeap::initialize_heuristics() {
 456   assert(_gc_mode != nullptr, "Must be initialized");
 457   _heuristics = _gc_mode->initialize_heuristics();
 458 
 459   if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 460     vm_exit_during_initialization(
 461             err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 462                     _heuristics->name()));
 463   }
 464   if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
 465     vm_exit_during_initialization(
 466             err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 467                     _heuristics->name()));
 468   }
 469 }
 470 
 471 #ifdef _MSC_VER
 472 #pragma warning( push )
 473 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 474 #endif
 475 
 476 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 477   CollectedHeap(),
 478   _initial_size(0),
 479   _used(0),
 480   _committed(0),
 481   _bytes_allocated_since_gc_start(0),
 482   _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
 483   _workers(nullptr),
 484   _safepoint_workers(nullptr),
 485   _heap_region_special(false),
 486   _num_regions(0),
 487   _regions(nullptr),
 488   _update_refs_iterator(this),
 489   _gc_state_changed(false),
 490   _gc_no_progress_count(0),
 491   _control_thread(nullptr),
 492   _shenandoah_policy(policy),
 493   _gc_mode(nullptr),
 494   _heuristics(nullptr),
 495   _free_set(nullptr),
 496   _pacer(nullptr),
 497   _verifier(nullptr),
 498   _phase_timings(nullptr),
 499   _monitoring_support(nullptr),
 500   _memory_pool(nullptr),
 501   _stw_memory_manager("Shenandoah Pauses"),
 502   _cycle_memory_manager("Shenandoah Cycles"),
 503   _gc_timer(new ConcurrentGCTimer()),
 504   _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
 505   _ref_processor(new ShenandoahReferenceProcessor(MAX2(_max_workers, 1U))),
 506   _marking_context(nullptr),
 507   _bitmap_size(0),
 508   _bitmap_regions_per_slice(0),
 509   _bitmap_bytes_per_slice(0),
 510   _bitmap_region_special(false),
 511   _aux_bitmap_region_special(false),
 512   _liveness_cache(nullptr),
 513   _collection_set(nullptr)
 514 {
 515   // Initialize GC mode early, so we can adjust barrier support
 516   initialize_mode();
 517   BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));
 518 
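       // Guarantee at least one GC worker, even if both ConcGCThreads and ParallelGCThreads are zero.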
 519   _max_workers = MAX2(_max_workers, 1U);
 520   _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
 521   if (_workers == nullptr) {
 522     vm_exit_during_initialization("Failed necessary allocation.");
 523   } else {
 524     _workers->initialize_workers();
 525   }
 526 
 527   if (ParallelGCThreads > 1) {
 528     _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread",
 529                                                 ParallelGCThreads);
 530     _safepoint_workers->initialize_workers();
 531   }
 532 }
 533 
 534 #ifdef _MSC_VER
 535 #pragma warning( pop )
 536 #endif
 537 
 538 class ShenandoahResetBitmapTask : public WorkerTask {
 539 private:
 540   ShenandoahRegionIterator _regions;
 541 
 542 public:
 543   ShenandoahResetBitmapTask() :
 544     WorkerTask("Shenandoah Reset Bitmap") {}
 545 
 546   void work(uint worker_id) {
 547     ShenandoahHeapRegion* region = _regions.next();
 548     ShenandoahHeap* heap = ShenandoahHeap::heap();
 549     ShenandoahMarkingContext* const ctx = heap->marking_context();
 550     while (region != nullptr) {
 551       if (heap->is_bitmap_slice_committed(region)) {
 552         ctx->clear_bitmap(region);
 553       }
 554       region = _regions.next();
 555     }
 556   }
 557 };
 558 
 559 void ShenandoahHeap::reset_mark_bitmap() {
 560   assert_gc_workers(_workers->active_workers());
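       // Mark the context incomplete before clearing the bitmaps, so stale marking data is not trusted in the meantime.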
 561   mark_incomplete_marking_context();
 562 
 563   ShenandoahResetBitmapTask task;
 564   _workers->run_task(&task);
 565 }
 566 
 567 void ShenandoahHeap::print_on(outputStream* st) const {
 568   st->print_cr("Shenandoah Heap");
 569   st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
 570                byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
 571                byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
 572                byte_size_in_proper_unit(committed()),    proper_unit_for_byte_size(committed()),
 573                byte_size_in_proper_unit(used()),         proper_unit_for_byte_size(used()));
 574   st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions",
 575                num_regions(),
 576                byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
 577                proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));
 578 
 579   st->print("Status: ");
 580   if (has_forwarded_objects())                 st->print("has forwarded objects, ");
 581   if (is_concurrent_mark_in_progress())        st->print("marking, ");
 582   if (is_evacuation_in_progress())             st->print("evacuating, ");
 583   if (is_update_refs_in_progress())            st->print("updating refs, ");
 584   if (is_degenerated_gc_in_progress())         st->print("degenerated gc, ");
 585   if (is_full_gc_in_progress())                st->print("full gc, ");
 586   if (is_full_gc_move_in_progress())           st->print("full gc move, ");
 587   if (is_concurrent_weak_root_in_progress())   st->print("concurrent weak roots, ");
 588   if (is_concurrent_strong_root_in_progress() &&
 589       !is_concurrent_weak_root_in_progress())  st->print("concurrent strong roots, ");
 590 
 591   if (cancelled_gc()) {
 592     st->print("cancelled");
 593   } else {
 594     st->print("not cancelled");
 595   }
 596   st->cr();
 597 
 598   st->print_cr("Reserved region:");
 599   st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
 600                p2i(reserved_region().start()),
 601                p2i(reserved_region().end()));
 602 
 603   ShenandoahCollectionSet* cset = collection_set();
 604   st->print_cr("Collection set:");
 605   if (cset != nullptr) {
 606     st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address()));
 607     st->print_cr(" - map (biased):  " PTR_FORMAT, p2i(cset->biased_map_address()));
 608   } else {
 609     st->print_cr(" (null)");
 610   }
 611 
 612   st->cr();
 613   MetaspaceUtils::print_on(st);
 614 
 615   if (Verbose) {
 616     st->cr();
 617     print_heap_regions_on(st);
 618   }
 619 }
 620 
 621 class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
 622 public:
 623   void do_thread(Thread* thread) {
 624     assert(thread != nullptr, "Sanity");
 625     assert(thread->is_Worker_thread(), "Only worker thread expected");
 626     ShenandoahThreadLocalData::initialize_gclab(thread);
 627   }
 628 };
 629 
 630 void ShenandoahHeap::post_initialize() {
 631   CollectedHeap::post_initialize();
 632   MutexLocker ml(Threads_lock);
 633 
 634   ShenandoahInitWorkerGCLABClosure init_gclabs;
 635   _workers->threads_do(&init_gclabs);
 636 
 637   // The GCLAB cannot be initialized early during VM startup, as it cannot determine its max_size.
 638   // Instead, we let WorkerThreads initialize the GCLAB when a new worker is created.
 639   _workers->set_initialize_gclab();
 640   if (_safepoint_workers != nullptr) {
 641     _safepoint_workers->threads_do(&init_gclabs);
 642     _safepoint_workers->set_initialize_gclab();
 643   }
 644 
 645   _heuristics->initialize();
 646 
 647   JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers();)
 648 }
 649 
 650 size_t ShenandoahHeap::used() const {
 651   return Atomic::load(&_used);
 652 }
 653 
 654 size_t ShenandoahHeap::committed() const {
 655   return Atomic::load(&_committed);
 656 }
 657 
 658 size_t ShenandoahHeap::available() const {
 659   return free_set()->available();
 660 }
 661 
 662 void ShenandoahHeap::increase_committed(size_t bytes) {
 663   shenandoah_assert_heaplocked_or_safepoint();
 664   _committed += bytes;
 665 }
 666 
 667 void ShenandoahHeap::decrease_committed(size_t bytes) {
 668   shenandoah_assert_heaplocked_or_safepoint();
 669   _committed -= bytes;
 670 }
 671 
 672 void ShenandoahHeap::increase_used(size_t bytes) {
 673   Atomic::add(&_used, bytes, memory_order_relaxed);
 674 }
 675 
 676 void ShenandoahHeap::set_used(size_t bytes) {
 677   Atomic::store(&_used, bytes);
 678 }
 679 
 680 void ShenandoahHeap::decrease_used(size_t bytes) {
 681   assert(used() >= bytes, "never decrease heap size by more than we've left");
 682   Atomic::sub(&_used, bytes, memory_order_relaxed);
 683 }
 684 
 685 void ShenandoahHeap::increase_allocated(size_t bytes) {
 686   Atomic::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed);
 687 }
 688 
 689 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
 690   size_t bytes = words * HeapWordSize;
 691   if (!waste) {
 692     increase_used(bytes);
 693   }
 694   increase_allocated(bytes);
 695   if (ShenandoahPacing) {
 696     control_thread()->pacing_notify_alloc(words);
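         // Wasted words are not counted as used, but they still draw down the pacer budget (forced claim).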
 697     if (waste) {
 698       pacer()->claim_for_alloc<true>(words);
 699     }
 700   }
 701 }
 702 
 703 size_t ShenandoahHeap::capacity() const {
 704   return committed();
 705 }
 706 
 707 size_t ShenandoahHeap::max_capacity() const {
 708   return _num_regions * ShenandoahHeapRegion::region_size_bytes();
 709 }
 710 
 711 size_t ShenandoahHeap::soft_max_capacity() const {
 712   size_t v = Atomic::load(&_soft_max_size);
 713   assert(min_capacity() <= v && v <= max_capacity(),
 714          "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
 715          min_capacity(), v, max_capacity());
 716   return v;
 717 }
 718 
 719 void ShenandoahHeap::set_soft_max_capacity(size_t v) {
 720   assert(min_capacity() <= v && v <= max_capacity(),
 721          "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
 722          min_capacity(), v, max_capacity());
 723   Atomic::store(&_soft_max_size, v);
 724 }
 725 
 726 size_t ShenandoahHeap::min_capacity() const {
 727   return _minimum_size;
 728 }
 729 
 730 size_t ShenandoahHeap::initial_capacity() const {
 731   return _initial_size;
 732 }
 733 
 734 bool ShenandoahHeap::is_in(const void* p) const {
 735   if (is_in_reserved(p)) {
 736     if (is_full_gc_move_in_progress()) {
 737       // Full GC move is running, so we do not have consistent region
 738       // information yet. But we know the pointer is in the heap.
 739       return true;
 740     }
 741     // Now check if we point to a live section in an active region.
 742     ShenandoahHeapRegion* r = heap_region_containing(p);
 743     return (r->is_active() && p < r->top());
 744   } else {
 745     return false;
 746   }
 747 }
 748 
 749 void ShenandoahHeap::maybe_uncommit(double shrink_before, size_t shrink_until) {
 750   assert (ShenandoahUncommit, "should be enabled");
 751 
 752   // Determine if there is work to do. This avoids taking the heap lock if there is
 753   // no work available, avoids spamming the log with superfluous messages,
 754   // and minimizes the amount of work done while the lock is taken.
 755 
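       // Nothing to do if the committed footprint is already at or below the shrink target.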
 756   if (committed() <= shrink_until) return;
 757 
 758   bool has_work = false;
 759   for (size_t i = 0; i < num_regions(); i++) {
 760     ShenandoahHeapRegion* r = get_region(i);
 761     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
 762       has_work = true;
 763       break;
 764     }
 765   }
 766 
 767   if (has_work) {
 768     static const char* msg = "Concurrent uncommit";
 769     ShenandoahConcurrentPhase gcPhase(msg, ShenandoahPhaseTimings::conc_uncommit, true /* log_heap_usage */);
 770     EventMark em("%s", msg);
 771 
 772     op_uncommit(shrink_before, shrink_until);
 773   }
 774 }
 775 
 776 void ShenandoahHeap::op_uncommit(double shrink_before, size_t shrink_until) {
 777   assert (ShenandoahUncommit, "should be enabled");
 778 
 779   // The application allocates from the beginning of the heap, and the GC allocates at
 780   // the end of it. It is more efficient to uncommit from the end, so that the application
 781   // can keep using the committed regions near the beginning. GC allocations are much less
 782   // frequent, and can therefore accept the commit costs.
 783 
 784   size_t count = 0;
 785   for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow
 786     ShenandoahHeapRegion* r = get_region(i - 1);
 787     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
 788       ShenandoahHeapLocker locker(lock());
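           // Re-check under the heap lock: the region may have been reused since the unlocked check above.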
 789       if (r->is_empty_committed()) {
 790         if (committed() < shrink_until + ShenandoahHeapRegion::region_size_bytes()) {
 791           break;
 792         }
 793 
 794         r->make_uncommitted();
 795         count++;
 796       }
 797     }
 798     SpinPause(); // allow allocators to take the lock
 799   }
 800 
 801   if (count > 0) {
 802     notify_heap_changed();
 803   }
 804 }
 805 
 806 bool ShenandoahHeap::check_soft_max_changed() {
 807   size_t new_soft_max = Atomic::load(&SoftMaxHeapSize);
 808   size_t old_soft_max = soft_max_capacity();
 809   if (new_soft_max != old_soft_max) {
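         // Clamp the requested value into the valid [min_capacity, max_capacity] range.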
 810     new_soft_max = MAX2(min_capacity(), new_soft_max);
 811     new_soft_max = MIN2(max_capacity(), new_soft_max);
 812     if (new_soft_max != old_soft_max) {
 813       log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
 814                    byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
 815                    byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
 816       );
 817       set_soft_max_capacity(new_soft_max);
 818       return true;
 819     }
 820   }
 821   return false;
 822 }
 823 
 824 void ShenandoahHeap::notify_heap_changed() {
 825   // Update monitoring counters when we take a new region. This amortizes the
 826   // update costs on the slow path.
 827   monitoring_support()->notify_heap_changed();
 828 
 829   // This is called from allocation path, and thus should be fast.
 830   _heap_changed.try_set();
 831 }
 832 
 833 void ShenandoahHeap::set_forced_counters_update(bool value) {
 834   monitoring_support()->set_forced_counters_update(value);
 835 }
 836 
 837 void ShenandoahHeap::handle_force_counters_update() {
 838   monitoring_support()->handle_force_counters_update();
 839 }
 840 
 841 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
 842   // New object should fit the GCLAB size
 843   size_t min_size = MAX2(size, PLAB::min_size());
 844 
 845   // Figure out the size of the new GCLAB, looking back at heuristics. Expand aggressively.
 846   size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
 847   new_size = MIN2(new_size, PLAB::max_size());
 848   new_size = MAX2(new_size, PLAB::min_size());
 849 
 850   // Record the new heuristic value even if we take a shortcut. This captures
 851   // the case when moderately-sized objects always take the shortcut. At some point,
 852   // the heuristics should catch up with them.
 853   ShenandoahThreadLocalData::set_gclab_size(thread, new_size);
 854 
 855   if (new_size < size) {
 856     // New size still does not fit the object. Fall back to shared allocation.
 857     // This avoids retiring perfectly good GCLABs when we encounter a large object.
 858     return nullptr;
 859   }
 860 
 861   // Retire current GCLAB, and allocate a new one.
 862   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
 863   gclab->retire();
 864 
 865   size_t actual_size = 0;
 866   HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
 867   if (gclab_buf == nullptr) {
 868     return nullptr;
 869   }
 870 
 871   assert (size <= actual_size, "allocation should fit");
 872 
 873   // ...and clear or zap the just-allocated GCLAB, if needed.
 874   if (ZeroTLAB) {
 875     Copy::zero_to_words(gclab_buf, actual_size);
 876   } else if (ZapTLAB) {
 877     // Skip mangling the space corresponding to the object header to
 878     // ensure that the returned space is not considered parsable by
 879     // any concurrent GC thread.
 880     size_t hdr_size = oopDesc::header_size();
 881     Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
 882   }
 883   gclab->set_buf(gclab_buf, actual_size);
 884   return gclab->allocate(size);
 885 }
 886 
 887 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
 888                                             size_t requested_size,
 889                                             size_t* actual_size) {
 890   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
 891   HeapWord* res = allocate_memory(req);
 892   if (res != nullptr) {
 893     *actual_size = req.actual_size();
 894   } else {
 895     *actual_size = 0;
 896   }
 897   return res;
 898 }
 899 
 900 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
 901                                              size_t word_size,
 902                                              size_t* actual_size) {
 903   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
 904   HeapWord* res = allocate_memory(req);
 905   if (res != nullptr) {
 906     *actual_size = req.actual_size();
 907   } else {
 908     *actual_size = 0;
 909   }
 910   return res;
 911 }
 912 
 913 HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
 914   intptr_t pacer_epoch = 0;
 915   bool in_new_region = false;
 916   HeapWord* result = nullptr;
 917 
 918   if (req.is_mutator_alloc()) {
 919     if (ShenandoahPacing) {
 920       pacer()->pace_for_alloc(req.size());
 921       pacer_epoch = pacer()->epoch();
 922     }
 923 
 924     if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
 925       result = allocate_memory_under_lock(req, in_new_region);
 926     }
 927 
 928     // Check that GC overhead is not exceeded.
 929     //
 930     // Shenandoah will grind along for quite a while allocating one
 931     // object at a time using shared (non-TLAB) allocations. This check
 932     // is testing that the GC overhead limit has not been exceeded.
 933     // This will notify the collector to start a cycle, but will raise
 934     // an OOME to the mutator if the last Full GCs have not made progress.
 935     if (result == nullptr && !req.is_lab_alloc() && get_gc_no_progress_count() > ShenandoahNoProgressThreshold) {
 936       control_thread()->handle_alloc_failure(req, false);
 937       return nullptr;
 938     }
 939 
 940     if (result == nullptr) {
 941       // Block until control thread reacted, then retry allocation.
 942       //
 943       // It might happen that one of the threads requesting allocation unblocks
 944       // much later, after the GC has happened, only to fail the second allocation because
 945       // other threads have already depleted the free storage. In this case, a better
 946       // strategy is to try again, until at least one full GC has completed.
 947       //
 948       // Stop retrying and return nullptr to raise an OOM exception if our allocation failed even after:
 949       //   a) We experienced a GC that had good progress, or
 950       //   b) We experienced at least one Full GC (whether or not it had good progress)
 951       //
 952       // TODO: Consider GLOBAL GC rather than Full GC to remediate OOM condition: https://bugs.openjdk.org/browse/JDK-8335910
 953 
 954       size_t original_count = shenandoah_policy()->full_gc_count();
 955       while ((result == nullptr) && (original_count == shenandoah_policy()->full_gc_count())) {
 956         control_thread()->handle_alloc_failure(req, true);
 957         result = allocate_memory_under_lock(req, in_new_region);
 958       }
 959       if (result != nullptr) {
 960         // If our allocation request has been satisfied after it initially failed, we count this as good GC progress.
 961         notify_gc_progress();
 962       }
 963       if (log_is_enabled(Debug, gc, alloc)) {
 964         ResourceMark rm;
 965         log_debug(gc, alloc)("Thread: %s, Result: " PTR_FORMAT ", Request: %s, Size: " SIZE_FORMAT
 966                              ", Original: " SIZE_FORMAT ", Latest: " SIZE_FORMAT,
 967                              Thread::current()->name(), p2i(result), req.type_string(), req.size(),
 968                              original_count, get_gc_no_progress_count());
 969       }
 970     }
 971   } else {
 972     assert(req.is_gc_alloc(), "Can only accept GC allocs here");
 973     result = allocate_memory_under_lock(req, in_new_region);
 974     // Do not call handle_alloc_failure() here, because we cannot block.
 975     // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
 976   }
 977 
 978   if (in_new_region) {
 979     notify_heap_changed();
 980   }
 981 
 982   if (result != nullptr) {
 983     size_t requested = req.size();
 984     size_t actual = req.actual_size();
 985 
 986     assert (req.is_lab_alloc() || (requested == actual),
 987             "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
 988             ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);
 989 
 990     if (req.is_mutator_alloc()) {
 991       notify_mutator_alloc_words(actual, false);
 992 
 993       // If we requested more than we were granted, give the rest back to the pacer.
 994       // This only matters if we are in the same pacing epoch: do not try to unpace
 995       // over the budget for the other phase.
 996       if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
 997         pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
 998       }
 999     } else {
1000       increase_used(actual*HeapWordSize);
1001     }
1002   }
1003 
1004   return result;
1005 }
1006 
1007 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
1008   // If we are dealing with a mutator allocation, then we may need to block for a safepoint.
1009   // We cannot block for a safepoint for GC allocations, because there is a high chance
1010   // we are already running at a safepoint or from the stack watermark machinery, and we cannot
1011   // block again.
1012   ShenandoahHeapLocker locker(lock(), req.is_mutator_alloc());
1013   return _free_set->allocate(req, in_new_region);
1014 }
1015 
1016 HeapWord* ShenandoahHeap::mem_allocate(size_t size,
1017                                         bool*  gc_overhead_limit_was_exceeded) {
1018   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
1019   return allocate_memory(req);
1020 }
1021 
1022 MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
1023                                                              size_t size,
1024                                                              Metaspace::MetadataType mdtype) {
1025   MetaWord* result;
1026 
1027   // Inform metaspace OOM to GC heuristics if class unloading is possible.
1028   if (heuristics()->can_unload_classes()) {
1029     ShenandoahHeuristics* h = heuristics();
1030     h->record_metaspace_oom();
1031   }
1032 
1033   // Expand and retry allocation
1034   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1035   if (result != nullptr) {
1036     return result;
1037   }
1038 
1039   // Start full GC
1040   collect(GCCause::_metadata_GC_clear_soft_refs);
1041 
1042   // Retry allocation
1043   result = loader_data->metaspace_non_null()->allocate(size, mdtype);
1044   if (result != nullptr) {
1045     return result;
1046   }
1047 
1048   // Expand and retry allocation
1049   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1050   if (result != nullptr) {
1051     return result;
1052   }
1053 
1054   // Out of memory
1055   return nullptr;
1056 }
1057 
1058 class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
1059 private:
1060   ShenandoahHeap* const _heap;
1061   Thread* const _thread;
1062 public:
1063   ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
1064     _heap(heap), _thread(Thread::current()) {}
1065 
1066   void do_object(oop p) {
1067     shenandoah_assert_marked(nullptr, p);
1068     if (!p->is_forwarded()) {
1069       _heap->evacuate_object(p, _thread);
1070     }
1071   }
1072 };
1073 
1074 class ShenandoahEvacuationTask : public WorkerTask {
1075 private:
1076   ShenandoahHeap* const _sh;
1077   ShenandoahCollectionSet* const _cs;
1078   bool _concurrent;
1079 public:
1080   ShenandoahEvacuationTask(ShenandoahHeap* sh,
1081                            ShenandoahCollectionSet* cs,
1082                            bool concurrent) :
1083     WorkerTask("Shenandoah Evacuation"),
1084     _sh(sh),
1085     _cs(cs),
1086     _concurrent(concurrent)
1087   {}
1088 
1089   void work(uint worker_id) {
1090     if (_concurrent) {
1091       ShenandoahConcurrentWorkerSession worker_session(worker_id);
1092       ShenandoahSuspendibleThreadSetJoiner stsj;
1093       ShenandoahEvacOOMScope oom_evac_scope;
1094       do_work();
1095     } else {
1096       ShenandoahParallelWorkerSession worker_session(worker_id);
1097       ShenandoahEvacOOMScope oom_evac_scope;
1098       do_work();
1099     }
1100   }
1101 
1102 private:
1103   void do_work() {
1104     ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
1105     ShenandoahHeapRegion* r;
1106     while ((r =_cs->claim_next()) != nullptr) {
1107       assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
1108       _sh->marked_object_iterate(r, &cl);
1109 
1110       if (ShenandoahPacing) {
1111         _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
1112       }
1113 
1114       if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
1115         break;
1116       }
1117     }
1118   }
1119 };
1120 
1121 void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
1122   ShenandoahEvacuationTask task(this, _collection_set, concurrent);
1123   workers()->run_task(&task);
1124 }
1125 
1126 oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
1127   if (ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {
1128     // This thread went through the OOM-during-evacuation protocol, so it is safe to return
1129     // the forwarding pointer. It must not attempt to evacuate any more.
1130     return ShenandoahBarrierSet::resolve_forwarded(p);
1131   }
1132 
1133   assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
1134 
1135   size_t size = ShenandoahForwarding::size(p);
1136 
1137   assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");
1138 
1139   bool alloc_from_gclab = true;
1140   HeapWord* copy = nullptr;
1141 
1142 #ifdef ASSERT
1143   if (ShenandoahOOMDuringEvacALot &&
1144       (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
1145     copy = nullptr;
1146   } else {
1147 #endif
1148     if (UseTLAB) {
1149       copy = allocate_from_gclab(thread, size);
1150     }
1151     if (copy == nullptr) {
1152       ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size);
1153       copy = allocate_memory(req);
1154       alloc_from_gclab = false;
1155     }
1156 #ifdef ASSERT
1157   }
1158 #endif
1159 
1160   if (copy == nullptr) {
1161     control_thread()->handle_alloc_failure_evac(size);
1162 
1163     _oom_evac_handler.handle_out_of_memory_during_evacuation();
1164 
1165     return ShenandoahBarrierSet::resolve_forwarded(p);
1166   }
1167 
1168   // Copy the object:
1169   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
1170 
1171   // Try to install the new forwarding pointer.
1172   oop copy_val = cast_to_oop(copy);
1173   oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
1174   if (result == copy_val) {
1175     // Successfully evacuated. Our copy is now the public one!
1176     ContinuationGCSupport::relativize_stack_chunk(copy_val);
1177     shenandoah_assert_correct(nullptr, copy_val);
1178     return copy_val;
1179   } else {
1180     // Failed to evacuate. We need to deal with the object that is left behind. Since this
1181     // new allocation is certainly after TAMS, it will be considered live in the next cycle.
1182     // But if it happens to contain references to evacuated regions, those references would
1183     // not get updated for this stale copy during this cycle, and we will crash while scanning
1184     // it the next cycle.
1185     //
1186     // For GCLAB allocations, it is enough to rollback the allocation ptr. Either the next
1187     // object will overwrite this stale copy, or the filler object on LAB retirement will
1188     // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
1189     // have to explicitly overwrite the copy with the filler object. With that overwrite,
1190     // we have to keep the fwdptr initialized and pointing to our (stale) copy.
1191     if (alloc_from_gclab) {
1192       ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
1193     } else {
1194       fill_with_object(copy, size);
1195       shenandoah_assert_correct(nullptr, copy_val);
1196     }
1197     shenandoah_assert_correct(nullptr, result);
1198     return result;
1199   }
1200 }
1201 
1202 void ShenandoahHeap::trash_cset_regions() {
1203   ShenandoahHeapLocker locker(lock());
1204 
1205   ShenandoahCollectionSet* set = collection_set();
1206   ShenandoahHeapRegion* r;
1207   set->clear_current_index();
1208   while ((r = set->next()) != nullptr) {
1209     r->make_trash();
1210   }
1211   collection_set()->clear();
1212 }
1213 
1214 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
1215   st->print_cr("Heap Regions:");
1216   st->print_cr("Region state: EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HP=pinned humongous start");
1217   st->print_cr("              HC=humongous continuation, CS=collection set, TR=trash, P=pinned, CSP=pinned collection set");
1218   st->print_cr("BTE=bottom/top/end, TAMS=top-at-mark-start");
1219   st->print_cr("UWM=update watermark, U=used");
1220   st->print_cr("T=TLAB allocs, G=GCLAB allocs");
1221   st->print_cr("S=shared allocs, L=live data");
1222   st->print_cr("CP=critical pins");
1223 
1224   for (size_t i = 0; i < num_regions(); i++) {
1225     get_region(i)->print_on(st);
1226   }
1227 }
1228 
1229 void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
1230   assert(start->is_humongous_start(), "reclaim regions starting with the first one");
1231 
1232   oop humongous_obj = cast_to_oop(start->bottom());
1233   size_t size = humongous_obj->size();
1234   size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
1235   size_t index = start->index() + required_regions - 1;
1236 
1237   assert(!start->has_live(), "liveness must be zero");
1238 
1239   for(size_t i = 0; i < required_regions; i++) {
1240     // Reclaim from the tail. Otherwise, the assertion fails when printing the region to the trace log,
1241     // as it expects that every region belongs to a humongous object that starts with a humongous start region.
1242     ShenandoahHeapRegion* region = get_region(index --);
1243 
1244     assert(region->is_humongous(), "expect correct humongous start or continuation");
1245     assert(!region->is_cset(), "Humongous region should not be in collection set");
1246 
1247     region->make_trash_immediate();
1248   }
1249 }
1250 
1251 class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
1252 public:
1253   ShenandoahCheckCleanGCLABClosure() {}
1254   void do_thread(Thread* thread) {
1255     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1256     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1257     assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
1258   }
1259 };
1260 
1261 class ShenandoahRetireGCLABClosure : public ThreadClosure {
1262 private:
1263   bool const _resize;
1264 public:
1265   ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
1266   void do_thread(Thread* thread) {
1267     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1268     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1269     gclab->retire();
1270     if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1271       ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1272     }
1273   }
1274 };
1275 
1276 void ShenandoahHeap::labs_make_parsable() {
1277   assert(UseTLAB, "Only call with UseTLAB");
1278 
1279   ShenandoahRetireGCLABClosure cl(false);
1280 
1281   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1282     ThreadLocalAllocBuffer& tlab = t->tlab();
1283     tlab.make_parsable();
1284     cl.do_thread(t);
1285   }
1286 
1287   workers()->threads_do(&cl);
1288 }
1289 
1290 void ShenandoahHeap::tlabs_retire(bool resize) {
1291   assert(UseTLAB, "Only call with UseTLAB");
1292   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1293 
1294   ThreadLocalAllocStats stats;
1295 
1296   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1297     ThreadLocalAllocBuffer& tlab = t->tlab();
1298     tlab.retire(&stats);
1299     if (resize) {
1300       tlab.resize();
1301     }
1302   }
1303 
1304   stats.publish();
1305 
1306 #ifdef ASSERT
1307   ShenandoahCheckCleanGCLABClosure cl;
1308   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1309     cl.do_thread(t);
1310   }
1311   workers()->threads_do(&cl);
1312 #endif
1313 }
1314 
1315 void ShenandoahHeap::gclabs_retire(bool resize) {
1316   assert(UseTLAB, "Only call with UseTLAB");
1317   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1318 
1319   ShenandoahRetireGCLABClosure cl(resize);
1320   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1321     cl.do_thread(t);
1322   }
1323   workers()->threads_do(&cl);
1324 
1325   if (safepoint_workers() != nullptr) {
1326     safepoint_workers()->threads_do(&cl);
1327   }
1328 }
1329 
1330 // Returns size in bytes
1331 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1332   // Return the max allowed size, and let the allocation path
1333   // figure out the safe size for the current allocation.
1334   return ShenandoahHeapRegion::max_tlab_size_bytes();
1335 }
1336 
1337 size_t ShenandoahHeap::max_tlab_size() const {
1338   // Returns size in words
1339   return ShenandoahHeapRegion::max_tlab_size_words();
1340 }
1341 
1342 void ShenandoahHeap::collect(GCCause::Cause cause) {
1343   control_thread()->request_gc(cause);
1344 }
1345 
1346 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
1347   //assert(false, "Shouldn't need to do full collections");
1348 }
1349 
1350 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1351   ShenandoahHeapRegion* r = heap_region_containing(addr);
1352   if (r != nullptr) {
1353     return r->block_start(addr);
1354   }
1355   return nullptr;
1356 }
1357 
1358 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1359   ShenandoahHeapRegion* r = heap_region_containing(addr);
1360   return r->block_is_obj(addr);
1361 }
1362 
1363 bool ShenandoahHeap::print_location(outputStream* st, void* addr) const {
1364   return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr);
1365 }
1366 
1367 void ShenandoahHeap::prepare_for_verify() {
1368   if (SafepointSynchronize::is_at_safepoint() && UseTLAB) {
1369     labs_make_parsable();
1370   }
1371 }
1372 
1373 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1374   if (_shenandoah_policy->is_at_shutdown()) {
1375     return;
1376   }
1377 
1378   if (_control_thread != nullptr) {
1379     tcl->do_thread(_control_thread);
1380   }
1381 
1382   workers()->threads_do(tcl);
1383   if (_safepoint_workers != nullptr) {
1384     _safepoint_workers->threads_do(tcl);
1385   }
1386 }
1387 
1388 void ShenandoahHeap::print_tracing_info() const {
1389   LogTarget(Info, gc, stats) lt;
1390   if (lt.is_enabled()) {
1391     ResourceMark rm;
1392     LogStream ls(lt);
1393 
1394     phase_timings()->print_global_on(&ls);
1395 
1396     ls.cr();
1397     ls.cr();
1398 
1399     shenandoah_policy()->print_gc_stats(&ls);
1400 
1401     ls.cr();
1402     ls.cr();
1403   }
1404 }
1405 
1406 void ShenandoahHeap::verify(VerifyOption vo) {
1407   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1408     if (ShenandoahVerify) {
1409       verifier()->verify_generic(vo);
1410     } else {
1411       // TODO: Consider allocating verification bitmaps on demand,
1412       // and turn this on unconditionally.
1413     }
1414   }
}

size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1417   return _free_set->capacity();
1418 }
1419 
1420 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1421 private:
1422   MarkBitMap* _bitmap;
1423   ShenandoahScanObjectStack* _oop_stack;
1424   ShenandoahHeap* const _heap;
1425   ShenandoahMarkingContext* const _marking_context;
1426 
1427   template <class T>
1428   void do_oop_work(T* p) {
1429     T o = RawAccess<>::oop_load(p);
1430     if (!CompressedOops::is_null(o)) {
1431       oop obj = CompressedOops::decode_not_null(o);
1432       if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
        // There may be dead oops in weak roots during the concurrent root phase; do not touch them.
1434         return;
1435       }
1436       obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
1437 
1438       assert(oopDesc::is_oop(obj), "must be a valid oop");
1439       if (!_bitmap->is_marked(obj)) {
1440         _bitmap->mark(obj);
1441         _oop_stack->push(obj);
1442       }
1443     }
1444   }
1445 public:
1446   ObjectIterateScanRootClosure(MarkBitMap* bitmap, ShenandoahScanObjectStack* oop_stack) :
1447     _bitmap(bitmap), _oop_stack(oop_stack), _heap(ShenandoahHeap::heap()),
1448     _marking_context(_heap->marking_context()) {}
1449   void do_oop(oop* p)       { do_oop_work(p); }
1450   void do_oop(narrowOop* p) { do_oop_work(p); }
1451 };
1452 
/*
 * This is public API, used in preparation of object_iterate().
 * Since we don't do a linear scan of the heap in object_iterate() (see comment below),
 * we don't need to make the heap parsable. For Shenandoah-internal linear heap scans
 * that we can control, we call SH::tlabs_retire and SH::gclabs_retire instead.
 */
1459 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
1460   // No-op.
1461 }
1462 
/*
 * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
 *
 * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
 * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
 * calling oopDesc::size() would crash) or dangling reference fields (also crashes). Linear
 * scanning therefore depends on having a valid marking bitmap to support it. However, we only
 * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
 * marking bitmap during marking, after aborted marking, or during/after cleanup (when we have
 * just wiped the bitmap in preparation for the next marking).
 *
 * For all those reasons, we implement object iteration as a single marking traversal, reporting
 * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
 * is allowed to report dead objects, but is not required to do so.
 */
1478 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1479   // Reset bitmap
  if (!prepare_aux_bitmap_for_iteration()) {
    return;
  }
1482 
1483   ShenandoahScanObjectStack oop_stack;
1484   ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1485   // Seed the stack with root scan
1486   scan_roots_for_iteration(&oop_stack, &oops);
1487 
1488   // Work through the oop stack to traverse heap
  while (!oop_stack.is_empty()) {
1490     oop obj = oop_stack.pop();
1491     assert(oopDesc::is_oop(obj), "must be a valid oop");
1492     cl->do_object(obj);
1493     obj->oop_iterate(&oops);
1494   }
1495 
1496   assert(oop_stack.is_empty(), "should be empty");
1497   // Reclaim bitmap
1498   reclaim_aux_bitmap_for_iteration();
1499 }
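
// For illustration only (hypothetical sketch, not part of the VM): a caller running at a
// safepoint, such as a heap dumper, drives the traversal above with a plain ObjectClosure:
//
//   class CountObjectsClosure : public ObjectClosure {
//     size_t _count;
//   public:
//     CountObjectsClosure() : _count(0) {}
//     void do_object(oop obj) { _count++; }
//     size_t count() const { return _count; }
//   };
//
//   CountObjectsClosure count_cl;
//   ShenandoahHeap::heap()->object_iterate(&count_cl);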
1500 
1501 bool ShenandoahHeap::prepare_aux_bitmap_for_iteration() {
1502   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1503 
1504   if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1505     log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1506     return false;
1507   }
1508   // Reset bitmap
1509   _aux_bit_map.clear();
1510   return true;
1511 }
1512 
1513 void ShenandoahHeap::scan_roots_for_iteration(ShenandoahScanObjectStack* oop_stack, ObjectIterateScanRootClosure* oops) {
  // Process GC roots according to the current GC cycle.
  // This populates the work stack with the initial objects.
  // It is important to relinquish the associated locks before diving
  // into the heap dumper.
1518   uint n_workers = safepoint_workers() != nullptr ? safepoint_workers()->active_workers() : 1;
1519   ShenandoahHeapIterationRootScanner rp(n_workers);
1520   rp.roots_do(oops);
1521 }
1522 
1523 void ShenandoahHeap::reclaim_aux_bitmap_for_iteration() {
1524   if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
1525     log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
1526   }
1527 }
1528 
// Closure for iterating objects in parallel
1530 class ShenandoahObjectIterateParScanClosure : public BasicOopIterateClosure {
1531 private:
1532   MarkBitMap* _bitmap;
1533   ShenandoahObjToScanQueue* _queue;
1534   ShenandoahHeap* const _heap;
1535   ShenandoahMarkingContext* const _marking_context;
1536 
1537   template <class T>
1538   void do_oop_work(T* p) {
1539     T o = RawAccess<>::oop_load(p);
1540     if (!CompressedOops::is_null(o)) {
1541       oop obj = CompressedOops::decode_not_null(o);
1542       if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
        // There may be dead oops in weak roots during the concurrent root phase; do not touch them.
1544         return;
1545       }
1546       obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
1547 
1548       assert(oopDesc::is_oop(obj), "Must be a valid oop");
1549       if (_bitmap->par_mark(obj)) {
1550         _queue->push(ShenandoahMarkTask(obj));
1551       }
1552     }
1553   }
1554 public:
1555   ShenandoahObjectIterateParScanClosure(MarkBitMap* bitmap, ShenandoahObjToScanQueue* q) :
1556     _bitmap(bitmap), _queue(q), _heap(ShenandoahHeap::heap()),
1557     _marking_context(_heap->marking_context()) {}
1558   void do_oop(oop* p)       { do_oop_work(p); }
1559   void do_oop(narrowOop* p) { do_oop_work(p); }
1560 };
1561 
// Object iterator for parallel heap iteration.
// The root scanning phase happens in the constructor, as preparation for the
// parallel marking queues.
// Every worker processes its own marking queue. Work-stealing is used
// to balance the workload.
1567 class ShenandoahParallelObjectIterator : public ParallelObjectIteratorImpl {
1568 private:
1569   uint                         _num_workers;
1570   bool                         _init_ready;
1571   MarkBitMap*                  _aux_bit_map;
1572   ShenandoahHeap*              _heap;
1573   ShenandoahScanObjectStack    _roots_stack; // global roots stack
1574   ShenandoahObjToScanQueueSet* _task_queues;
1575 public:
  ShenandoahParallelObjectIterator(uint num_workers, MarkBitMap* bitmap) :
        _num_workers(num_workers),
        _init_ready(false),
        _aux_bit_map(bitmap),
        _heap(ShenandoahHeap::heap()),
        // Initialize eagerly: the destructor checks this pointer even if the
        // constructor bails out before prepare_worker_queues() runs.
        _task_queues(nullptr) {
1581     // Initialize bitmap
1582     _init_ready = _heap->prepare_aux_bitmap_for_iteration();
1583     if (!_init_ready) {
1584       return;
1585     }
1586 
1587     ObjectIterateScanRootClosure oops(_aux_bit_map, &_roots_stack);
1588     _heap->scan_roots_for_iteration(&_roots_stack, &oops);
1589 
1590     _init_ready = prepare_worker_queues();
1591   }
1592 
1593   ~ShenandoahParallelObjectIterator() {
1594     // Reclaim bitmap
1595     _heap->reclaim_aux_bitmap_for_iteration();
1596     // Reclaim queue for workers
    if (_task_queues != nullptr) {
1598       for (uint i = 0; i < _num_workers; ++i) {
1599         ShenandoahObjToScanQueue* q = _task_queues->queue(i);
1600         if (q != nullptr) {
1601           delete q;
1602           _task_queues->register_queue(i, nullptr);
1603         }
1604       }
1605       delete _task_queues;
1606       _task_queues = nullptr;
1607     }
1608   }
1609 
1610   virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
1611     if (_init_ready) {
1612       object_iterate_parallel(cl, worker_id, _task_queues);
1613     }
1614   }
1615 
1616 private:
  // Divide the global roots stack into per-worker queues
1618   bool prepare_worker_queues() {
1619     _task_queues = new ShenandoahObjToScanQueueSet((int) _num_workers);
    // Initialize a queue for every worker
1621     for (uint i = 0; i < _num_workers; ++i) {
1622       ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
1623       _task_queues->register_queue(i, task_queue);
1624     }
    // Divide the roots among the workers. Assuming that the distribution of object references
    // correlates with the root kind, use round-robin so that every worker gets a similar
    // chance to process every kind of root.
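    // Example (illustrative numbers only): with 10 roots and 3 workers, worker 0 receives
    // the roots popped at positions 0, 3, 6 and 9; worker 1 those at 1, 4 and 7; worker 2
    // those at 2, 5 and 8.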
1628     size_t roots_num = _roots_stack.size();
1629     if (roots_num == 0) {
1630       // No work to do
1631       return false;
1632     }
1633 
1634     for (uint j = 0; j < roots_num; j++) {
1635       uint stack_id = j % _num_workers;
1636       oop obj = _roots_stack.pop();
1637       _task_queues->queue(stack_id)->push(ShenandoahMarkTask(obj));
1638     }
1639     return true;
1640   }
1641 
1642   void object_iterate_parallel(ObjectClosure* cl,
1643                                uint worker_id,
1644                                ShenandoahObjToScanQueueSet* queue_set) {
1645     assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1646     assert(queue_set != nullptr, "task queue must not be null");
1647 
1648     ShenandoahObjToScanQueue* q = queue_set->queue(worker_id);
1649     assert(q != nullptr, "object iterate queue must not be null");
1650 
1651     ShenandoahMarkTask t;
1652     ShenandoahObjectIterateParScanClosure oops(_aux_bit_map, q);
1653 
1654     // Work through the queue to traverse heap.
1655     // Steal when there is no task in queue.
1656     while (q->pop(t) || queue_set->steal(worker_id, t)) {
1657       oop obj = t.obj();
1658       assert(oopDesc::is_oop(obj), "must be a valid oop");
1659       cl->do_object(obj);
1660       obj->oop_iterate(&oops);
1661     }
1662     assert(q->is_empty(), "should be empty");
1663   }
1664 };
1665 
1666 ParallelObjectIteratorImpl* ShenandoahHeap::parallel_object_iterator(uint workers) {
1667   return new ShenandoahParallelObjectIterator(workers, &_aux_bit_map);
1668 }
1669 
1670 // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
1671 void ShenandoahHeap::keep_alive(oop obj) {
1672   if (is_concurrent_mark_in_progress() && (obj != nullptr)) {
1673     ShenandoahBarrierSet::barrier_set()->enqueue(obj);
1674   }
1675 }
1676 
1677 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1678   for (size_t i = 0; i < num_regions(); i++) {
1679     ShenandoahHeapRegion* current = get_region(i);
1680     blk->heap_region_do(current);
1681   }
1682 }
1683 
1684 class ShenandoahParallelHeapRegionTask : public WorkerTask {
1685 private:
1686   ShenandoahHeap* const _heap;
1687   ShenandoahHeapRegionClosure* const _blk;
1688   size_t const _stride;
1689 
1690   shenandoah_padding(0);
1691   volatile size_t _index;
1692   shenandoah_padding(1);
1693 
1694 public:
1695   ShenandoahParallelHeapRegionTask(ShenandoahHeapRegionClosure* blk, size_t stride) :
1696           WorkerTask("Shenandoah Parallel Region Operation"),
1697           _heap(ShenandoahHeap::heap()), _blk(blk), _stride(stride), _index(0) {}
1698 
1699   void work(uint worker_id) {
1700     ShenandoahParallelWorkerSession worker_session(worker_id);
1701     size_t stride = _stride;
1702 
1703     size_t max = _heap->num_regions();
1704     while (Atomic::load(&_index) < max) {
1705       size_t cur = Atomic::fetch_then_add(&_index, stride, memory_order_relaxed);
1706       size_t start = cur;
1707       size_t end = MIN2(cur + stride, max);
1708       if (start >= max) break;
1709 
1710       for (size_t i = cur; i < end; i++) {
1711         ShenandoahHeapRegion* current = _heap->get_region(i);
1712         _blk->heap_region_do(current);
1713       }
1714     }
1715   }
1716 };
1717 
1718 void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1719   assert(blk->is_thread_safe(), "Only thread-safe closures here");
1720   const uint active_workers = workers()->active_workers();
1721   const size_t n_regions = num_regions();
1722   size_t stride = ShenandoahParallelRegionStride;
1723   if (stride == 0 && active_workers > 1) {
1724     // Automatically derive the stride to balance the work between threads
1725     // evenly. Do not try to split work if below the reasonable threshold.
1726     constexpr size_t threshold = 4096;
1727     stride = n_regions <= threshold ?
1728             threshold :
1729             (n_regions + active_workers - 1) / active_workers;
1730   }
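  // Example (illustrative numbers only): with n_regions = 16384 and 8 active workers, the
  // derived stride is (16384 + 8 - 1) / 8 = 2048 regions per claimed chunk. With
  // n_regions = 2048 (below the threshold), the stride stays at 4096 and the check below
  // falls through to single-threaded iteration.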
1731 
1732   if (n_regions > stride && active_workers > 1) {
1733     ShenandoahParallelHeapRegionTask task(blk, stride);
1734     workers()->run_task(&task);
1735   } else {
1736     heap_region_iterate(blk);
1737   }
1738 }
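
// An empty handshake: executing it forces every Java thread through a thread-state
// transition, which guarantees that all Java threads have had a chance to observe
// whatever global state was published before the rendezvous was requested.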
1739 
1740 class ShenandoahRendezvousClosure : public HandshakeClosure {
1741 public:
1742   inline ShenandoahRendezvousClosure(const char* name) : HandshakeClosure(name) {}
1743   inline void do_thread(Thread* thread) {}
1744 };
1745 
1746 void ShenandoahHeap::rendezvous_threads(const char* name) {
1747   ShenandoahRendezvousClosure cl(name);
1748   Handshake::execute(&cl);
1749 }
1750 
1751 void ShenandoahHeap::recycle_trash() {
1752   free_set()->recycle_trash();
1753 }
1754 
1755 class ShenandoahResetUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1756 private:
1757   ShenandoahMarkingContext* const _ctx;
1758 public:
1759   ShenandoahResetUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
1760 
1761   void heap_region_do(ShenandoahHeapRegion* r) {
1762     if (r->is_active()) {
      // Reset live data and set TAMS optimistically. We will recheck these under the pause
      // anyway, to capture any updates that happen in the meantime.
1765       r->clear_live_data();
1766       _ctx->capture_top_at_mark_start(r);
1767     }
1768   }
1769 
1770   bool is_thread_safe() { return true; }
1771 };
1772 
1773 void ShenandoahHeap::prepare_gc() {
1774   reset_mark_bitmap();
1775 
1776   ShenandoahResetUpdateRegionStateClosure cl;
1777   parallel_heap_region_iterate(&cl);
1778 }
1779 
1780 class ShenandoahFinalMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1781 private:
1782   ShenandoahMarkingContext* const _ctx;
1783   ShenandoahHeapLock* const _lock;
1784 
1785 public:
1786   ShenandoahFinalMarkUpdateRegionStateClosure() :
1787     _ctx(ShenandoahHeap::heap()->complete_marking_context()), _lock(ShenandoahHeap::heap()->lock()) {}
1788 
1789   void heap_region_do(ShenandoahHeapRegion* r) {
1790     if (r->is_active()) {
1791       // All allocations past TAMS are implicitly live, adjust the region data.
1792       // Bitmaps/TAMS are swapped at this point, so we need to poll complete bitmap.
1793       HeapWord *tams = _ctx->top_at_mark_start(r);
1794       HeapWord *top = r->top();
1795       if (top > tams) {
1796         r->increase_live_data_alloc_words(pointer_delta(top, tams));
1797       }
1798 
      // We are about to select the collection set, so make sure it knows about the current
      // pinning status. This also allows trashing more regions whose pinning status has
      // since been dropped.
1802       if (r->is_pinned()) {
1803         if (r->pin_count() == 0) {
1804           ShenandoahHeapLocker locker(_lock);
1805           r->make_unpinned();
1806         }
1807       } else {
1808         if (r->pin_count() > 0) {
1809           ShenandoahHeapLocker locker(_lock);
1810           r->make_pinned();
1811         }
1812       }
1813 
      // Remember the limit for updating refs. It is guaranteed that no from-space refs
      // are written from here on.
1816       r->set_update_watermark_at_safepoint(r->top());
1817     } else {
1818       assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
1819       assert(_ctx->top_at_mark_start(r) == r->top(),
1820              "Region " SIZE_FORMAT " should have correct TAMS", r->index());
1821     }
1822   }
1823 
1824   bool is_thread_safe() { return true; }
1825 };
1826 
1827 void ShenandoahHeap::prepare_regions_and_collection_set(bool concurrent) {
1828   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
1829   {
1830     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_update_region_states :
1831                                          ShenandoahPhaseTimings::degen_gc_final_update_region_states);
1832     ShenandoahFinalMarkUpdateRegionStateClosure cl;
1833     parallel_heap_region_iterate(&cl);
1834 
1835     assert_pinned_region_status();
1836   }
1837 
1838   {
1839     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::choose_cset :
1840                                          ShenandoahPhaseTimings::degen_gc_choose_cset);
1841     ShenandoahHeapLocker locker(lock());
1842     _collection_set->clear();
1843     heuristics()->choose_collection_set(_collection_set);
1844   }
1845 
1846   {
1847     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset :
1848                                          ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
1849     ShenandoahHeapLocker locker(lock());
1850     _free_set->rebuild();
1851   }
1852 }
1853 
1854 void ShenandoahHeap::do_class_unloading() {
1855   _unloader.unload();
1856 }
1857 
1858 void ShenandoahHeap::stw_weak_refs(bool full_gc) {
1859   // Weak refs processing
1860   ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
1861                                                 : ShenandoahPhaseTimings::degen_gc_weakrefs;
1862   ShenandoahTimingsTracker t(phase);
1863   ShenandoahGCWorkerPhase worker_phase(phase);
1864   ref_processor()->process_references(phase, workers(), false /* concurrent */);
1865 }
1866 
1867 void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
1868   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
1869 
1870   // Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to
1871   // make them parsable for update code to work correctly. Plus, we can compute new sizes
1872   // for future GCLABs here.
1873   if (UseTLAB) {
1874     ShenandoahGCPhase phase(concurrent ?
1875                             ShenandoahPhaseTimings::init_update_refs_manage_gclabs :
1876                             ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
1877     gclabs_retire(ResizeTLAB);
1878   }
1879 
1880   _update_refs_iterator.reset();
1881 }
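
// The GC state is changed at a Shenandoah safepoint via set_gc_state() (below), which only
// records that a change happened; this routine then copies the new state into each Java
// thread's thread-local ShenandoahThreadLocalData while the VM is still at the safepoint,
// so mutators resume with a consistent view of the GC phase.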
1882 
1883 void ShenandoahHeap::propagate_gc_state_to_java_threads() {
1884   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
1885   if (_gc_state_changed) {
1886     _gc_state_changed = false;
1887     char state = gc_state();
1888     for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1889       ShenandoahThreadLocalData::set_gc_state(t, state);
1890     }
1891   }
1892 }
1893 
1894 void ShenandoahHeap::set_gc_state(uint mask, bool value) {
1895   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
1896   _gc_state.set_cond(mask, value);
1897   _gc_state_changed = true;
1898 }
1899 
1900 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1901   assert(!has_forwarded_objects(), "Not expected before/after mark phase");
1902   set_gc_state(MARKING, in_progress);
1903   ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1904 }
1905 
1906 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1907   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
1908   set_gc_state(EVACUATION, in_progress);
1909 }
1910 
1911 void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
1912   if (in_progress) {
1913     _concurrent_strong_root_in_progress.set();
1914   } else {
1915     _concurrent_strong_root_in_progress.unset();
1916   }
1917 }
1918 
1919 void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
1920   set_gc_state(WEAK_ROOTS, cond);
1921 }
1922 
1923 GCTracer* ShenandoahHeap::tracer() {
1924   return shenandoah_policy()->tracer();
1925 }
1926 
1927 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
1928   return _free_set->used();
1929 }
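
// Atomically transition the cancellation flag from CANCELLABLE to CANCELLED. Only the thread
// that actually performs the transition observes the previous value CANCELLABLE and returns
// true; concurrent callers observe CANCELLED and return false, so the cancellation in
// cancel_gc() below is logged only by the winning thread.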
1930 
1931 bool ShenandoahHeap::try_cancel_gc() {
1932   jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
1933   return prev == CANCELLABLE;
1934 }
1935 
1936 void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
1937   if (try_cancel_gc()) {
1938     FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
1939     log_info(gc)("%s", msg.buffer());
1940     Events::log(Thread::current(), "%s", msg.buffer());
1941   }
1942 }
1943 
1944 uint ShenandoahHeap::max_workers() {
1945   return _max_workers;
1946 }
1947 
1948 void ShenandoahHeap::stop() {
  // The shutdown sequence should be able to complete even while a GC is running.

  // Step 0. Notify the policy to disable event recording and to prevent visiting GC threads during shutdown.
1952   _shenandoah_policy->record_shutdown();
1953 
1954   // Step 1. Notify control thread that we are in shutdown.
1955   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
1956   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
1957   control_thread()->prepare_for_graceful_shutdown();
1958 
1959   // Step 2. Notify GC workers that we are cancelling GC.
1960   cancel_gc(GCCause::_shenandoah_stop_vm);
1961 
1962   // Step 3. Wait until GC worker exits normally.
1963   control_thread()->stop();
1964 }
1965 
1966 void ShenandoahHeap::stw_unload_classes(bool full_gc) {
1967   if (!unload_classes()) return;
1968   ClassUnloadingContext ctx(_workers->active_workers(),
1969                             true /* unregister_nmethods_during_purge */,
1970                             false /* lock_nmethod_free_separately */);
1971 
1972   // Unload classes and purge SystemDictionary.
1973   {
1974     ShenandoahPhaseTimings::Phase phase = full_gc ?
1975                                           ShenandoahPhaseTimings::full_gc_purge_class_unload :
1976                                           ShenandoahPhaseTimings::degen_gc_purge_class_unload;
1977     ShenandoahIsAliveSelector is_alive;
1978     {
1979       CodeCache::UnlinkingScope scope(is_alive.is_alive_closure());
1980       ShenandoahGCPhase gc_phase(phase);
1981       ShenandoahGCWorkerPhase worker_phase(phase);
1982       bool unloading_occurred = SystemDictionary::do_unloading(gc_timer());
1983 
1984       uint num_workers = _workers->active_workers();
1985       ShenandoahClassUnloadingTask unlink_task(phase, num_workers, unloading_occurred);
1986       _workers->run_task(&unlink_task);
1987     }
    // Release the memory of unloaded nmethods.
1989     ClassUnloadingContext::context()->purge_and_free_nmethods();
1990   }
1991 
1992   {
1993     ShenandoahGCPhase phase(full_gc ?
1994                             ShenandoahPhaseTimings::full_gc_purge_cldg :
1995                             ShenandoahPhaseTimings::degen_gc_purge_cldg);
1996     ClassLoaderDataGraph::purge(true /* at_safepoint */);
1997   }
1998   // Resize and verify metaspace
1999   MetaspaceGC::compute_new_size();
2000   DEBUG_ONLY(MetaspaceUtils::verify();)
2001 }
2002 
// Weak roots are either pre-evacuated (final mark) or updated (final updaterefs),
// so they should not have forwarded oops.
// However, we do need to "null out" dead oops in the roots, if that cannot be done
// in concurrent cycles.
2007 void ShenandoahHeap::stw_process_weak_roots(bool full_gc) {
2008   uint num_workers = _workers->active_workers();
2009   ShenandoahPhaseTimings::Phase timing_phase = full_gc ?
2010                                                ShenandoahPhaseTimings::full_gc_purge_weak_par :
2011                                                ShenandoahPhaseTimings::degen_gc_purge_weak_par;
2012   ShenandoahGCPhase phase(timing_phase);
2013   ShenandoahGCWorkerPhase worker_phase(timing_phase);
2014   // Cleanup weak roots
2015   if (has_forwarded_objects()) {
2016     ShenandoahForwardedIsAliveClosure is_alive;
2017     ShenandoahNonConcUpdateRefsClosure keep_alive;
2018     ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveClosure, ShenandoahNonConcUpdateRefsClosure>
2019       cleaning_task(timing_phase, &is_alive, &keep_alive, num_workers);
2020     _workers->run_task(&cleaning_task);
2021   } else {
2022     ShenandoahIsAliveClosure is_alive;
2023 #ifdef ASSERT
2024     ShenandoahAssertNotForwardedClosure verify_cl;
2025     ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, ShenandoahAssertNotForwardedClosure>
2026       cleaning_task(timing_phase, &is_alive, &verify_cl, num_workers);
2027 #else
2028     ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, DoNothingClosure>
2029       cleaning_task(timing_phase, &is_alive, &do_nothing_cl, num_workers);
2030 #endif
2031     _workers->run_task(&cleaning_task);
2032   }
2033 }
2034 
2035 void ShenandoahHeap::parallel_cleaning(bool full_gc) {
2036   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2037   assert(is_stw_gc_in_progress(), "Only for Degenerated and Full GC");
2038   ShenandoahGCPhase phase(full_gc ?
2039                           ShenandoahPhaseTimings::full_gc_purge :
2040                           ShenandoahPhaseTimings::degen_gc_purge);
2041   stw_weak_refs(full_gc);
2042   stw_process_weak_roots(full_gc);
2043   stw_unload_classes(full_gc);
2044 }
2045 
2046 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
2047   set_gc_state(HAS_FORWARDED, cond);
2048 }
2049 
2050 void ShenandoahHeap::set_unload_classes(bool uc) {
2051   _unload_classes.set_cond(uc);
2052 }
2053 
2054 bool ShenandoahHeap::unload_classes() const {
2055   return _unload_classes.is_set();
2056 }
2057 
2058 address ShenandoahHeap::in_cset_fast_test_addr() {
2059   ShenandoahHeap* heap = ShenandoahHeap::heap();
2060   assert(heap->collection_set() != nullptr, "Sanity");
2061   return (address) heap->collection_set()->biased_map_address();
2062 }
2063 
2064 size_t ShenandoahHeap::bytes_allocated_since_gc_start() const {
2065   return Atomic::load(&_bytes_allocated_since_gc_start);
2066 }
2067 
2068 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
2069   Atomic::store(&_bytes_allocated_since_gc_start, (size_t)0);
2070 }
2071 
2072 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
2073   _degenerated_gc_in_progress.set_cond(in_progress);
2074 }
2075 
2076 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2077   _full_gc_in_progress.set_cond(in_progress);
2078 }
2079 
2080 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2081   assert (is_full_gc_in_progress(), "should be");
2082   _full_gc_move_in_progress.set_cond(in_progress);
2083 }
2084 
2085 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2086   set_gc_state(UPDATEREFS, in_progress);
2087 }
2088 
2089 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2090   ShenandoahCodeRoots::register_nmethod(nm);
2091 }
2092 
2093 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
2094   ShenandoahCodeRoots::unregister_nmethod(nm);
2095 }
2096 
2097 void ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
2098   heap_region_containing(o)->record_pin();
2099 }
2100 
2101 void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
2102   ShenandoahHeapRegion* r = heap_region_containing(o);
2103   assert(r != nullptr, "Sanity");
2104   assert(r->pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", r->index());
2105   r->record_unpin();
2106 }
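
// Reconcile region state with the pin counters under the heap lock: active regions whose pin
// count has dropped to zero are made unpinned (and thus eligible for the collection set),
// while active regions that have acquired pins are made pinned.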
2107 
2108 void ShenandoahHeap::sync_pinned_region_status() {
2109   ShenandoahHeapLocker locker(lock());
2110 
2111   for (size_t i = 0; i < num_regions(); i++) {
2112     ShenandoahHeapRegion *r = get_region(i);
2113     if (r->is_active()) {
2114       if (r->is_pinned()) {
2115         if (r->pin_count() == 0) {
2116           r->make_unpinned();
2117         }
2118       } else {
2119         if (r->pin_count() > 0) {
2120           r->make_pinned();
2121         }
2122       }
2123     }
2124   }
2125 
2126   assert_pinned_region_status();
2127 }
2128 
2129 #ifdef ASSERT
2130 void ShenandoahHeap::assert_pinned_region_status() {
2131   for (size_t i = 0; i < num_regions(); i++) {
2132     ShenandoahHeapRegion* r = get_region(i);
2133     assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
2134            "Region " SIZE_FORMAT " pinning status is inconsistent", i);
2135   }
2136 }
2137 #endif
2138 
2139 ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
2140   return _gc_timer;
2141 }
2142 
2143 void ShenandoahHeap::prepare_concurrent_roots() {
2144   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2145   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2146   set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
2147   set_concurrent_weak_root_in_progress(true);
2148   if (unload_classes()) {
2149     _unloader.prepare();
2150   }
2151 }
2152 
2153 void ShenandoahHeap::finish_concurrent_roots() {
2154   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2155   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2156   if (unload_classes()) {
2157     _unloader.finish();
2158   }
2159 }
2160 
2161 #ifdef ASSERT
2162 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
2163   assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
2164 
2165   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
2166     // Use ParallelGCThreads inside safepoints
2167     assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads (%u) within safepoint, not %u",
2168            ParallelGCThreads, nworkers);
2169   } else {
2170     // Use ConcGCThreads outside safepoints
    assert(nworkers == ConcGCThreads, "Use ConcGCThreads (%u) outside safepoints, not %u",
           ConcGCThreads, nworkers);
2173   }
2174 }
2175 #endif
2176 
2177 ShenandoahVerifier* ShenandoahHeap::verifier() {
2178   guarantee(ShenandoahVerify, "Should be enabled");
2179   assert (_verifier != nullptr, "sanity");
2180   return _verifier;
2181 }
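
// Task that updates references in the heap after evacuation: workers claim regions from the
// shared region iterator and update from-space references in live objects up to each region's
// update watermark. The CONCURRENT template parameter selects the concurrent closure (running
// under the SuspendibleThreadSet and periodically checking for cancellation/yield) versus the
// STW closure used by degenerated GC.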
2182 
2183 template<bool CONCURRENT>
2184 class ShenandoahUpdateHeapRefsTask : public WorkerTask {
2185 private:
2186   ShenandoahHeap* _heap;
2187   ShenandoahRegionIterator* _regions;
2188 public:
2189   ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions) :
2190     WorkerTask("Shenandoah Update References"),
2191     _heap(ShenandoahHeap::heap()),
2192     _regions(regions) {
2193   }
2194 
2195   void work(uint worker_id) {
2196     if (CONCURRENT) {
2197       ShenandoahConcurrentWorkerSession worker_session(worker_id);
2198       ShenandoahSuspendibleThreadSetJoiner stsj;
2199       do_work<ShenandoahConcUpdateRefsClosure>(worker_id);
2200     } else {
2201       ShenandoahParallelWorkerSession worker_session(worker_id);
2202       do_work<ShenandoahNonConcUpdateRefsClosure>(worker_id);
2203     }
2204   }
2205 
2206 private:
2207   template<class T>
2208   void do_work(uint worker_id) {
2209     T cl;
2210     if (CONCURRENT && (worker_id == 0)) {
2211       // We ask the first worker to replenish the Mutator free set by moving regions previously reserved to hold the
2212       // results of evacuation.  These reserves are no longer necessary because evacuation has completed.
2213       size_t cset_regions = _heap->collection_set()->count();
      // We cannot transfer any more regions than will be reclaimed when the existing collection
      // set is recycled, because we need the reclaimed collection set regions to replenish the
      // collector reserves.
2216       _heap->free_set()->move_regions_from_collector_to_mutator(cset_regions);
2217     }
    // If !CONCURRENT, there is no value in expanding the Mutator free set.
2219 
2220     ShenandoahHeapRegion* r = _regions->next();
2221     ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
2222     while (r != nullptr) {
2223       HeapWord* update_watermark = r->get_update_watermark();
2224       assert (update_watermark >= r->bottom(), "sanity");
2225       if (r->is_active() && !r->is_cset()) {
2226         _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2227       }
2228       if (ShenandoahPacing) {
2229         _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
2230       }
2231       if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
2232         return;
2233       }
2234       r = _regions->next();
2235     }
2236   }
2237 };
2238 
2239 void ShenandoahHeap::update_heap_references(bool concurrent) {
2240   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2241 
2242   if (concurrent) {
2243     ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator);
2244     workers()->run_task(&task);
2245   } else {
2246     ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator);
2247     workers()->run_task(&task);
2248   }
2249 }
2250 
2251 
2252 class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
2253 private:
2254   ShenandoahHeapLock* const _lock;
2255 
2256 public:
2257   ShenandoahFinalUpdateRefsUpdateRegionStateClosure() : _lock(ShenandoahHeap::heap()->lock()) {}
2258 
2259   void heap_region_do(ShenandoahHeapRegion* r) {
    // Drop the unnecessary "pinned" state from regions that do not have CP marks
    // anymore, as this allows trashing them.
2262 
2263     if (r->is_active()) {
2264       if (r->is_pinned()) {
2265         if (r->pin_count() == 0) {
2266           ShenandoahHeapLocker locker(_lock);
2267           r->make_unpinned();
2268         }
2269       } else {
2270         if (r->pin_count() > 0) {
2271           ShenandoahHeapLocker locker(_lock);
2272           r->make_pinned();
2273         }
2274       }
2275     }
2276   }
2277 
2278   bool is_thread_safe() { return true; }
2279 };
2280 
2281 void ShenandoahHeap::update_heap_region_states(bool concurrent) {
2282   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2283   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2284 
2285   {
2286     ShenandoahGCPhase phase(concurrent ?
2287                             ShenandoahPhaseTimings::final_update_refs_update_region_states :
2288                             ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
2289     ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl;
2290     parallel_heap_region_iterate(&cl);
2291 
2292     assert_pinned_region_status();
2293   }
2294 
2295   {
2296     ShenandoahGCPhase phase(concurrent ?
2297                             ShenandoahPhaseTimings::final_update_refs_trash_cset :
2298                             ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
2299     trash_cset_regions();
2300   }
2301 }
2302 
2303 void ShenandoahHeap::rebuild_free_set(bool concurrent) {
2304   {
2305     ShenandoahGCPhase phase(concurrent ?
2306                             ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
2307                             ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
2308     ShenandoahHeapLocker locker(lock());
2309     _free_set->rebuild();
2310   }
2311 }
2312 
2313 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2314   print_on(st);
2315   st->cr();
2316   print_heap_regions_on(st);
2317 }
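
// Marking bitmap memory is committed and uncommitted in slices, each covering
// _bitmap_regions_per_slice heap regions and spanning _bitmap_bytes_per_slice bytes.
// Example (illustrative numbers only): with _bitmap_regions_per_slice = 8, region 21 maps to
// slice 21 / 8 = 2, which covers regions 16..23 and the byte range
// [2 * _bitmap_bytes_per_slice, 3 * _bitmap_bytes_per_slice) within the bitmap region.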
2318 
2319 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2320   size_t slice = r->index() / _bitmap_regions_per_slice;
2321 
2322   size_t regions_from = _bitmap_regions_per_slice * slice;
2323   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2324   for (size_t g = regions_from; g < regions_to; g++) {
2325     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2326     if (skip_self && g == r->index()) continue;
2327     if (get_region(g)->is_committed()) {
2328       return true;
2329     }
2330   }
2331   return false;
2332 }
2333 
2334 bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2335   shenandoah_assert_heaplocked();
2336 
2337   // Bitmaps in special regions do not need commits
2338   if (_bitmap_region_special) {
2339     return true;
2340   }
2341 
2342   if (is_bitmap_slice_committed(r, true)) {
    // Some other region from the group is already committed, meaning the bitmap
    // slice is already committed; exit right away.
2345     return true;
2346   }
2347 
2348   // Commit the bitmap slice:
2349   size_t slice = r->index() / _bitmap_regions_per_slice;
2350   size_t off = _bitmap_bytes_per_slice * slice;
2351   size_t len = _bitmap_bytes_per_slice;
2352   char* start = (char*) _bitmap_region.start() + off;
2353 
2354   if (!os::commit_memory(start, len, false)) {
2355     return false;
2356   }
2357 
2358   if (AlwaysPreTouch) {
2359     os::pretouch_memory(start, start + len, _pretouch_bitmap_page_size);
2360   }
2361 
2362   return true;
2363 }
2364 
2365 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2366   shenandoah_assert_heaplocked();
2367 
2368   // Bitmaps in special regions do not need uncommits
2369   if (_bitmap_region_special) {
2370     return true;
2371   }
2372 
2373   if (is_bitmap_slice_committed(r, true)) {
    // Some other region from the group is still committed, meaning the bitmap
    // slice should stay committed; exit right away.
2376     return true;
2377   }
2378 
2379   // Uncommit the bitmap slice:
2380   size_t slice = r->index() / _bitmap_regions_per_slice;
2381   size_t off = _bitmap_bytes_per_slice * slice;
2382   size_t len = _bitmap_bytes_per_slice;
2383   if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
2384     return false;
2385   }
2386   return true;
2387 }
2388 
2389 void ShenandoahHeap::safepoint_synchronize_begin() {
2390   StackWatermarkSet::safepoint_synchronize_begin();
2391   SuspendibleThreadSet::synchronize();
2392 }
2393 
2394 void ShenandoahHeap::safepoint_synchronize_end() {
2395   SuspendibleThreadSet::desynchronize();
2396 }
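
// Test-only allocation failure injection, enabled with ShenandoahAllocFailureALot: roughly 5%
// of the calls ((os::random() % 1000) > 950) arm the _inject_alloc_failure flag, and
// should_inject_alloc_failure() below consumes it with try_unset(), so each armed flag
// simulates at most one allocation failure.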
2397 
2398 void ShenandoahHeap::try_inject_alloc_failure() {
2399   if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2400     _inject_alloc_failure.set();
2401     os::naked_short_sleep(1);
2402     if (cancelled_gc()) {
2403       log_info(gc)("Allocation failure was successfully injected");
2404     }
2405   }
2406 }
2407 
2408 bool ShenandoahHeap::should_inject_alloc_failure() {
2409   return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2410 }
2411 
2412 void ShenandoahHeap::initialize_serviceability() {
2413   _memory_pool = new ShenandoahMemoryPool(this);
2414   _cycle_memory_manager.add_pool(_memory_pool);
2415   _stw_memory_manager.add_pool(_memory_pool);
2416 }
2417 
2418 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2419   GrowableArray<GCMemoryManager*> memory_managers(2);
2420   memory_managers.append(&_cycle_memory_manager);
2421   memory_managers.append(&_stw_memory_manager);
2422   return memory_managers;
2423 }
2424 
2425 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2426   GrowableArray<MemoryPool*> memory_pools(1);
2427   memory_pools.append(_memory_pool);
2428   return memory_pools;
2429 }
2430 
2431 MemoryUsage ShenandoahHeap::memory_usage() {
2432   return _memory_pool->get_memory_usage();
2433 }
2434 
2435 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2436   _heap(ShenandoahHeap::heap()),
2437   _index(0) {}
2438 
2439 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2440   _heap(heap),
2441   _index(0) {}
2442 
2443 void ShenandoahRegionIterator::reset() {
2444   _index = 0;
2445 }
2446 
2447 bool ShenandoahRegionIterator::has_next() const {
2448   return _index < _heap->num_regions();
2449 }
2450 
2451 char ShenandoahHeap::gc_state() const {
2452   return _gc_state.raw_value();
2453 }
2454 
2455 ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
2456 #ifdef ASSERT
2457   assert(_liveness_cache != nullptr, "sanity");
2458   assert(worker_id < _max_workers, "sanity");
2459   for (uint i = 0; i < num_regions(); i++) {
2460     assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
2461   }
2462 #endif
2463   return _liveness_cache[worker_id];
2464 }
2465 
2466 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
2467   assert(worker_id < _max_workers, "sanity");
2468   assert(_liveness_cache != nullptr, "sanity");
2469   ShenandoahLiveData* ld = _liveness_cache[worker_id];
2470   for (uint i = 0; i < num_regions(); i++) {
2471     ShenandoahLiveData live = ld[i];
2472     if (live > 0) {
2473       ShenandoahHeapRegion* r = get_region(i);
2474       r->increase_live_data_gc_words(live);
2475       ld[i] = 0;
2476     }
2477   }
2478 }
2479 
2480 bool ShenandoahHeap::requires_barriers(stackChunkOop obj) const {
2481   if (is_idle()) return false;
2482 
  // Objects allocated after the start of marking are implicitly alive and don't need
  // any barriers during the marking phase.
2485   if (is_concurrent_mark_in_progress() &&
2486      !marking_context()->allocated_after_mark_start(obj)) {
2487     return true;
2488   }
2489 
  // Cannot guarantee that obj is deeply good: it may still reference forwarded objects.
2491   if (has_forwarded_objects()) {
2492     return true;
2493   }
2494 
2495   return false;
2496 }
2497 
2498 HeapWord* ShenandoahHeap::allocate_loaded_archive_space(size_t size) {
2499 #if INCLUDE_CDS_JAVA_HEAP
  // CDS wants a contiguous memory range to load a bunch of objects.
  // This effectively bypasses the normal allocation paths, and requires
  // a bit of massaging to restore GC invariants afterwards.
2503 
2504   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
2505 
2506   // Easy case: a single regular region, no further adjustments needed.
2507   if (!ShenandoahHeapRegion::requires_humongous(size)) {
2508     return allocate_memory(req);
2509   }
2510 
2511   // Hard case: the requested size would cause a humongous allocation.
2512   // We need to make sure it looks like regular allocation to the rest of GC.
2513 
  // CDS code guarantees that no objects straddle multiple regions, as long as regions
  // are at least as large as MIN_GC_REGION_ALIGNMENT. It is impractical at this point
  // to deal with the case where Shenandoah runs with smaller regions.
2517   // TODO: This check can be dropped once MIN_GC_REGION_ALIGNMENT agrees more with Shenandoah.
2518   if (ShenandoahHeapRegion::region_size_bytes() < ArchiveHeapWriter::MIN_GC_REGION_ALIGNMENT) {
2519     return nullptr;
2520   }
2521 
2522   HeapWord* mem = allocate_memory(req);
2523   size_t start_idx = heap_region_index_containing(mem);
2524   size_t num_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
2525 
2526   // Flip humongous -> regular.
2527   {
2528     ShenandoahHeapLocker locker(lock(), false);
2529     for (size_t c = start_idx; c < start_idx + num_regions; c++) {
2530       get_region(c)->make_regular_bypass();
2531     }
2532   }
2533 
2534   return mem;
2535 #else
2536   assert(false, "Archive heap loader should not be available, should not be here");
2537   return nullptr;
2538 #endif // INCLUDE_CDS_JAVA_HEAP
2539 }
2540 
2541 void ShenandoahHeap::complete_loaded_archive_space(MemRegion archive_space) {
2542   // Nothing to do here, except checking that heap looks fine.
2543 #ifdef ASSERT
2544   HeapWord* start = archive_space.start();
2545   HeapWord* end = archive_space.end();
2546 
2547   // No unclaimed space between the objects.
2548   // Objects are properly allocated in correct regions.
2549   HeapWord* cur = start;
2550   while (cur < end) {
    oop obj = cast_to_oop(cur);
    shenandoah_assert_in_correct_region(nullptr, obj);
    cur += obj->size();
2554   }
2555 
2556   // No unclaimed tail at the end of archive space.
2557   assert(cur == end,
2558          "Archive space should be fully used: " PTR_FORMAT " " PTR_FORMAT,
2559          p2i(cur), p2i(end));
2560 
2561   // Region bounds are good.
2562   ShenandoahHeapRegion* begin_reg = heap_region_containing(start);
2563   ShenandoahHeapRegion* end_reg = heap_region_containing(end);
2564   assert(begin_reg->is_regular(), "Must be");
2565   assert(end_reg->is_regular(), "Must be");
2566   assert(begin_reg->bottom() == start,
2567          "Must agree: archive-space-start: " PTR_FORMAT ", begin-region-bottom: " PTR_FORMAT,
2568          p2i(start), p2i(begin_reg->bottom()));
2569   assert(end_reg->top() == end,
2570          "Must agree: archive-space-end: " PTR_FORMAT ", end-region-top: " PTR_FORMAT,
2571          p2i(end), p2i(end_reg->top()));
2572 #endif
2573 }