/*
 * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/universe.hpp"

#include "gc/shared/gcArguments.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/plab.hpp"
#include "gc/shared/tlab_globals.hpp"

#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahInitLogger.hpp"
#include "gc/shenandoah/shenandoahMemoryPool.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPacer.inline.hpp"
#include "gc/shenandoah/shenandoahPadding.hpp"
#include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/mode/shenandoahIUMode.hpp"
#include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
#include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
#if INCLUDE_JFR
#include "gc/shenandoah/shenandoahJfrSupport.hpp"
#endif

#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "memory/classLoaderMetaspace.hpp"
#include "memory/metaspaceUtils.hpp"
#include "nmt/mallocTracker.hpp"
#include "nmt/memTracker.hpp"
#include "oops/compressedOops.inline.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/events.hpp"
#include "utilities/powerOfTwo.hpp"

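// Pre-touch tasks: walk all regions in parallel and touch the committed memory
// (heap and marking bitmap, respectively) with the given page size, so the OS
// backs it with actual pages up front. Run when -XX:+AlwaysPreTouch is set.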
class ShenandoahPretouchHeapTask : public WorkerTask {
private:
  ShenandoahRegionIterator _regions;
  const size_t _page_size;
public:
  ShenandoahPretouchHeapTask(size_t page_size) :
    WorkerTask("Shenandoah Pretouch Heap"),
    _page_size(page_size) {}

  virtual void work(uint worker_id) {
    ShenandoahHeapRegion* r = _regions.next();
    while (r != nullptr) {
      if (r->is_committed()) {
        os::pretouch_memory(r->bottom(), r->end(), _page_size);
      }
      r = _regions.next();
    }
  }
};

class ShenandoahPretouchBitmapTask : public WorkerTask {
private:
  ShenandoahRegionIterator _regions;
  char* _bitmap_base;
  const size_t _bitmap_size;
  const size_t _page_size;
public:
  ShenandoahPretouchBitmapTask(char* bitmap_base, size_t bitmap_size, size_t page_size) :
    WorkerTask("Shenandoah Pretouch Bitmap"),
    _bitmap_base(bitmap_base),
    _bitmap_size(bitmap_size),
    _page_size(page_size) {}

  virtual void work(uint worker_id) {
    ShenandoahHeapRegion* r = _regions.next();
    while (r != nullptr) {
      size_t start = r->index()       * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      size_t end   = (r->index() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " <= " SIZE_FORMAT, end, _bitmap_size);

      if (r->is_committed()) {
        os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);
      }

      r = _regions.next();
    }
  }
};

jint ShenandoahHeap::initialize() {
  //
  // Figure out heap sizing
  //

  size_t init_byte_size = InitialHeapSize;
  size_t min_byte_size  = MinHeapSize;
  size_t max_byte_size  = MaxHeapSize;
  size_t heap_alignment = HeapAlignment;

  size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();

  Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
  Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");

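  // Illustrative example (not normative): region size and count come from
  // ShenandoahHeapRegion::setup_sizes(), which targets roughly 2048 regions by
  // default; e.g. with -Xmx4g that yields 2 MB regions and _num_regions == 2048.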
  _num_regions = ShenandoahHeapRegion::region_count();
  assert(_num_regions == (max_byte_size / reg_size_bytes),
         "Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT,
         _num_regions, max_byte_size, reg_size_bytes);

  // Now we know the number of regions, initialize the heuristics.
  initialize_heuristics();

  size_t num_committed_regions = init_byte_size / reg_size_bytes;
  num_committed_regions = MIN2(num_committed_regions, _num_regions);
  assert(num_committed_regions <= _num_regions, "sanity");
  _initial_size = num_committed_regions * reg_size_bytes;

  size_t num_min_regions = min_byte_size / reg_size_bytes;
  num_min_regions = MIN2(num_min_regions, _num_regions);
  assert(num_min_regions <= _num_regions, "sanity");
  _minimum_size = num_min_regions * reg_size_bytes;

  // Default to max heap size.
  _soft_max_size = _num_regions * reg_size_bytes;

  _committed = _initial_size;

  size_t heap_page_size   = UseLargePages ? os::large_page_size() : os::vm_page_size();
  size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
  size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();

  //
  // Reserve and commit memory for heap
  //

  ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
  initialize_reserved_region(heap_rs);
  _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
  _heap_region_special = heap_rs.special();

  assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
         "Misaligned heap: " PTR_FORMAT, p2i(base()));
  os::trace_page_sizes_for_requested_size("Heap",
                                          max_byte_size, heap_alignment,
                                          heap_rs.base(),
                                          heap_rs.size(), heap_rs.page_size());

#if SHENANDOAH_OPTIMIZED_MARKTASK
  // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
  // Fail if we ever attempt to address more than we can.
  if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
    FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
                          "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
                          "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
                p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
    vm_exit_during_initialization("Fatal Error", buf);
  }
#endif

  ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
  if (!_heap_region_special) {
    os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
                              "Cannot commit heap memory");
  }

  //
  // Reserve and commit memory for bitmap(s)
  //

  size_t bitmap_size_orig = ShenandoahMarkBitMap::compute_size(heap_rs.size());
  _bitmap_size = align_up(bitmap_size_orig, bitmap_page_size);

  size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();

  guarantee(bitmap_bytes_per_region != 0,
            "Bitmap bytes per region should not be zero");
  guarantee(is_power_of_2(bitmap_bytes_per_region),
            "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);

  if (bitmap_page_size > bitmap_bytes_per_region) {
    _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
    _bitmap_bytes_per_slice = bitmap_page_size;
  } else {
    _bitmap_regions_per_slice = 1;
    _bitmap_bytes_per_slice = bitmap_bytes_per_region;
  }
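  // Worked example (illustrative numbers, not normative): with a 64-byte
  // heap_map_factor and 32 MB regions, bitmap_bytes_per_region = 32 MB / 64 = 512 KB.
  // With 4 KB pages, a slice is one region's 512 KB of bitmap; with 2 MB large
  // pages, one page covers 4 regions, so bitmap commit/uncommit happens in
  // 4-region, 2 MB slices.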

  guarantee(_bitmap_regions_per_slice >= 1,
            "Should have at least one region per slice: " SIZE_FORMAT,
            _bitmap_regions_per_slice);

  guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
            "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
            _bitmap_bytes_per_slice, bitmap_page_size);

  ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
  os::trace_page_sizes_for_requested_size("Mark Bitmap",
                                          bitmap_size_orig, bitmap_page_size,
                                          bitmap.base(),
                                          bitmap.size(), bitmap.page_size());
  MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
  _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
  _bitmap_region_special = bitmap.special();

  size_t bitmap_init_commit = _bitmap_bytes_per_slice *
                              align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
  bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
  if (!_bitmap_region_special) {
    os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
                              "Cannot commit bitmap memory");
  }

  _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions, _max_workers);

  if (ShenandoahVerify) {
    ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
    os::trace_page_sizes_for_requested_size("Verify Bitmap",
                                            bitmap_size_orig, bitmap_page_size,
                                            verify_bitmap.base(),
                                            verify_bitmap.size(), verify_bitmap.page_size());
    if (!verify_bitmap.special()) {
      os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
                                "Cannot commit verification bitmap memory");
    }
    MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
    MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
    _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
    _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
  }

  // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
  size_t aux_bitmap_page_size = bitmap_page_size;
#ifdef LINUX
  // In THP "advise" mode, we refrain from advising the system to use large pages
  // since we know these commits will be short-lived, and there is no reason to trash
  // the THP area with this bitmap.
  if (UseTransparentHugePages) {
    aux_bitmap_page_size = os::vm_page_size();
  }
#endif
  ReservedSpace aux_bitmap(_bitmap_size, aux_bitmap_page_size);
  os::trace_page_sizes_for_requested_size("Aux Bitmap",
                                          bitmap_size_orig, aux_bitmap_page_size,
                                          aux_bitmap.base(),
                                          aux_bitmap.size(), aux_bitmap.page_size());
  MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
  _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
  _aux_bitmap_region_special = aux_bitmap.special();
  _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
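  // Note: the aux bitmap is committed lazily in prepare_aux_bitmap_for_iteration()
  // and uncommitted again in reclaim_aux_bitmap_for_iteration() below.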

  //
  // Create regions and region sets
  //
  size_t region_align = align_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE);
  size_t region_storage_size_orig = region_align * _num_regions;
  size_t region_storage_size = align_up(region_storage_size_orig,
                                        MAX2(region_page_size, os::vm_allocation_granularity()));

  ReservedSpace region_storage(region_storage_size, region_page_size);
  os::trace_page_sizes_for_requested_size("Region Storage",
                                          region_storage_size_orig, region_page_size,
                                          region_storage.base(),
                                          region_storage.size(), region_storage.page_size());
  MemTracker::record_virtual_memory_type(region_storage.base(), mtGC);
  if (!region_storage.special()) {
    os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false,
                              "Cannot commit region memory");
  }

  // Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks.
  // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there.
  // If not successful, bite the bullet and allocate at whatever address.
  {
    const size_t cset_align = MAX2<size_t>(os::vm_page_size(), os::vm_allocation_granularity());
    const size_t cset_size = align_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align);
    const size_t cset_page_size = os::vm_page_size();

    uintptr_t min = round_up_power_of_2(cset_align);
    uintptr_t max = (1u << 30u);
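    // Cap the probe at 1 GB: low addresses are preferred because (presumably) the
    // biased cset map address can then be encoded more cheaply in compiled cset checks.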
    ReservedSpace cset_rs;

    for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
      char* req_addr = (char*)addr;
      assert(is_aligned(req_addr, cset_align), "Should be aligned");
      cset_rs = ReservedSpace(cset_size, cset_align, cset_page_size, req_addr);
      if (cset_rs.is_reserved()) {
        assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
        _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
        break;
      }
    }

    if (_collection_set == nullptr) {
      cset_rs = ReservedSpace(cset_size, cset_align, os::vm_page_size());
      _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
    }
    os::trace_page_sizes_for_requested_size("Collection Set",
                                            cset_size, cset_page_size,
                                            cset_rs.base(),
                                            cset_rs.size(), cset_rs.page_size());
  }

  _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
  _free_set = new ShenandoahFreeSet(this, _num_regions);

  {
    ShenandoahHeapLocker locker(lock());

    for (size_t i = 0; i < _num_regions; i++) {
      HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
      bool is_committed = i < num_committed_regions;
      void* loc = region_storage.base() + i * region_align;

      ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
      assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");

      _marking_context->initialize_top_at_mark_start(r);
      _regions[i] = r;
      assert(!collection_set()->is_in(i), "New region should not be in collection set");
    }

    // Initialize to complete
    _marking_context->mark_complete();

    _free_set->rebuild();
  }

  if (AlwaysPreTouch) {
    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before initialize() below zeroes it with the initializing thread. For any given region,
    // we touch the region and the corresponding bitmaps from the same thread.
    ShenandoahPushWorkerScope scope(workers(), _max_workers, false);

    _pretouch_heap_page_size = heap_page_size;
    _pretouch_bitmap_page_size = bitmap_page_size;

#ifdef LINUX
    // UseTransparentHugePages would madvise that the backing memory can be coalesced into huge
    // pages. But the kernel needs to see every small page touched before it can coalesce
    // them into a huge one. Therefore, we pretouch with small pages.
    if (UseTransparentHugePages) {
      _pretouch_heap_page_size = (size_t)os::vm_page_size();
      _pretouch_bitmap_page_size = (size_t)os::vm_page_size();
    }
#endif

    // OS memory managers may want to coalesce back-to-back pages. Make their jobs
    // simpler by pre-touching contiguous spaces (heap and bitmap) separately.

    ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, _pretouch_bitmap_page_size);
    _workers->run_task(&bcl);

    ShenandoahPretouchHeapTask hcl(_pretouch_heap_page_size);
    _workers->run_task(&hcl);
  }

  //
  // Initialize the rest of GC subsystems
  //

  _liveness_cache = NEW_C_HEAP_ARRAY(ShenandoahLiveData*, _max_workers, mtGC);
  for (uint worker = 0; worker < _max_workers; worker++) {
    _liveness_cache[worker] = NEW_C_HEAP_ARRAY(ShenandoahLiveData, _num_regions, mtGC);
    Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
  }

  // There should probably be Shenandoah-specific options for these,
  // just as there are G1-specific options.
  {
    ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
    satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
    satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
  }

  _monitoring_support = new ShenandoahMonitoringSupport(this);
  _phase_timings = new ShenandoahPhaseTimings(max_workers());
  ShenandoahCodeRoots::initialize();

  if (ShenandoahPacing) {
    _pacer = new ShenandoahPacer(this);
    _pacer->setup_for_idle();
  } else {
    _pacer = nullptr;
  }

  _control_thread = new ShenandoahControlThread();

  ShenandoahInitLogger::print();

  return JNI_OK;
}

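// Selects the GC mode from -XX:ShenandoahGCMode. For illustration (per the checks
// below): "satb" is the default concurrent mode; "iu" is experimental and also
// needs -XX:+UnlockExperimentalVMOptions; "passive" is diagnostic and needs
// -XX:+UnlockDiagnosticVMOptions.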
void ShenandoahHeap::initialize_mode() {
  if (ShenandoahGCMode != nullptr) {
    if (strcmp(ShenandoahGCMode, "satb") == 0) {
      _gc_mode = new ShenandoahSATBMode();
    } else if (strcmp(ShenandoahGCMode, "iu") == 0) {
      _gc_mode = new ShenandoahIUMode();
    } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
      _gc_mode = new ShenandoahPassiveMode();
    } else {
      vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
    }
  } else {
    vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
  }
  _gc_mode->initialize_flags();
  if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
    vm_exit_during_initialization(
            err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                    _gc_mode->name()));
  }
  if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
    vm_exit_during_initialization(
            err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                    _gc_mode->name()));
  }
}

void ShenandoahHeap::initialize_heuristics() {
  assert(_gc_mode != nullptr, "Must be initialized");
  _heuristics = _gc_mode->initialize_heuristics();

  if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
    vm_exit_during_initialization(
            err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                    _heuristics->name()));
  }
  if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
    vm_exit_during_initialization(
            err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                    _heuristics->name()));
  }
}

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif

ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  CollectedHeap(),
  _initial_size(0),
  _used(0),
  _committed(0),
  _bytes_allocated_since_gc_start(0),
  _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
  _workers(nullptr),
  _safepoint_workers(nullptr),
  _heap_region_special(false),
  _num_regions(0),
  _regions(nullptr),
  _update_refs_iterator(this),
  _gc_no_progress_count(0),
  _control_thread(nullptr),
  _shenandoah_policy(policy),
  _gc_mode(nullptr),
  _heuristics(nullptr),
  _free_set(nullptr),
  _pacer(nullptr),
  _verifier(nullptr),
  _phase_timings(nullptr),
  _monitoring_support(nullptr),
  _memory_pool(nullptr),
  _stw_memory_manager("Shenandoah Pauses"),
  _cycle_memory_manager("Shenandoah Cycles"),
  _gc_timer(new ConcurrentGCTimer()),
  _soft_ref_policy(),
  _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
  _ref_processor(new ShenandoahReferenceProcessor(MAX2(_max_workers, 1U))),
  _marking_context(nullptr),
  _bitmap_size(0),
  _bitmap_regions_per_slice(0),
  _bitmap_bytes_per_slice(0),
  _bitmap_region_special(false),
  _aux_bitmap_region_special(false),
  _liveness_cache(nullptr),
  _collection_set(nullptr)
{
  // Initialize GC mode early, so we can adjust barrier support
  initialize_mode();
  BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));

  _max_workers = MAX2(_max_workers, 1U);
  _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
  if (_workers == nullptr) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
  }

  if (ParallelGCThreads > 1) {
    _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread",
                                                ParallelGCThreads);
    _safepoint_workers->initialize_workers();
  }
}

#ifdef _MSC_VER
#pragma warning( pop )
#endif

class ShenandoahResetBitmapTask : public WorkerTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahResetBitmapTask() :
    WorkerTask("Shenandoah Reset Bitmap") {}

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahMarkingContext* const ctx = heap->marking_context();
    while (region != nullptr) {
      if (heap->is_bitmap_slice_committed(region)) {
        ctx->clear_bitmap(region);
      }
      region = _regions.next();
    }
  }
};

void ShenandoahHeap::reset_mark_bitmap() {
  assert_gc_workers(_workers->active_workers());
  mark_incomplete_marking_context();

  ShenandoahResetBitmapTask task;
  _workers->run_task(&task);
}

void ShenandoahHeap::print_on(outputStream* st) const {
  st->print_cr("Shenandoah Heap");
  st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
               byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
               byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
               byte_size_in_proper_unit(committed()),    proper_unit_for_byte_size(committed()),
               byte_size_in_proper_unit(used()),         proper_unit_for_byte_size(used()));
  st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions",
               num_regions(),
               byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
               proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));

  st->print("Status: ");
  if (has_forwarded_objects())                 st->print("has forwarded objects, ");
  if (is_concurrent_mark_in_progress())        st->print("marking, ");
  if (is_evacuation_in_progress())             st->print("evacuating, ");
  if (is_update_refs_in_progress())            st->print("updating refs, ");
  if (is_degenerated_gc_in_progress())         st->print("degenerated gc, ");
  if (is_full_gc_in_progress())                st->print("full gc, ");
  if (is_full_gc_move_in_progress())           st->print("full gc move, ");
  if (is_concurrent_weak_root_in_progress())   st->print("concurrent weak roots, ");
  if (is_concurrent_strong_root_in_progress() &&
      !is_concurrent_weak_root_in_progress())  st->print("concurrent strong roots, ");

  if (cancelled_gc()) {
    st->print("cancelled");
  } else {
    st->print("not cancelled");
  }
  st->cr();

  st->print_cr("Reserved region:");
  st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
               p2i(reserved_region().start()),
               p2i(reserved_region().end()));

  ShenandoahCollectionSet* cset = collection_set();
  st->print_cr("Collection set:");
  if (cset != nullptr) {
    st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address()));
    st->print_cr(" - map (biased):  " PTR_FORMAT, p2i(cset->biased_map_address()));
  } else {
    st->print_cr(" (null)");
  }

  st->cr();
  MetaspaceUtils::print_on(st);

  if (Verbose) {
    st->cr();
    print_heap_regions_on(st);
  }
}

class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    assert(thread != nullptr, "Sanity");
    assert(thread->is_Worker_thread(), "Only worker thread expected");
    ShenandoahThreadLocalData::initialize_gclab(thread);
  }
};

void ShenandoahHeap::post_initialize() {
  CollectedHeap::post_initialize();
  MutexLocker ml(Threads_lock);

  ShenandoahInitWorkerGCLABClosure init_gclabs;
  _workers->threads_do(&init_gclabs);

  // GCLABs cannot be initialized early during VM startup, because max_size cannot be
  // determined yet. Instead, let the WorkerThreads initialize GCLABs as new workers are created.
  _workers->set_initialize_gclab();
  if (_safepoint_workers != nullptr) {
    _safepoint_workers->threads_do(&init_gclabs);
    _safepoint_workers->set_initialize_gclab();
  }

  _heuristics->initialize();

  JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers());
}

size_t ShenandoahHeap::used() const {
  return Atomic::load(&_used);
}

size_t ShenandoahHeap::committed() const {
  return Atomic::load(&_committed);
}

size_t ShenandoahHeap::available() const {
  return free_set()->available();
}

void ShenandoahHeap::increase_committed(size_t bytes) {
  shenandoah_assert_heaplocked_or_safepoint();
  _committed += bytes;
}

void ShenandoahHeap::decrease_committed(size_t bytes) {
  shenandoah_assert_heaplocked_or_safepoint();
  _committed -= bytes;
}

void ShenandoahHeap::increase_used(size_t bytes) {
  Atomic::add(&_used, bytes, memory_order_relaxed);
}

void ShenandoahHeap::set_used(size_t bytes) {
  Atomic::store(&_used, bytes);
}

void ShenandoahHeap::decrease_used(size_t bytes) {
  assert(used() >= bytes, "cannot decrease used below zero");
  Atomic::sub(&_used, bytes, memory_order_relaxed);
}

void ShenandoahHeap::increase_allocated(size_t bytes) {
  Atomic::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed);
}

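// Note: "waste" words (e.g. retired LAB remainders) count toward the allocation
// rate seen by the pacer, but are not added to used().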
void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
  size_t bytes = words * HeapWordSize;
  if (!waste) {
    increase_used(bytes);
  }
  increase_allocated(bytes);
  if (ShenandoahPacing) {
    control_thread()->pacing_notify_alloc(words);
    if (waste) {
      pacer()->claim_for_alloc(words, true);
    }
  }
}

size_t ShenandoahHeap::capacity() const {
  return committed();
}

size_t ShenandoahHeap::max_capacity() const {
  return _num_regions * ShenandoahHeapRegion::region_size_bytes();
}

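// The soft max is typically adjusted at runtime from the manageable
// -XX:SoftMaxHeapSize flag (picked up by the control thread); it always stays
// between min_capacity() and max_capacity().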
size_t ShenandoahHeap::soft_max_capacity() const {
  size_t v = Atomic::load(&_soft_max_size);
  assert(min_capacity() <= v && v <= max_capacity(),
         "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
         min_capacity(), v, max_capacity());
  return v;
}

void ShenandoahHeap::set_soft_max_capacity(size_t v) {
  assert(min_capacity() <= v && v <= max_capacity(),
         "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
         min_capacity(), v, max_capacity());
  Atomic::store(&_soft_max_size, v);
}

size_t ShenandoahHeap::min_capacity() const {
  return _minimum_size;
}

size_t ShenandoahHeap::initial_capacity() const {
  return _initial_size;
}

bool ShenandoahHeap::is_in(const void* p) const {
  HeapWord* heap_base = (HeapWord*) base();
  HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
  return p >= heap_base && p < last_region_end;
}

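// shrink_before is a timestamp: only regions that have been continuously empty
// since before it are uncommitted. shrink_until is the committed-size floor:
// we stop uncommitting once committed() would drop below it.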
void ShenandoahHeap::op_uncommit(double shrink_before, size_t shrink_until) {
  assert (ShenandoahUncommit, "should be enabled");

  // Application allocates from the beginning of the heap, and GC allocates at
  // the end of it. It is more efficient to uncommit from the end, so that applications
  // can enjoy the nearby committed regions. GC allocations are much less frequent,
  // and can therefore absorb the commit costs.

  size_t count = 0;
  for (size_t i = num_regions(); i > 0; i--) { // watch for size_t underflow
    ShenandoahHeapRegion* r = get_region(i - 1);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      ShenandoahHeapLocker locker(lock());
      if (r->is_empty_committed()) {
        if (committed() < shrink_until + ShenandoahHeapRegion::region_size_bytes()) {
          break;
        }

        r->make_uncommitted();
        count++;
      }
    }
    SpinPause(); // allow allocators to take the lock
  }

  if (count > 0) {
    control_thread()->notify_heap_changed();
  }
}

HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // New object should fit the GCLAB size
  size_t min_size = MAX2(size, PLAB::min_size());

  // Figure out the size of the new GCLAB, looking back at heuristics. Expand aggressively.
  size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
  new_size = MIN2(new_size, PLAB::max_size());
  new_size = MAX2(new_size, PLAB::min_size());

  // Record the new heuristic value even if we take a shortcut. This captures
  // the case when moderately-sized objects always take the shortcut. At some point,
  // the heuristics should catch up with them.
  ShenandoahThreadLocalData::set_gclab_size(thread, new_size);

  if (new_size < size) {
    // New size still does not fit the object. Fall back to shared allocation.
    // This avoids retiring perfectly good GCLABs, when we encounter a large object.
    return nullptr;
  }

  // Retire current GCLAB, and allocate a new one.
  PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
  gclab->retire();

  size_t actual_size = 0;
  HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
  if (gclab_buf == nullptr) {
    return nullptr;
  }

  assert (size <= actual_size, "allocation should fit");

  if (ZeroTLAB) {
    // ...and clear it.
    Copy::zero_to_words(gclab_buf, actual_size);
  } else {
    // ...and zap the just-allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  gclab->set_buf(gclab_buf, actual_size);
  return gclab->allocate(size);
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
                                            size_t requested_size,
                                            size_t* actual_size) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
  HeapWord* res = allocate_memory(req);
  if (res != nullptr) {
    *actual_size = req.actual_size();
  } else {
    *actual_size = 0;
  }
  return res;
}

HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
                                             size_t word_size,
                                             size_t* actual_size) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
  HeapWord* res = allocate_memory(req);
  if (res != nullptr) {
    *actual_size = req.actual_size();
  } else {
    *actual_size = 0;
  }
  return res;
}

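// Central allocation path: mutator allocations are paced (if enabled), may have
// failures injected for testing (ShenandoahAllocFailureALot), and on failure
// block on the control thread and retry while the GC still makes progress.
// GC allocations must not block, so they take a single attempt under the lock.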
HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
  intptr_t pacer_epoch = 0;
  bool in_new_region = false;
  HeapWord* result = nullptr;

  if (req.is_mutator_alloc()) {
    if (ShenandoahPacing) {
      pacer()->pace_for_alloc(req.size());
      pacer_epoch = pacer()->epoch();
    }

    if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
      result = allocate_memory_under_lock(req, in_new_region);
    }

    // Check that GC overhead is not exceeded.
    //
    // Shenandoah will grind along for quite a while allocating one
    // object at a time using shared (non-TLAB) allocations. This check
    // is testing that the GC overhead limit has not been exceeded.
    // This will notify the collector to start a cycle, but will raise
    // an OOME to the mutator if the last Full GCs have not made progress.
    if (result == nullptr && !req.is_lab_alloc() && get_gc_no_progress_count() > ShenandoahNoProgressThreshold) {
      control_thread()->handle_alloc_failure(req, false);
      return nullptr;
    }

    // Block until the control thread reacted, then retry allocation.
    //
    // It might happen that one of the threads requesting allocation unblocks
    // much later, after the GC has happened, only to fail the second allocation, because
    // other threads have already depleted the free storage. In this case, a better
    // strategy is to try again, as long as GC makes progress (or until at least
    // one full GC has completed).
    size_t original_count = shenandoah_policy()->full_gc_count();
    while (result == nullptr
        && (get_gc_no_progress_count() == 0 || original_count == shenandoah_policy()->full_gc_count())) {
      control_thread()->handle_alloc_failure(req);
      result = allocate_memory_under_lock(req, in_new_region);
    }

    if (log_is_enabled(Debug, gc, alloc)) {
      ResourceMark rm;
      log_debug(gc, alloc)("Thread: %s, Result: " PTR_FORMAT ", Request: %s, Size: " SIZE_FORMAT ", Original: " SIZE_FORMAT ", Latest: " SIZE_FORMAT,
                           Thread::current()->name(), p2i(result), req.type_string(), req.size(), original_count, get_gc_no_progress_count());
    }
  } else {
    assert(req.is_gc_alloc(), "Can only accept GC allocs here");
    result = allocate_memory_under_lock(req, in_new_region);
    // Do not call handle_alloc_failure() here, because we cannot block.
    // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
  }

  if (in_new_region) {
    control_thread()->notify_heap_changed();
  }

  if (result != nullptr) {
    size_t requested = req.size();
    size_t actual = req.actual_size();

    assert (req.is_lab_alloc() || (requested == actual),
            "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
            ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);

    if (req.is_mutator_alloc()) {
      notify_mutator_alloc_words(actual, false);

      // If we requested more than we were granted, give the rest back to the pacer.
      // This only matters if we are in the same pacing epoch: do not try to unpace
      // over the budget for the other phase.
      if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
        pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
      }
    } else {
      increase_used(actual * HeapWordSize);
    }
  }

  return result;
}

HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
  ShenandoahHeapLocker locker(lock());
  return _free_set->allocate(req, in_new_region);
}

HeapWord* ShenandoahHeap::mem_allocate(size_t size,
                                        bool*  gc_overhead_limit_was_exceeded) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
  return allocate_memory(req);
}

MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                             size_t size,
                                                             Metaspace::MetadataType mdtype) {
  MetaWord* result;

  // Inform metaspace OOM to GC heuristics if class unloading is possible.
  if (heuristics()->can_unload_classes()) {
    ShenandoahHeuristics* h = heuristics();
    h->record_metaspace_oom();
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != nullptr) {
    return result;
  }

  // Start full GC
  collect(GCCause::_metadata_GC_clear_soft_refs);

  // Retry allocation
  result = loader_data->metaspace_non_null()->allocate(size, mdtype);
  if (result != nullptr) {
    return result;
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != nullptr) {
    return result;
  }

  // Out of memory
  return nullptr;
}

class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  Thread* const _thread;
public:
  ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
    _heap(heap), _thread(Thread::current()) {}

  void do_object(oop p) {
    shenandoah_assert_marked(nullptr, p);
    if (!p->is_forwarded()) {
      _heap->evacuate_object(p, _thread);
    }
  }
};

class ShenandoahEvacuationTask : public WorkerTask {
private:
  ShenandoahHeap* const _sh;
  ShenandoahCollectionSet* const _cs;
  bool _concurrent;
public:
  ShenandoahEvacuationTask(ShenandoahHeap* sh,
                           ShenandoahCollectionSet* cs,
                           bool concurrent) :
    WorkerTask("Shenandoah Evacuation"),
    _sh(sh),
    _cs(cs),
    _concurrent(concurrent)
  {}

  void work(uint worker_id) {
    if (_concurrent) {
      ShenandoahConcurrentWorkerSession worker_session(worker_id);
      ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
      ShenandoahEvacOOMScope oom_evac_scope;
      do_work();
    } else {
      ShenandoahParallelWorkerSession worker_session(worker_id);
      ShenandoahEvacOOMScope oom_evac_scope;
      do_work();
    }
  }

private:
  void do_work() {
    ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
    ShenandoahHeapRegion* r;
    while ((r = _cs->claim_next()) != nullptr) {
      assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
      _sh->marked_object_iterate(r, &cl);

      if (ShenandoahPacing) {
        _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
      }

      if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
        break;
      }
    }
  }
};

void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
  ShenandoahEvacuationTask task(this, _collection_set, concurrent);
  workers()->run_task(&task);
}

void ShenandoahHeap::trash_cset_regions() {
  ShenandoahHeapLocker locker(lock());

  ShenandoahCollectionSet* set = collection_set();
  ShenandoahHeapRegion* r;
  set->clear_current_index();
  while ((r = set->next()) != nullptr) {
    r->make_trash();
  }
  collection_set()->clear();
}

void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
  st->print_cr("Heap Regions:");
  st->print_cr("Region state: EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HP=pinned humongous start");
  st->print_cr("              HC=humongous continuation, CS=collection set, TR=trash, P=pinned, CSP=pinned collection set");
  st->print_cr("BTE=bottom/top/end, TAMS=top-at-mark-start");
  st->print_cr("UWM=update watermark, U=used");
  st->print_cr("T=TLAB allocs, G=GCLAB allocs");
  st->print_cr("S=shared allocs, L=live data");
  st->print_cr("CP=critical pins");

  for (size_t i = 0; i < num_regions(); i++) {
    get_region(i)->print_on(st);
  }
}

void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
  assert(start->is_humongous_start(), "reclaim regions starting with the first one");

  oop humongous_obj = cast_to_oop(start->bottom());
  size_t size = humongous_obj->size();
  size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
  size_t index = start->index() + required_regions - 1;

  assert(!start->has_live(), "liveness must be zero");

  for (size_t i = 0; i < required_regions; i++) {
    // Reclaim from the tail. Otherwise, an assertion fails when printing regions to the
    // trace log, which expects every humongous continuation to follow a humongous start region.
    ShenandoahHeapRegion* region = get_region(index--);

    assert(region->is_humongous(), "expect correct humongous start or continuation");
    assert(!region->is_cset(), "Humongous region should not be in collection set");

    region->make_trash_immediate();
  }
}

class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
public:
  ShenandoahCheckCleanGCLABClosure() {}
  void do_thread(Thread* thread) {
    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
    assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
    assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
  }
};

class ShenandoahRetireGCLABClosure : public ThreadClosure {
private:
  bool const _resize;
public:
  ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
  void do_thread(Thread* thread) {
    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
    assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
    gclab->retire();
    if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
      ShenandoahThreadLocalData::set_gclab_size(thread, 0);
    }
  }
};

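// Retire all TLABs and GCLABs so the heap is linearly parsable, e.g. before
// verification walks it at a safepoint (see prepare_for_verify() below).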
1133 void ShenandoahHeap::labs_make_parsable() {
1134   assert(UseTLAB, "Only call with UseTLAB");
1135 
1136   ShenandoahRetireGCLABClosure cl(false);
1137 
1138   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1139     ThreadLocalAllocBuffer& tlab = t->tlab();
1140     tlab.make_parsable();
1141     cl.do_thread(t);
1142   }
1143 
1144   workers()->threads_do(&cl);
1145 }
1146 
1147 void ShenandoahHeap::tlabs_retire(bool resize) {
1148   assert(UseTLAB, "Only call with UseTLAB");
1149   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1150 
1151   ThreadLocalAllocStats stats;
1152 
1153   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1154     ThreadLocalAllocBuffer& tlab = t->tlab();
1155     tlab.retire(&stats);
1156     if (resize) {
1157       tlab.resize();
1158     }
1159   }
1160 
1161   stats.publish();
1162 
1163 #ifdef ASSERT
1164   ShenandoahCheckCleanGCLABClosure cl;
1165   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1166     cl.do_thread(t);
1167   }
1168   workers()->threads_do(&cl);
1169 #endif
1170 }
1171 
1172 void ShenandoahHeap::gclabs_retire(bool resize) {
1173   assert(UseTLAB, "Only call with UseTLAB");
1174   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1175 
1176   ShenandoahRetireGCLABClosure cl(resize);
1177   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1178     cl.do_thread(t);
1179   }
1180   workers()->threads_do(&cl);
1181 
1182   if (safepoint_workers() != nullptr) {
1183     safepoint_workers()->threads_do(&cl);
1184   }
1185 }
1186 
1187 // Returns size in bytes
1188 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1189   if (ShenandoahElasticTLAB) {
1190     // With Elastic TLABs, return the max allowed size, and let the allocation path
1191     // figure out the safe size for current allocation.
1192     return ShenandoahHeapRegion::max_tlab_size_bytes();
1193   } else {
1194     return MIN2(_free_set->unsafe_peek_free(), ShenandoahHeapRegion::max_tlab_size_bytes());
1195   }
1196 }
1197 
1198 size_t ShenandoahHeap::max_tlab_size() const {
1199   // Returns size in words
1200   return ShenandoahHeapRegion::max_tlab_size_words();
1201 }
1202 
1203 void ShenandoahHeap::collect(GCCause::Cause cause) {
1204   control_thread()->request_gc(cause);
1205 }
1206 
1207 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
1208   //assert(false, "Shouldn't need to do full collections");
1209 }
1210 
1211 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1212   ShenandoahHeapRegion* r = heap_region_containing(addr);
1213   if (r != nullptr) {
1214     return r->block_start(addr);
1215   }
1216   return nullptr;
1217 }
1218 
1219 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1220   ShenandoahHeapRegion* r = heap_region_containing(addr);
1221   return r->block_is_obj(addr);
1222 }
1223 
1224 bool ShenandoahHeap::print_location(outputStream* st, void* addr) const {
1225   return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr);
1226 }
1227 
1228 void ShenandoahHeap::prepare_for_verify() {
1229   if (SafepointSynchronize::is_at_safepoint() && UseTLAB) {
1230     labs_make_parsable();
1231   }
1232 }
1233 
1234 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1235   tcl->do_thread(_control_thread);
1236   workers()->threads_do(tcl);
1237   if (_safepoint_workers != nullptr) {
1238     _safepoint_workers->threads_do(tcl);
1239   }
1240 }
1241 
1242 void ShenandoahHeap::print_tracing_info() const {
1243   LogTarget(Info, gc, stats) lt;
1244   if (lt.is_enabled()) {
1245     ResourceMark rm;
1246     LogStream ls(lt);
1247 
1248     phase_timings()->print_global_on(&ls);
1249 
1250     ls.cr();
1251     ls.cr();
1252 
1253     shenandoah_policy()->print_gc_stats(&ls);
1254 
1255     ls.cr();
1256     ls.cr();
1257   }
1258 }
1259 
1260 void ShenandoahHeap::verify(VerifyOption vo) {
1261   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1262     if (ShenandoahVerify) {
1263       verifier()->verify_generic(vo);
1264     } else {
1265       // TODO: Consider allocating verification bitmaps on demand,
1266       // and turn this on unconditionally.
1267     }
1268   }
1269 }
1270 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1271   return _free_set->capacity();
1272 }
1273 
1274 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1275 private:
1276   MarkBitMap* _bitmap;
1277   ShenandoahScanObjectStack* _oop_stack;
1278   ShenandoahHeap* const _heap;
1279   ShenandoahMarkingContext* const _marking_context;
1280 
1281   template <class T>
1282   void do_oop_work(T* p) {
1283     T o = RawAccess<>::oop_load(p);
1284     if (!CompressedOops::is_null(o)) {
1285       oop obj = CompressedOops::decode_not_null(o);
1286       if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
1287         // There may be dead oops in weak roots in concurrent root phase, do not touch them.
1288         return;
1289       }
1290       obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
1291 
1292       assert(oopDesc::is_oop(obj), "must be a valid oop");
1293       if (!_bitmap->is_marked(obj)) {
1294         _bitmap->mark(obj);
1295         _oop_stack->push(obj);
1296       }
1297     }
1298   }
1299 public:
1300   ObjectIterateScanRootClosure(MarkBitMap* bitmap, ShenandoahScanObjectStack* oop_stack) :
1301     _bitmap(bitmap), _oop_stack(oop_stack), _heap(ShenandoahHeap::heap()),
1302     _marking_context(_heap->marking_context()) {}
1303   void do_oop(oop* p)       { do_oop_work(p); }
1304   void do_oop(narrowOop* p) { do_oop_work(p); }
1305 };
1306 
1307 /*
1308  * This is public API, used in preparation of object_iterate().
1309  * Since we don't do linear scan of heap in object_iterate() (see comment below), we don't
1310  * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can
1311  * control, we call SH::tlabs_retire, SH::gclabs_retire.
1312  */
1313 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
1314   // No-op.
1315 }
1316 
1317 /*
1318  * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
1319  *
1320  * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
1321  * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
1322  * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
1323  * scanning therefore depends on having a valid marking bitmap to support it. However, we only
1324  * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
1325  * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
1326  * wiped the bitmap in preparation for next marking).
1327  *
1328  * For all those reasons, we implement object iteration as a single marking traversal, reporting
1329  * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1330  * is allowed to report dead objects, but is not required to do so.
1331  */
1332 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1333   // Reset bitmap
1334   if (!prepare_aux_bitmap_for_iteration())
1335     return;
1336 
1337   ShenandoahScanObjectStack oop_stack;
1338   ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1339   // Seed the stack with root scan
1340   scan_roots_for_iteration(&oop_stack, &oops);
1341 
1342   // Work through the oop stack to traverse heap
1343   while (! oop_stack.is_empty()) {
1344     oop obj = oop_stack.pop();
1345     assert(oopDesc::is_oop(obj), "must be a valid oop");
1346     cl->do_object(obj);
1347     obj->oop_iterate(&oops);
1348   }
1349 
1350   assert(oop_stack.is_empty(), "should be empty");
1351   // Reclaim bitmap
1352   reclaim_aux_bitmap_for_iteration();
1353 }
1354 
1355 bool ShenandoahHeap::prepare_aux_bitmap_for_iteration() {
1356   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1357 
1358   if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1359     log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1360     return false;
1361   }
1362   // Reset bitmap
1363   _aux_bit_map.clear();
1364   return true;
1365 }
1366 
1367 void ShenandoahHeap::scan_roots_for_iteration(ShenandoahScanObjectStack* oop_stack, ObjectIterateScanRootClosure* oops) {
1368   // Process GC roots according to the current GC cycle.
1369   // This populates the work stack with the initial objects.
1370   // It is important to relinquish the associated locks before diving
1371   // into the heap dumper.
1372   uint n_workers = safepoint_workers() != nullptr ? safepoint_workers()->active_workers() : 1;
1373   ShenandoahHeapIterationRootScanner rp(n_workers);
1374   rp.roots_do(oops);
1375 }
1376 
1377 void ShenandoahHeap::reclaim_aux_bitmap_for_iteration() {
1378   if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
1379     log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
1380   }
1381 }
1382 
1383 // Closure for iterating objects in parallel
1384 class ShenandoahObjectIterateParScanClosure : public BasicOopIterateClosure {
1385 private:
1386   MarkBitMap* _bitmap;
1387   ShenandoahObjToScanQueue* _queue;
1388   ShenandoahHeap* const _heap;
1389   ShenandoahMarkingContext* const _marking_context;
1390 
1391   template <class T>
1392   void do_oop_work(T* p) {
1393     T o = RawAccess<>::oop_load(p);
1394     if (!CompressedOops::is_null(o)) {
1395       oop obj = CompressedOops::decode_not_null(o);
1396       if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
1397       // There may be dead oops in weak roots during the concurrent root phase; do not touch them.
1398         return;
1399       }
1400       obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
1401 
1402       assert(oopDesc::is_oop(obj), "Must be a valid oop");
1403       if (_bitmap->par_mark(obj)) {
1404         _queue->push(ShenandoahMarkTask(obj));
1405       }
1406     }
1407   }
1408 public:
1409   ShenandoahObjectIterateParScanClosure(MarkBitMap* bitmap, ShenandoahObjToScanQueue* q) :
1410     _bitmap(bitmap), _queue(q), _heap(ShenandoahHeap::heap()),
1411     _marking_context(_heap->marking_context()) {}
1412   void do_oop(oop* p)       { do_oop_work(p); }
1413   void do_oop(narrowOop* p) { do_oop_work(p); }
1414 };
1415 
1416 // Object iterator for parallel heap iteration.
1417 // The root scanning phase happens in the constructor, to prepare the
1418 // parallel marking queues.
1419 // Every worker processes its own marking queue; work stealing is used
1420 // to balance the workload.
1421 class ShenandoahParallelObjectIterator : public ParallelObjectIteratorImpl {
1422 private:
1423   uint                         _num_workers;
1424   bool                         _init_ready;
1425   MarkBitMap*                  _aux_bit_map;
1426   ShenandoahHeap*              _heap;
1427   ShenandoahScanObjectStack    _roots_stack; // global roots stack
1428   ShenandoahObjToScanQueueSet* _task_queues;
1429 public:
1430   ShenandoahParallelObjectIterator(uint num_workers, MarkBitMap* bitmap) :
1431         _num_workers(num_workers),
1432         _init_ready(false),
1433         _aux_bit_map(bitmap),
1434         _heap(ShenandoahHeap::heap()), _task_queues(nullptr) { // null so the destructor is safe if init fails below
1435     // Initialize bitmap
1436     _init_ready = _heap->prepare_aux_bitmap_for_iteration();
1437     if (!_init_ready) {
1438       return;
1439     }
1440 
1441     ObjectIterateScanRootClosure oops(_aux_bit_map, &_roots_stack);
1442     _heap->scan_roots_for_iteration(&_roots_stack, &oops);
1443 
1444     _init_ready = prepare_worker_queues();
1445   }
1446 
1447   ~ShenandoahParallelObjectIterator() {
1448     // Reclaim bitmap
1449     _heap->reclaim_aux_bitmap_for_iteration();
1450     // Reclaim queue for workers
1451     if (_task_queues != nullptr) {
1452       for (uint i = 0; i < _num_workers; ++i) {
1453         ShenandoahObjToScanQueue* q = _task_queues->queue(i);
1454         if (q != nullptr) {
1455           delete q;
1456           _task_queues->register_queue(i, nullptr);
1457         }
1458       }
1459       delete _task_queues;
1460       _task_queues = nullptr;
1461     }
1462   }
1463 
1464   virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
1465     if (_init_ready) {
1466       object_iterate_parallel(cl, worker_id, _task_queues);
1467     }
1468   }
1469 
1470 private:
1471   // Divide the global roots stack into worker queues
1472   bool prepare_worker_queues() {
1473     _task_queues = new ShenandoahObjToScanQueueSet((int) _num_workers);
1474     // Initialize a queue for every worker
1475     for (uint i = 0; i < _num_workers; ++i) {
1476       ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
1477       _task_queues->register_queue(i, task_queue);
1478     }
1479     // Divide roots among the workers. Assume that the distribution of object references
1480     // correlates with root kind; use round-robin so that every worker has the same
1481     // chance to process every kind of root.
1482     size_t roots_num = _roots_stack.size();
1483     if (roots_num == 0) {
1484       // No work to do
1485       return false;
1486     }
1487 
1488     for (uint j = 0; j < roots_num; j++) {
1489       uint stack_id = j % _num_workers;
1490       oop obj = _roots_stack.pop();
1491       _task_queues->queue(stack_id)->push(ShenandoahMarkTask(obj));
1492     }
1493     return true;
1494   }
1495 
1496   void object_iterate_parallel(ObjectClosure* cl,
1497                                uint worker_id,
1498                                ShenandoahObjToScanQueueSet* queue_set) {
1499     assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1500     assert(queue_set != nullptr, "task queue must not be null");
1501 
1502     ShenandoahObjToScanQueue* q = queue_set->queue(worker_id);
1503     assert(q != nullptr, "object iterate queue must not be null");
1504 
1505     ShenandoahMarkTask t;
1506     ShenandoahObjectIterateParScanClosure oops(_aux_bit_map, q);
1507 
1508     // Work through the queue to traverse heap.
1509     // Steal when there is no task in queue.
1510     while (q->pop(t) || queue_set->steal(worker_id, t)) {
1511       oop obj = t.obj();
1512       assert(oopDesc::is_oop(obj), "must be a valid oop");
1513       cl->do_object(obj);
1514       obj->oop_iterate(&oops);
1515     }
1516     assert(q->is_empty(), "should be empty");
1517   }
1518 };
1519 
1520 ParallelObjectIteratorImpl* ShenandoahHeap::parallel_object_iterator(uint workers) {
1521   return new ShenandoahParallelObjectIterator(workers, &_aux_bit_map);
1522 }
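
// --- Illustrative sketch (not part of the VM build) --------------------------
// The parallel iterator above (1) splits the root stack round-robin across
// per-worker queues, then (2) has each worker drain its own queue and steal
// from peers when it runs dry. Below is a minimal standalone model of that
// shape; StealableQueue and the int "tasks" are hypothetical stand-ins for the
// HotSpot task-queue machinery, not its API.
#include <cstddef>
#include <deque>
#include <mutex>
#include <vector>

namespace shenandoah_doc_sketch {

struct StealableQueue {
  std::deque<int> tasks;  // int stands in for ShenandoahMarkTask
  std::mutex mtx;

  bool pop(int& t) {      // owner pops from the back
    std::lock_guard<std::mutex> g(mtx);
    if (tasks.empty()) return false;
    t = tasks.back(); tasks.pop_back();
    return true;
  }
  bool steal(int& t) {    // thieves take from the front
    std::lock_guard<std::mutex> g(mtx);
    if (tasks.empty()) return false;
    t = tasks.front(); tasks.pop_front();
    return true;
  }
};

// Round-robin distribution, mirroring prepare_worker_queues(); runs
// single-threaded before the workers start.
inline void distribute_round_robin(const std::vector<int>& roots,
                                   std::vector<StealableQueue>& queues) {
  for (size_t j = 0; j < roots.size(); j++) {
    queues[j % queues.size()].tasks.push_back(roots[j]);
  }
}

// Per-worker drain loop, mirroring object_iterate_parallel(): prefer own
// tasks, then try every peer before giving up.
template <typename Visit>
void drain_with_stealing(std::vector<StealableQueue>& queues,
                         size_t worker_id, Visit visit) {
  int t;
  for (;;) {
    if (queues[worker_id].pop(t)) { visit(t); continue; }
    bool stolen = false;
    for (size_t v = 0; v < queues.size() && !stolen; v++) {
      if (v != worker_id && queues[v].steal(t)) { visit(t); stolen = true; }
    }
    if (!stolen) return;
  }
}

} // namespace shenandoah_doc_sketch
// ------------------------------------------------------------------------------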
1523 
1524 // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
1525 void ShenandoahHeap::keep_alive(oop obj) {
1526   if (is_concurrent_mark_in_progress() && (obj != nullptr)) {
1527     ShenandoahBarrierSet::barrier_set()->enqueue(obj);
1528   }
1529 }
1530 
1531 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1532   for (size_t i = 0; i < num_regions(); i++) {
1533     ShenandoahHeapRegion* current = get_region(i);
1534     blk->heap_region_do(current);
1535   }
1536 }
1537 
1538 class ShenandoahParallelHeapRegionTask : public WorkerTask {
1539 private:
1540   ShenandoahHeap* const _heap;
1541   ShenandoahHeapRegionClosure* const _blk;
1542 
1543   shenandoah_padding(0);
1544   volatile size_t _index;
1545   shenandoah_padding(1);
1546 
1547 public:
1548   ShenandoahParallelHeapRegionTask(ShenandoahHeapRegionClosure* blk) :
1549           WorkerTask("Shenandoah Parallel Region Operation"),
1550           _heap(ShenandoahHeap::heap()), _blk(blk), _index(0) {}
1551 
1552   void work(uint worker_id) {
1553     ShenandoahParallelWorkerSession worker_session(worker_id);
1554     size_t stride = ShenandoahParallelRegionStride;
1555 
1556     size_t max = _heap->num_regions();
1557     while (Atomic::load(&_index) < max) {
1558       size_t cur = Atomic::fetch_then_add(&_index, stride, memory_order_relaxed);
1559       size_t start = cur;
1560       size_t end = MIN2(cur + stride, max);
1561       if (start >= max) break;
1562 
1563       for (size_t i = cur; i < end; i++) {
1564         ShenandoahHeapRegion* current = _heap->get_region(i);
1565         _blk->heap_region_do(current);
1566       }
1567     }
1568   }
1569 };
1570 
1571 void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1572   assert(blk->is_thread_safe(), "Only thread-safe closures here");
1573   if (num_regions() > ShenandoahParallelRegionStride) {
1574     ShenandoahParallelHeapRegionTask task(blk);
1575     workers()->run_task(&task);
1576   } else {
1577     heap_region_iterate(blk);
1578   }
1579 }
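
// --- Illustrative sketch (not part of the VM build) --------------------------
// ShenandoahParallelHeapRegionTask above hands out regions in fixed-size
// strides from one shared atomic cursor: every claim is a single fetch-add, so
// no region is processed twice and contention drops as the stride grows. The
// claiming loop in miniature, using std::atomic (names are hypothetical):
#include <algorithm>
#include <atomic>
#include <cstddef>

namespace shenandoah_doc_sketch {

template <typename RegionFn>
void claim_in_strides(std::atomic<size_t>& cursor, size_t num_regions,
                      size_t stride, RegionFn do_region) {
  while (cursor.load(std::memory_order_relaxed) < num_regions) {
    // fetch_add makes the claimed range [start, end) unique to this worker.
    size_t start = cursor.fetch_add(stride, std::memory_order_relaxed);
    if (start >= num_regions) break;  // another worker claimed the tail
    size_t end = std::min(start + stride, num_regions);
    for (size_t i = start; i < end; i++) {
      do_region(i);
    }
  }
}

} // namespace shenandoah_doc_sketch
// ------------------------------------------------------------------------------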
1580 
1581 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1582 private:
1583   ShenandoahMarkingContext* const _ctx;
1584 public:
1585   ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
1586 
1587   void heap_region_do(ShenandoahHeapRegion* r) {
1588     assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
1589     if (r->is_active()) {
1590       // Check if the region needs its TAMS updated. We have already updated it during
1591       // concurrent reset, so it is very likely we don't need another write here.
1592       if (_ctx->top_at_mark_start(r) != r->top()) {
1593         _ctx->capture_top_at_mark_start(r);
1594       }
1595     } else {
1596       assert(_ctx->top_at_mark_start(r) == r->top(),
1597              "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
1598     }
1599   }
1600 
1601   bool is_thread_safe() { return true; }
1602 };
1603 
1604 class ShenandoahRendezvousClosure : public HandshakeClosure {
1605 public:
1606   inline ShenandoahRendezvousClosure() : HandshakeClosure("ShenandoahRendezvous") {}
1607   inline void do_thread(Thread* thread) {}
1608 };
1609 
1610 void ShenandoahHeap::rendezvous_threads() {
1611   ShenandoahRendezvousClosure cl;
1612   Handshake::execute(&cl);
1613 }
1614 
1615 void ShenandoahHeap::recycle_trash() {
1616   free_set()->recycle_trash();
1617 }
1618 
1619 class ShenandoahResetUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1620 private:
1621   ShenandoahMarkingContext* const _ctx;
1622 public:
1623   ShenandoahResetUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
1624 
1625   void heap_region_do(ShenandoahHeapRegion* r) {
1626     if (r->is_active()) {
1627       // Reset live data and set TAMS optimistically. We recheck these under the pause
1628       // anyway, to capture any updates that happened in the meantime.
1629       r->clear_live_data();
1630       _ctx->capture_top_at_mark_start(r);
1631     }
1632   }
1633 
1634   bool is_thread_safe() { return true; }
1635 };
1636 
1637 void ShenandoahHeap::prepare_gc() {
1638   reset_mark_bitmap();
1639 
1640   ShenandoahResetUpdateRegionStateClosure cl;
1641   parallel_heap_region_iterate(&cl);
1642 }
1643 
1644 class ShenandoahFinalMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1645 private:
1646   ShenandoahMarkingContext* const _ctx;
1647   ShenandoahHeapLock* const _lock;
1648 
1649 public:
1650   ShenandoahFinalMarkUpdateRegionStateClosure() :
1651     _ctx(ShenandoahHeap::heap()->complete_marking_context()), _lock(ShenandoahHeap::heap()->lock()) {}
1652 
1653   void heap_region_do(ShenandoahHeapRegion* r) {
1654     if (r->is_active()) {
1655       // All allocations past TAMS are implicitly live, adjust the region data.
1656       // Bitmaps/TAMS are swapped at this point, so we need to poll complete bitmap.
1657       HeapWord *tams = _ctx->top_at_mark_start(r);
1658       HeapWord *top = r->top();
1659       if (top > tams) {
1660         r->increase_live_data_alloc_words(pointer_delta(top, tams));
1661       }
1662 
1663       // We are about to select the collection set; make sure it knows about
1664       // the current pinning status. This also allows trashing regions whose
1665       // pinning status has since been dropped.
1666       if (r->is_pinned()) {
1667         if (r->pin_count() == 0) {
1668           ShenandoahHeapLocker locker(_lock);
1669           r->make_unpinned();
1670         }
1671       } else {
1672         if (r->pin_count() > 0) {
1673           ShenandoahHeapLocker locker(_lock);
1674           r->make_pinned();
1675         }
1676       }
1677 
1678       // Remember the limit for updating refs. It is guaranteed that no
1679       // from-space refs are written from here on.
1680       r->set_update_watermark_at_safepoint(r->top());
1681     } else {
1682       assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
1683       assert(_ctx->top_at_mark_start(r) == r->top(),
1684              "Region " SIZE_FORMAT " should have correct TAMS", r->index());
1685     }
1686   }
1687 
1688   bool is_thread_safe() { return true; }
1689 };
1690 
1691 void ShenandoahHeap::prepare_regions_and_collection_set(bool concurrent) {
1692   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
1693   {
1694     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_update_region_states :
1695                                          ShenandoahPhaseTimings::degen_gc_final_update_region_states);
1696     ShenandoahFinalMarkUpdateRegionStateClosure cl;
1697     parallel_heap_region_iterate(&cl);
1698 
1699     assert_pinned_region_status();
1700   }
1701 
1702   {
1703     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::choose_cset :
1704                                          ShenandoahPhaseTimings::degen_gc_choose_cset);
1705     ShenandoahHeapLocker locker(lock());
1706     _collection_set->clear();
1707     heuristics()->choose_collection_set(_collection_set);
1708   }
1709 
1710   {
1711     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset :
1712                                          ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
1713     ShenandoahHeapLocker locker(lock());
1714     _free_set->rebuild();
1715   }
1716 }
1717 
1718 void ShenandoahHeap::do_class_unloading() {
1719   _unloader.unload();
1720 }
1721 
1722 void ShenandoahHeap::stw_weak_refs(bool full_gc) {
1723   // Weak refs processing
1724   ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
1725                                                 : ShenandoahPhaseTimings::degen_gc_weakrefs;
1726   ShenandoahTimingsTracker t(phase);
1727   ShenandoahGCWorkerPhase worker_phase(phase);
1728   ref_processor()->process_references(phase, workers(), false /* concurrent */);
1729 }
1730 
1731 void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
1732   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
1733 
1734   // Evacuation is over; no GCLABs are needed anymore. GCLABs are below the update-refs
1735   // watermark (URWM), so we need to make them parsable for the update code to work
1736   // correctly. Plus, we can compute new sizes for future GCLABs here.
1737   if (UseTLAB) {
1738     ShenandoahGCPhase phase(concurrent ?
1739                             ShenandoahPhaseTimings::init_update_refs_manage_gclabs :
1740                             ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
1741     gclabs_retire(ResizeTLAB);
1742   }
1743 
1744   _update_refs_iterator.reset();
1745 }
1746 
1747 void ShenandoahHeap::set_gc_state_all_threads(char state) {
1748   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1749     ShenandoahThreadLocalData::set_gc_state(t, state);
1750   }
1751 }
1752 
1753 void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) {
1754   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should really be Shenandoah safepoint");
1755   _gc_state.set_cond(mask, value);
1756   set_gc_state_all_threads(_gc_state.raw_value());
1757 }
1758 
1759 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1760   assert(!has_forwarded_objects(), "Not expected before/after mark phase");
1761   set_gc_state_mask(MARKING, in_progress);
1762   ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1763 }
1764 
1765 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1766   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
1767   set_gc_state_mask(EVACUATION, in_progress);
1768 }
1769 
1770 void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
1771   if (in_progress) {
1772     _concurrent_strong_root_in_progress.set();
1773   } else {
1774     _concurrent_strong_root_in_progress.unset();
1775   }
1776 }
1777 
1778 void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
1779   set_gc_state_mask(WEAK_ROOTS, cond);
1780 }
1781 
1782 GCTracer* ShenandoahHeap::tracer() {
1783   return shenandoah_policy()->tracer();
1784 }
1785 
1786 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
1787   return _free_set->used();
1788 }
1789 
1790 bool ShenandoahHeap::try_cancel_gc() {
1791   jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
1792   return prev == CANCELLABLE;
1793 }
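
// --- Illustrative sketch (not part of the VM build) --------------------------
// try_cancel_gc() above is a single compare-and-swap: of all the threads that
// race to cancel, exactly one flips CANCELLABLE -> CANCELLED and observes the
// old value, so only that thread logs/handles the cancellation. The same shape
// with std::atomic (enum values are hypothetical):
#include <atomic>

namespace shenandoah_doc_sketch {

enum CancelState : int { kCancellable = 0, kCancelled = 1 };

inline bool try_cancel(std::atomic<int>& state) {
  int expected = kCancellable;
  // Succeeds for exactly one caller; later callers see kCancelled and fail.
  return state.compare_exchange_strong(expected, kCancelled);
}

} // namespace shenandoah_doc_sketch
// ------------------------------------------------------------------------------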
1794 
1795 void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
1796   if (try_cancel_gc()) {
1797     FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
1798     log_info(gc)("%s", msg.buffer());
1799     Events::log(Thread::current(), "%s", msg.buffer());
1800   }
1801 }
1802 
1803 uint ShenandoahHeap::max_workers() {
1804   return _max_workers;
1805 }
1806 
1807 void ShenandoahHeap::stop() {
1808   // The shutdown sequence should be able to terminate even when GC is running.
1809 
1810   // Step 0. Notify policy to disable event recording.
1811   _shenandoah_policy->record_shutdown();
1812 
1813   // Step 1. Notify control thread that we are in shutdown.
1814   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
1815   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
1816   control_thread()->prepare_for_graceful_shutdown();
1817 
1818   // Step 2. Notify GC workers that we are cancelling GC.
1819   cancel_gc(GCCause::_shenandoah_stop_vm);
1820 
1821   // Step 3. Wait until the control thread exits normally.
1822   control_thread()->stop();
1823 }
1824 
1825 void ShenandoahHeap::stw_unload_classes(bool full_gc) {
1826   if (!unload_classes()) return;
1827   // Unload classes and purge SystemDictionary.
1828   {
1829     ShenandoahPhaseTimings::Phase phase = full_gc ?
1830                                           ShenandoahPhaseTimings::full_gc_purge_class_unload :
1831                                           ShenandoahPhaseTimings::degen_gc_purge_class_unload;
1832     ShenandoahIsAliveSelector is_alive;
1833     {
1834       CodeCache::UnlinkingScope scope(is_alive.is_alive_closure());
1835       ShenandoahGCPhase gc_phase(phase);
1836       ShenandoahGCWorkerPhase worker_phase(phase);
1837       bool unloading_occurred = SystemDictionary::do_unloading(gc_timer());
1838 
1839       uint num_workers = _workers->active_workers();
1840       ShenandoahClassUnloadingTask unlink_task(phase, num_workers, unloading_occurred);
1841       _workers->run_task(&unlink_task);
1842     }
1843     // Release unloaded nmethods' memory.
1844     CodeCache::flush_unlinked_nmethods();
1845   }
1846 
1847   {
1848     ShenandoahGCPhase phase(full_gc ?
1849                             ShenandoahPhaseTimings::full_gc_purge_cldg :
1850                             ShenandoahPhaseTimings::degen_gc_purge_cldg);
1851     ClassLoaderDataGraph::purge(/*at_safepoint*/true);
1852   }
1853   // Resize and verify metaspace
1854   MetaspaceGC::compute_new_size();
1855   DEBUG_ONLY(MetaspaceUtils::verify();)
1856 }
1857 
1858 // Weak roots are either pre-evacuated (final mark) or updated (final updaterefs),
1859 // so they should not have forwarded oops.
1860 // However, we do need to "null" dead oops in the roots, if that cannot be done
1861 // in concurrent cycles.
1862 void ShenandoahHeap::stw_process_weak_roots(bool full_gc) {
1863   uint num_workers = _workers->active_workers();
1864   ShenandoahPhaseTimings::Phase timing_phase = full_gc ?
1865                                                ShenandoahPhaseTimings::full_gc_purge_weak_par :
1866                                                ShenandoahPhaseTimings::degen_gc_purge_weak_par;
1867   ShenandoahGCPhase phase(timing_phase);
1868   ShenandoahGCWorkerPhase worker_phase(timing_phase);
1869   // Cleanup weak roots
1870   if (has_forwarded_objects()) {
1871     ShenandoahForwardedIsAliveClosure is_alive;
1872     ShenandoahUpdateRefsClosure keep_alive;
1873     ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveClosure, ShenandoahUpdateRefsClosure>
1874       cleaning_task(timing_phase, &is_alive, &keep_alive, num_workers);
1875     _workers->run_task(&cleaning_task);
1876   } else {
1877     ShenandoahIsAliveClosure is_alive;
1878 #ifdef ASSERT
1879     ShenandoahAssertNotForwardedClosure verify_cl;
1880     ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, ShenandoahAssertNotForwardedClosure>
1881       cleaning_task(timing_phase, &is_alive, &verify_cl, num_workers);
1882 #else
1883     ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, DoNothingClosure>
1884       cleaning_task(timing_phase, &is_alive, &do_nothing_cl, num_workers);
1885 #endif
1886     _workers->run_task(&cleaning_task);
1887   }
1888 }
1889 
1890 void ShenandoahHeap::parallel_cleaning(bool full_gc) {
1891   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
1892   assert(is_stw_gc_in_progress(), "Only for Degenerated and Full GC");
1893   ShenandoahGCPhase phase(full_gc ?
1894                           ShenandoahPhaseTimings::full_gc_purge :
1895                           ShenandoahPhaseTimings::degen_gc_purge);
1896   stw_weak_refs(full_gc);
1897   stw_process_weak_roots(full_gc);
1898   stw_unload_classes(full_gc);
1899 }
1900 
1901 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
1902   set_gc_state_mask(HAS_FORWARDED, cond);
1903 }
1904 
1905 void ShenandoahHeap::set_unload_classes(bool uc) {
1906   _unload_classes.set_cond(uc);
1907 }
1908 
1909 bool ShenandoahHeap::unload_classes() const {
1910   return _unload_classes.is_set();
1911 }
1912 
1913 address ShenandoahHeap::in_cset_fast_test_addr() {
1914   ShenandoahHeap* heap = ShenandoahHeap::heap();
1915   assert(heap->collection_set() != nullptr, "Sanity");
1916   return (address) heap->collection_set()->biased_map_address();
1917 }
1918 
1919 size_t ShenandoahHeap::bytes_allocated_since_gc_start() const {
1920   return Atomic::load(&_bytes_allocated_since_gc_start);
1921 }
1922 
1923 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
1924   Atomic::store(&_bytes_allocated_since_gc_start, (size_t)0);
1925 }
1926 
1927 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
1928   _degenerated_gc_in_progress.set_cond(in_progress);
1929 }
1930 
1931 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
1932   _full_gc_in_progress.set_cond(in_progress);
1933 }
1934 
1935 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
1936   assert (is_full_gc_in_progress(), "should be");
1937   _full_gc_move_in_progress.set_cond(in_progress);
1938 }
1939 
1940 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
1941   set_gc_state_mask(UPDATEREFS, in_progress);
1942 }
1943 
1944 void ShenandoahHeap::register_nmethod(nmethod* nm) {
1945   ShenandoahCodeRoots::register_nmethod(nm);
1946 }
1947 
1948 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
1949   ShenandoahCodeRoots::unregister_nmethod(nm);
1950 }
1951 
1952 void ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
1953   heap_region_containing(o)->record_pin();
1954 }
1955 
1956 void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
1957   ShenandoahHeapRegion* r = heap_region_containing(o);
1958   assert(r != nullptr, "Sanity");
1959   assert(r->pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", r->index());
1960   r->record_unpin();
1961 }
1962 
1963 void ShenandoahHeap::sync_pinned_region_status() {
1964   ShenandoahHeapLocker locker(lock());
1965 
1966   for (size_t i = 0; i < num_regions(); i++) {
1967     ShenandoahHeapRegion *r = get_region(i);
1968     if (r->is_active()) {
1969       if (r->is_pinned()) {
1970         if (r->pin_count() == 0) {
1971           r->make_unpinned();
1972         }
1973       } else {
1974         if (r->pin_count() > 0) {
1975           r->make_pinned();
1976         }
1977       }
1978     }
1979   }
1980 
1981   assert_pinned_region_status();
1982 }
1983 
1984 #ifdef ASSERT
1985 void ShenandoahHeap::assert_pinned_region_status() {
1986   for (size_t i = 0; i < num_regions(); i++) {
1987     ShenandoahHeapRegion* r = get_region(i);
1988     assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
1989            "Region " SIZE_FORMAT " pinning status is inconsistent", i);
1990   }
1991 }
1992 #endif
1993 
1994 ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
1995   return _gc_timer;
1996 }
1997 
1998 void ShenandoahHeap::prepare_concurrent_roots() {
1999   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2000   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2001   set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
2002   set_concurrent_weak_root_in_progress(true);
2003   if (unload_classes()) {
2004     _unloader.prepare();
2005   }
2006 }
2007 
2008 void ShenandoahHeap::finish_concurrent_roots() {
2009   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2010   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2011   if (unload_classes()) {
2012     _unloader.finish();
2013   }
2014 }
2015 
2016 #ifdef ASSERT
2017 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
2018   assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
2019 
2020   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
2021     if (UseDynamicNumberOfGCThreads) {
2022       assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");
2023     } else {
2024       // Use ParallelGCThreads inside safepoints
2025       assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints");
2026     }
2027   } else {
2028     if (UseDynamicNumberOfGCThreads) {
2029       assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
2030     } else {
2031       // Use ConcGCThreads outside safepoints
2032       assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
2033     }
2034   }
2035 }
2036 #endif
2037 
2038 ShenandoahVerifier* ShenandoahHeap::verifier() {
2039   guarantee(ShenandoahVerify, "Should be enabled");
2040   assert (_verifier != nullptr, "sanity");
2041   return _verifier;
2042 }
2043 
2044 template<bool CONCURRENT>
2045 class ShenandoahUpdateHeapRefsTask : public WorkerTask {
2046 private:
2047   ShenandoahHeap* _heap;
2048   ShenandoahRegionIterator* _regions;
2049 public:
2050   ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions) :
2051     WorkerTask("Shenandoah Update References"),
2052     _heap(ShenandoahHeap::heap()),
2053     _regions(regions) {
2054   }
2055 
2056   void work(uint worker_id) {
2057     if (CONCURRENT) {
2058       ShenandoahConcurrentWorkerSession worker_session(worker_id);
2059       ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
2060       do_work<ShenandoahConcUpdateRefsClosure>();
2061     } else {
2062       ShenandoahParallelWorkerSession worker_session(worker_id);
2063       do_work<ShenandoahSTWUpdateRefsClosure>();
2064     }
2065   }
2066 
2067 private:
2068   template<class T>
2069   void do_work() {
2070     T cl;
2071     ShenandoahHeapRegion* r = _regions->next();
2072     ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
2073     while (r != nullptr) {
2074       HeapWord* update_watermark = r->get_update_watermark();
2075       assert (update_watermark >= r->bottom(), "sanity");
2076       if (r->is_active() && !r->is_cset()) {
2077         _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2078       }
2079       if (ShenandoahPacing) {
2080         _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
2081       }
2082       if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
2083         return;
2084       }
2085       r = _regions->next();
2086     }
2087   }
2088 };
2089 
2090 void ShenandoahHeap::update_heap_references(bool concurrent) {
2091   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2092 
2093   if (concurrent) {
2094     ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator);
2095     workers()->run_task(&task);
2096   } else {
2097     ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator);
2098     workers()->run_task(&task);
2099   }
2100 }
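
// --- Illustrative sketch (not part of the VM build) --------------------------
// The task above is templated on CONCURRENT, so the concurrent and
// stop-the-world paths are two separate instantiations: the branch in work()
// and the closure type in do_work<T>() are resolved at compile time, keeping
// the per-region loop free of runtime mode checks. The same dispatch shape in
// miniature (all names are hypothetical):
#include <cstddef>

namespace shenandoah_doc_sketch {

struct ConcClosure { void apply(size_t /*region*/) { /* concurrent variant */ } };
struct StwClosure  { void apply(size_t /*region*/) { /* STW variant */ } };

template <bool CONCURRENT>
struct UpdateRefsModel {
  void work(size_t num_regions) {
    if (CONCURRENT) {
      do_work<ConcClosure>(num_regions);  // branch folds away per instantiation
    } else {
      do_work<StwClosure>(num_regions);
    }
  }
 private:
  template <typename T>
  void do_work(size_t num_regions) {
    T cl;
    for (size_t i = 0; i < num_regions; i++) {
      cl.apply(i);                        // no mode check inside the hot loop
    }
  }
};

// A runtime flag picks the instantiation once, up front, as in
// update_heap_references() above.
inline void run_update_refs_model(bool concurrent, size_t num_regions) {
  if (concurrent) UpdateRefsModel<true>().work(num_regions);
  else            UpdateRefsModel<false>().work(num_regions);
}

} // namespace shenandoah_doc_sketch
// ------------------------------------------------------------------------------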
2101 
2102 
2103 class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
2104 private:
2105   ShenandoahHeapLock* const _lock;
2106 
2107 public:
2108   ShenandoahFinalUpdateRefsUpdateRegionStateClosure() : _lock(ShenandoahHeap::heap()->lock()) {}
2109 
2110   void heap_region_do(ShenandoahHeapRegion* r) {
2111     // Drop the unnecessary "pinned" state from regions that do not have CP marks
2112     // anymore, as this allows trashing them.
2113 
2114     if (r->is_active()) {
2115       if (r->is_pinned()) {
2116         if (r->pin_count() == 0) {
2117           ShenandoahHeapLocker locker(_lock);
2118           r->make_unpinned();
2119         }
2120       } else {
2121         if (r->pin_count() > 0) {
2122           ShenandoahHeapLocker locker(_lock);
2123           r->make_pinned();
2124         }
2125       }
2126     }
2127   }
2128 
2129   bool is_thread_safe() { return true; }
2130 };
2131 
2132 void ShenandoahHeap::update_heap_region_states(bool concurrent) {
2133   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2134   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2135 
2136   {
2137     ShenandoahGCPhase phase(concurrent ?
2138                             ShenandoahPhaseTimings::final_update_refs_update_region_states :
2139                             ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
2140     ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl;
2141     parallel_heap_region_iterate(&cl);
2142 
2143     assert_pinned_region_status();
2144   }
2145 
2146   {
2147     ShenandoahGCPhase phase(concurrent ?
2148                             ShenandoahPhaseTimings::final_update_refs_trash_cset :
2149                             ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
2150     trash_cset_regions();
2151   }
2152 }
2153 
2154 void ShenandoahHeap::rebuild_free_set(bool concurrent) {
2155   {
2156     ShenandoahGCPhase phase(concurrent ?
2157                             ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
2158                             ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
2159     ShenandoahHeapLocker locker(lock());
2160     _free_set->rebuild();
2161   }
2162 }
2163 
2164 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2165   print_on(st);
2166   st->cr();
2167   print_heap_regions_on(st);
2168 }
2169 
2170 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2171   size_t slice = r->index() / _bitmap_regions_per_slice;
2172 
2173   size_t regions_from = _bitmap_regions_per_slice * slice;
2174   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2175   for (size_t g = regions_from; g < regions_to; g++) {
2176     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2177     if (skip_self && g == r->index()) continue;
2178     if (get_region(g)->is_committed()) {
2179       return true;
2180     }
2181   }
2182   return false;
2183 }
2184 
2185 bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2186   shenandoah_assert_heaplocked();
2187 
2188   // Bitmaps in special regions do not need commits
2189   if (_bitmap_region_special) {
2190     return true;
2191   }
2192 
2193   if (is_bitmap_slice_committed(r, true)) {
2194     // Some other region from the group is already committed, meaning the bitmap
2195     // slice is already committed; exit right away.
2196     return true;
2197   }
2198 
2199   // Commit the bitmap slice:
2200   size_t slice = r->index() / _bitmap_regions_per_slice;
2201   size_t off = _bitmap_bytes_per_slice * slice;
2202   size_t len = _bitmap_bytes_per_slice;
2203   char* start = (char*) _bitmap_region.start() + off;
2204 
2205   if (!os::commit_memory(start, len, false)) {
2206     return false;
2207   }
2208 
2209   if (AlwaysPreTouch) {
2210     os::pretouch_memory(start, start + len, _pretouch_bitmap_page_size);
2211   }
2212 
2213   return true;
2214 }
2215 
2216 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2217   shenandoah_assert_heaplocked();
2218 
2219   // Bitmaps in special regions do not need uncommits
2220   if (_bitmap_region_special) {
2221     return true;
2222   }
2223 
2224   if (is_bitmap_slice_committed(r, true)) {
2225     // Some other region from the group is still committed, meaning the bitmap
2226     // slice should stay committed; exit right away.
2227     return true;
2228   }
2229 
2230   // Uncommit the bitmap slice:
2231   size_t slice = r->index() / _bitmap_regions_per_slice;
2232   size_t off = _bitmap_bytes_per_slice * slice;
2233   size_t len = _bitmap_bytes_per_slice;
2234   if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
2235     return false;
2236   }
2237   return true;
2238 }
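
// --- Illustrative sketch (not part of the VM build) --------------------------
// The slice math shared by commit_bitmap_slice() and uncommit_bitmap_slice()
// maps a region to its slice, and a slice to its byte offset in the bitmap
// reservation. A tiny model with hypothetical numbers to make it concrete:
#include <cstddef>

namespace shenandoah_doc_sketch {

struct BitmapSliceGeometry {
  size_t regions_per_slice;  // stands in for _bitmap_regions_per_slice
  size_t bytes_per_slice;    // stands in for _bitmap_bytes_per_slice

  size_t slice_of(size_t region_index) const {
    return region_index / regions_per_slice;
  }
  size_t byte_offset_of(size_t region_index) const {
    return slice_of(region_index) * bytes_per_slice;
  }
};

// Example: with 8 regions per slice and 64 KB per slice, region 21 belongs to
// slice 21 / 8 = 2 (covering regions 16..23) at byte offset 2 * 64 KB = 128 KB;
// that slice may be uncommitted only when regions 16..23 are all uncommitted.

} // namespace shenandoah_doc_sketch
// ------------------------------------------------------------------------------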
2239 
2240 void ShenandoahHeap::safepoint_synchronize_begin() {
2241   if (ShenandoahSuspendibleWorkers) {
2242     SuspendibleThreadSet::synchronize();
2243   }
2244 }
2245 
2246 void ShenandoahHeap::safepoint_synchronize_end() {
2247   if (ShenandoahSuspendibleWorkers) {
2248     SuspendibleThreadSet::desynchronize();
2249   }
2250 }
2251 
2252 void ShenandoahHeap::entry_uncommit(double shrink_before, size_t shrink_until) {
2253   static const char *msg = "Concurrent uncommit";
2254   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_uncommit, true /* log_heap_usage */);
2255   EventMark em("%s", msg);
2256 
2257   op_uncommit(shrink_before, shrink_until);
2258 }
2259 
2260 void ShenandoahHeap::try_inject_alloc_failure() {
2261   if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2262     _inject_alloc_failure.set();
2263     os::naked_short_sleep(1);
2264     if (cancelled_gc()) {
2265       log_info(gc)("Allocation failure was successfully injected");
2266     }
2267   }
2268 }
2269 
2270 bool ShenandoahHeap::should_inject_alloc_failure() {
2271   return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2272 }
2273 
2274 void ShenandoahHeap::initialize_serviceability() {
2275   _memory_pool = new ShenandoahMemoryPool(this);
2276   _cycle_memory_manager.add_pool(_memory_pool);
2277   _stw_memory_manager.add_pool(_memory_pool);
2278 }
2279 
2280 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2281   GrowableArray<GCMemoryManager*> memory_managers(2);
2282   memory_managers.append(&_cycle_memory_manager);
2283   memory_managers.append(&_stw_memory_manager);
2284   return memory_managers;
2285 }
2286 
2287 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2288   GrowableArray<MemoryPool*> memory_pools(1);
2289   memory_pools.append(_memory_pool);
2290   return memory_pools;
2291 }
2292 
2293 MemoryUsage ShenandoahHeap::memory_usage() {
2294   return _memory_pool->get_memory_usage();
2295 }
2296 
2297 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2298   _heap(ShenandoahHeap::heap()),
2299   _index(0) {}
2300 
2301 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2302   _heap(heap),
2303   _index(0) {}
2304 
2305 void ShenandoahRegionIterator::reset() {
2306   _index = 0;
2307 }
2308 
2309 bool ShenandoahRegionIterator::has_next() const {
2310   return _index < _heap->num_regions();
2311 }
2312 
2313 char ShenandoahHeap::gc_state() const {
2314   return _gc_state.raw_value();
2315 }
2316 
2317 ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
2318 #ifdef ASSERT
2319   assert(_liveness_cache != nullptr, "sanity");
2320   assert(worker_id < _max_workers, "sanity");
2321   for (uint i = 0; i < num_regions(); i++) {
2322     assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
2323   }
2324 #endif
2325   return _liveness_cache[worker_id];
2326 }
2327 
2328 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
2329   assert(worker_id < _max_workers, "sanity");
2330   assert(_liveness_cache != nullptr, "sanity");
2331   ShenandoahLiveData* ld = _liveness_cache[worker_id];
2332   for (uint i = 0; i < num_regions(); i++) {
2333     ShenandoahLiveData live = ld[i];
2334     if (live > 0) {
2335       ShenandoahHeapRegion* r = get_region(i);
2336       r->increase_live_data_gc_words(live);
2337       ld[i] = 0;
2338     }
2339   }
2340 }
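
// --- Illustrative sketch (not part of the VM build) --------------------------
// The liveness cache above is a classic scatter/accumulate buffer: each worker
// accumulates live data per region into a private array during marking, then
// flushes non-zero entries into the shared region counters once, instead of
// performing one contended update per marked object. A standalone model
// (types and names are hypothetical):
#include <cstddef>
#include <vector>

namespace shenandoah_doc_sketch {

inline void flush_liveness_model(std::vector<unsigned>& worker_cache,
                                 std::vector<size_t>& region_live_words) {
  for (size_t i = 0; i < worker_cache.size(); i++) {
    if (worker_cache[i] > 0) {
      region_live_words[i] += worker_cache[i];  // one update per touched region
      worker_cache[i] = 0;                      // leave the cache empty for reuse
    }
  }
}

} // namespace shenandoah_doc_sketch
// ------------------------------------------------------------------------------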
2341 
2342 bool ShenandoahHeap::requires_barriers(stackChunkOop obj) const {
2343   if (is_idle()) return false;
2344 
2345   // Objects allocated after marking start are implicitly alive and don't need any barriers
2346   // during the marking phase.
2347   if (is_concurrent_mark_in_progress() &&
2348      !marking_context()->allocated_after_mark_start(obj)) {
2349     return true;
2350   }
2351 
2352   // Cannot guarantee that obj and everything reachable from it are free of from-space refs.
2353   if (has_forwarded_objects()) {
2354     return true;
2355   }
2356 
2357   return false;
2358 }