/*
 * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/universe.hpp"

#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/plab.hpp"
#include "gc/shared/tlab_globals.hpp"

#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahInitLogger.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahMemoryPool.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPacer.inline.hpp"
#include "gc/shenandoah/shenandoahPadding.hpp"
#include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
#include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
#if INCLUDE_JFR
#include "gc/shenandoah/shenandoahJfrSupport.hpp"
#endif

#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "memory/classLoaderMetaspace.hpp"
#include "memory/metaspaceUtils.hpp"
#include "nmt/mallocTracker.hpp"
#include "nmt/memTracker.hpp"
#include "oops/compressedOops.inline.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/stackWatermarkSet.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/events.hpp"
#include "utilities/powerOfTwo.hpp"

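// Pre-touches committed heap regions in parallel, so that first-touch page
// faults are taken by GC worker threads up front, rather than by the
// application on first use.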
class ShenandoahPretouchHeapTask : public WorkerTask {
private:
  ShenandoahRegionIterator _regions;
  const size_t _page_size;
public:
  ShenandoahPretouchHeapTask(size_t page_size) :
    WorkerTask("Shenandoah Pretouch Heap"),
    _page_size(page_size) {}

  virtual void work(uint worker_id) {
    ShenandoahHeapRegion* r = _regions.next();
    while (r != nullptr) {
      if (r->is_committed()) {
        os::pretouch_memory(r->bottom(), r->end(), _page_size);
      }
      r = _regions.next();
    }
  }
};

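// Pre-touches the portion of the mark bitmap that backs each committed heap
// region, using the same region iterator scheme as the heap pre-touch task.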
class ShenandoahPretouchBitmapTask : public WorkerTask {
private:
  ShenandoahRegionIterator _regions;
  char* _bitmap_base;
  const size_t _bitmap_size;
  const size_t _page_size;
public:
  ShenandoahPretouchBitmapTask(char* bitmap_base, size_t bitmap_size, size_t page_size) :
    WorkerTask("Shenandoah Pretouch Bitmap"),
    _bitmap_base(bitmap_base),
    _bitmap_size(bitmap_size),
    _page_size(page_size) {}

  virtual void work(uint worker_id) {
    ShenandoahHeapRegion* r = _regions.next();
    while (r != nullptr) {
      size_t start = r->index()       * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      size_t end   = (r->index() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " <= " SIZE_FORMAT, end, _bitmap_size);

      if (r->is_committed()) {
        os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);
      }

      r = _regions.next();
    }
  }
};


jint ShenandoahHeap::initialize() {
  //
  // Figure out heap sizing
  //

  size_t init_byte_size = InitialHeapSize;
  size_t min_byte_size  = MinHeapSize;
  size_t max_byte_size  = MaxHeapSize;
  size_t heap_alignment = HeapAlignment;

  size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();

  Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
  Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");

  _num_regions = ShenandoahHeapRegion::region_count();
  assert(_num_regions == (max_byte_size / reg_size_bytes),
         "Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT,
         _num_regions, max_byte_size, reg_size_bytes);

  // Now we know the number of regions, initialize the heuristics.
  initialize_heuristics();

  size_t num_committed_regions = init_byte_size / reg_size_bytes;
  num_committed_regions = MIN2(num_committed_regions, _num_regions);
  assert(num_committed_regions <= _num_regions, "sanity");
  _initial_size = num_committed_regions * reg_size_bytes;

  size_t num_min_regions = min_byte_size / reg_size_bytes;
  num_min_regions = MIN2(num_min_regions, _num_regions);
  assert(num_min_regions <= _num_regions, "sanity");
  _minimum_size = num_min_regions * reg_size_bytes;

  // Default to max heap size.
  _soft_max_size = _num_regions * reg_size_bytes;

  _committed = _initial_size;

  size_t heap_page_size   = UseLargePages ? os::large_page_size() : os::vm_page_size();
  size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
  size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();

  //
  // Reserve and commit memory for heap
  //

  ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
  initialize_reserved_region(heap_rs);
  _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
  _heap_region_special = heap_rs.special();

  assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
         "Misaligned heap: " PTR_FORMAT, p2i(base()));
  os::trace_page_sizes_for_requested_size("Heap",
                                          max_byte_size, heap_alignment,
                                          heap_rs.base(),
                                          heap_rs.size(), heap_rs.page_size());

#if SHENANDOAH_OPTIMIZED_MARKTASK
  // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
  // Fail if we ever attempt to address more than we can.
  if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
    FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
                          "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
                          "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
                p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
    vm_exit_during_initialization("Fatal Error", buf);
  }
#endif

  ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
  if (!_heap_region_special) {
    os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
                              "Cannot commit heap memory");
  }

  //
  // Reserve and commit memory for bitmap(s)
  //

  size_t bitmap_size_orig = ShenandoahMarkBitMap::compute_size(heap_rs.size());
  _bitmap_size = align_up(bitmap_size_orig, bitmap_page_size);

  size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();

  guarantee(bitmap_bytes_per_region != 0,
            "Bitmap bytes per region should not be zero");
  guarantee(is_power_of_2(bitmap_bytes_per_region),
            "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);

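  // A bitmap "slice" is the granularity at which bitmap memory is committed
  // and uncommitted. If a bitmap page covers more than one region, all of
  // those regions share one slice; otherwise each region gets its own
  // bitmap share, and slices are sized to be page-granular.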
  if (bitmap_page_size > bitmap_bytes_per_region) {
    _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
    _bitmap_bytes_per_slice = bitmap_page_size;
  } else {
    _bitmap_regions_per_slice = 1;
    _bitmap_bytes_per_slice = bitmap_bytes_per_region;
  }

  guarantee(_bitmap_regions_per_slice >= 1,
            "Should have at least one region per slice: " SIZE_FORMAT,
            _bitmap_regions_per_slice);

  guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
            "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
            _bitmap_bytes_per_slice, bitmap_page_size);

  ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
  os::trace_page_sizes_for_requested_size("Mark Bitmap",
                                          bitmap_size_orig, bitmap_page_size,
                                          bitmap.base(),
                                          bitmap.size(), bitmap.page_size());
  MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
  _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
  _bitmap_region_special = bitmap.special();

  size_t bitmap_init_commit = _bitmap_bytes_per_slice *
                              align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
  bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
  if (!_bitmap_region_special) {
    os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
                              "Cannot commit bitmap memory");
  }

  _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions, _max_workers);

  if (ShenandoahVerify) {
    ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
    os::trace_page_sizes_for_requested_size("Verify Bitmap",
                                            bitmap_size_orig, bitmap_page_size,
                                            verify_bitmap.base(),
                                            verify_bitmap.size(), verify_bitmap.page_size());
    if (!verify_bitmap.special()) {
      os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
                                "Cannot commit verification bitmap memory");
    }
    MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
    MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
    _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
    _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
  }

  // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
  size_t aux_bitmap_page_size = bitmap_page_size;

  ReservedSpace aux_bitmap(_bitmap_size, aux_bitmap_page_size);
  os::trace_page_sizes_for_requested_size("Aux Bitmap",
                                          bitmap_size_orig, aux_bitmap_page_size,
                                          aux_bitmap.base(),
                                          aux_bitmap.size(), aux_bitmap.page_size());
  MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
  _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
  _aux_bitmap_region_special = aux_bitmap.special();
  _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);

  //
  // Create regions and region sets
  //
  size_t region_align = align_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE);
  size_t region_storage_size_orig = region_align * _num_regions;
  size_t region_storage_size = align_up(region_storage_size_orig,
                                        MAX2(region_page_size, os::vm_allocation_granularity()));

  ReservedSpace region_storage(region_storage_size, region_page_size);
  os::trace_page_sizes_for_requested_size("Region Storage",
                                          region_storage_size_orig, region_page_size,
                                          region_storage.base(),
                                          region_storage.size(), region_storage.page_size());
  MemTracker::record_virtual_memory_type(region_storage.base(), mtGC);
  if (!region_storage.special()) {
    os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false,
                              "Cannot commit region memory");
  }

  // Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks.
  // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there.
  // If not successful, bite the bullet and allocate at whatever address.
  {
    const size_t cset_align = MAX2<size_t>(os::vm_page_size(), os::vm_allocation_granularity());
    const size_t cset_size = align_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align);
    const size_t cset_page_size = os::vm_page_size();

    uintptr_t min = round_up_power_of_2(cset_align);
    uintptr_t max = (1u << 30u);
    ReservedSpace cset_rs;

    for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
      char* req_addr = (char*)addr;
      assert(is_aligned(req_addr, cset_align), "Should be aligned");
      cset_rs = ReservedSpace(cset_size, cset_align, cset_page_size, req_addr);
      if (cset_rs.is_reserved()) {
        assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
        _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
        break;
      }
    }

    if (_collection_set == nullptr) {
      cset_rs = ReservedSpace(cset_size, cset_align, os::vm_page_size());
      _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
    }
    os::trace_page_sizes_for_requested_size("Collection Set",
                                            cset_size, cset_page_size,
                                            cset_rs.base(),
                                            cset_rs.size(), cset_rs.page_size());
  }

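  // Construct the region objects in place in the dedicated, cache-line-aligned
  // region storage, and let the free set know about all of them.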
  _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
  _free_set = new ShenandoahFreeSet(this, _num_regions);

  {
    ShenandoahHeapLocker locker(lock());

    for (size_t i = 0; i < _num_regions; i++) {
      HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
      bool is_committed = i < num_committed_regions;
      void* loc = region_storage.base() + i * region_align;

      ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
      assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");

      _marking_context->initialize_top_at_mark_start(r);
      _regions[i] = r;
      assert(!collection_set()->is_in(i), "New region should not be in collection set");
    }

    // Initialize to complete
    _marking_context->mark_complete();

    _free_set->rebuild();
  }

  if (AlwaysPreTouch) {
    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before initialize() below zeroes it with the initializing thread. For any given region,
    // we touch the region and the corresponding bitmaps from the same thread.
    ShenandoahPushWorkerScope scope(workers(), _max_workers, false);

    _pretouch_heap_page_size = heap_page_size;
    _pretouch_bitmap_page_size = bitmap_page_size;

    // OS memory managers may want to coalesce back-to-back pages. Make their jobs
    // simpler by pre-touching contiguous spaces (heap and bitmap) separately.

    ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, _pretouch_bitmap_page_size);
    _workers->run_task(&bcl);

    ShenandoahPretouchHeapTask hcl(_pretouch_heap_page_size);
    _workers->run_task(&hcl);
  }

  //
  // Initialize the rest of GC subsystems
  //

  _liveness_cache = NEW_C_HEAP_ARRAY(ShenandoahLiveData*, _max_workers, mtGC);
  for (uint worker = 0; worker < _max_workers; worker++) {
    _liveness_cache[worker] = NEW_C_HEAP_ARRAY(ShenandoahLiveData, _num_regions, mtGC);
    Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
  }

  // There should probably be Shenandoah-specific options for these,
  // just as there are G1-specific options.
  {
    ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
    satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
    satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
  }

  _monitoring_support = new ShenandoahMonitoringSupport(this);
  _phase_timings = new ShenandoahPhaseTimings(max_workers());
  ShenandoahCodeRoots::initialize();

  if (ShenandoahPacing) {
    _pacer = new ShenandoahPacer(this);
    _pacer->setup_for_idle();
  }

  _control_thread = new ShenandoahControlThread();

  ShenandoahInitLogger::print();

  return JNI_OK;
}

void ShenandoahHeap::initialize_mode() {
  if (ShenandoahGCMode != nullptr) {
    if (strcmp(ShenandoahGCMode, "satb") == 0) {
      _gc_mode = new ShenandoahSATBMode();
    } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
      _gc_mode = new ShenandoahPassiveMode();
    } else {
      vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
    }
  } else {
    vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
  }
  _gc_mode->initialize_flags();
  if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
    vm_exit_during_initialization(
            err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                    _gc_mode->name()));
  }
  if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
    vm_exit_during_initialization(
            err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                    _gc_mode->name()));
  }
}

void ShenandoahHeap::initialize_heuristics() {
  assert(_gc_mode != nullptr, "Must be initialized");
  _heuristics = _gc_mode->initialize_heuristics();

  if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
    vm_exit_during_initialization(
            err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                    _heuristics->name()));
  }
  if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
    vm_exit_during_initialization(
            err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                    _heuristics->name()));
  }
}

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif

ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  CollectedHeap(),
  _initial_size(0),
  _used(0),
  _committed(0),
  _bytes_allocated_since_gc_start(0),
  _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
  _workers(nullptr),
  _safepoint_workers(nullptr),
  _heap_region_special(false),
  _num_regions(0),
  _regions(nullptr),
  _update_refs_iterator(this),
  _gc_state_changed(false),
  _gc_no_progress_count(0),
  _control_thread(nullptr),
  _shenandoah_policy(policy),
  _gc_mode(nullptr),
  _heuristics(nullptr),
  _free_set(nullptr),
  _pacer(nullptr),
  _verifier(nullptr),
  _phase_timings(nullptr),
  _monitoring_support(nullptr),
  _memory_pool(nullptr),
  _stw_memory_manager("Shenandoah Pauses"),
  _cycle_memory_manager("Shenandoah Cycles"),
  _gc_timer(new ConcurrentGCTimer()),
  _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
  _ref_processor(new ShenandoahReferenceProcessor(MAX2(_max_workers, 1U))),
  _marking_context(nullptr),
  _bitmap_size(0),
  _bitmap_regions_per_slice(0),
  _bitmap_bytes_per_slice(0),
  _bitmap_region_special(false),
  _aux_bitmap_region_special(false),
  _liveness_cache(nullptr),
  _collection_set(nullptr)
{
  // Initialize GC mode early, so we can adjust barrier support
  initialize_mode();
  BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));

  _max_workers = MAX2(_max_workers, 1U);
  _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
  if (_workers == nullptr) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
  }

  if (ParallelGCThreads > 1) {
    _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread",
                                                     ParallelGCThreads);
    _safepoint_workers->initialize_workers();
  }
}

#ifdef _MSC_VER
#pragma warning( pop )
#endif

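// Clears the mark bitmap for every region whose bitmap slice is currently
// committed; uncommitted slices have no bitmap storage to clear.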
class ShenandoahResetBitmapTask : public WorkerTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahResetBitmapTask() :
    WorkerTask("Shenandoah Reset Bitmap") {}

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahMarkingContext* const ctx = heap->marking_context();
    while (region != nullptr) {
      if (heap->is_bitmap_slice_committed(region)) {
        ctx->clear_bitmap(region);
      }
      region = _regions.next();
    }
  }
};

void ShenandoahHeap::reset_mark_bitmap() {
  assert_gc_workers(_workers->active_workers());
  mark_incomplete_marking_context();

  ShenandoahResetBitmapTask task;
  _workers->run_task(&task);
}

void ShenandoahHeap::print_on(outputStream* st) const {
  st->print_cr("Shenandoah Heap");
  st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
               byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
               byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
               byte_size_in_proper_unit(committed()),    proper_unit_for_byte_size(committed()),
               byte_size_in_proper_unit(used()),         proper_unit_for_byte_size(used()));
  st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions",
               num_regions(),
               byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
               proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));

  st->print("Status: ");
  if (has_forwarded_objects())                 st->print("has forwarded objects, ");
  if (is_concurrent_mark_in_progress())        st->print("marking, ");
  if (is_evacuation_in_progress())             st->print("evacuating, ");
  if (is_update_refs_in_progress())            st->print("updating refs, ");
  if (is_degenerated_gc_in_progress())         st->print("degenerated gc, ");
  if (is_full_gc_in_progress())                st->print("full gc, ");
  if (is_full_gc_move_in_progress())           st->print("full gc move, ");
  if (is_concurrent_weak_root_in_progress())   st->print("concurrent weak roots, ");
  if (is_concurrent_strong_root_in_progress() &&
      !is_concurrent_weak_root_in_progress())  st->print("concurrent strong roots, ");

  if (cancelled_gc()) {
    st->print("cancelled");
  } else {
    st->print("not cancelled");
  }
  st->cr();

  st->print_cr("Reserved region:");
  st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
               p2i(reserved_region().start()),
               p2i(reserved_region().end()));

  ShenandoahCollectionSet* cset = collection_set();
  st->print_cr("Collection set:");
  if (cset != nullptr) {
    st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address()));
    st->print_cr(" - map (biased):  " PTR_FORMAT, p2i(cset->biased_map_address()));
  } else {
    st->print_cr(" (null)");
  }

  st->cr();
  MetaspaceUtils::print_on(st);

  if (Verbose) {
    st->cr();
    print_heap_regions_on(st);
  }
}

class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    assert(thread != nullptr, "Sanity");
    assert(thread->is_Worker_thread(), "Only worker thread expected");
    ShenandoahThreadLocalData::initialize_gclab(thread);
  }
};

void ShenandoahHeap::post_initialize() {
  CollectedHeap::post_initialize();
  MutexLocker ml(Threads_lock);

  ShenandoahInitWorkerGCLABClosure init_gclabs;
  _workers->threads_do(&init_gclabs);

  // GCLABs cannot be initialized early during VM startup, because the max size
  // cannot be determined yet. Instead, let WorkerThreads initialize the GCLAB
  // whenever a new worker is created.
  _workers->set_initialize_gclab();
  if (_safepoint_workers != nullptr) {
    _safepoint_workers->threads_do(&init_gclabs);
    _safepoint_workers->set_initialize_gclab();
  }

  _heuristics->initialize();

  JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers();)
}

size_t ShenandoahHeap::used() const {
  return Atomic::load(&_used);
}

size_t ShenandoahHeap::committed() const {
  return Atomic::load(&_committed);
}

size_t ShenandoahHeap::available() const {
  return free_set()->available();
}

void ShenandoahHeap::increase_committed(size_t bytes) {
  shenandoah_assert_heaplocked_or_safepoint();
  _committed += bytes;
}

void ShenandoahHeap::decrease_committed(size_t bytes) {
  shenandoah_assert_heaplocked_or_safepoint();
  _committed -= bytes;
}

void ShenandoahHeap::increase_used(size_t bytes) {
  Atomic::add(&_used, bytes, memory_order_relaxed);
}

void ShenandoahHeap::set_used(size_t bytes) {
  Atomic::store(&_used, bytes);
}

void ShenandoahHeap::decrease_used(size_t bytes) {
  assert(used() >= bytes, "never decrease heap size by more than we've left");
  Atomic::sub(&_used, bytes, memory_order_relaxed);
}

void ShenandoahHeap::increase_allocated(size_t bytes) {
  Atomic::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed);
}

void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
  size_t bytes = words * HeapWordSize;
  if (!waste) {
    increase_used(bytes);
  }
  increase_allocated(bytes);
  if (ShenandoahPacing) {
    control_thread()->pacing_notify_alloc(words);
    if (waste) {
      pacer()->claim_for_alloc(words, true);
    }
  }
}

size_t ShenandoahHeap::capacity() const {
  return committed();
}

size_t ShenandoahHeap::max_capacity() const {
  return _num_regions * ShenandoahHeapRegion::region_size_bytes();
}

size_t ShenandoahHeap::soft_max_capacity() const {
  size_t v = Atomic::load(&_soft_max_size);
  assert(min_capacity() <= v && v <= max_capacity(),
         "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
         min_capacity(), v, max_capacity());
  return v;
}

void ShenandoahHeap::set_soft_max_capacity(size_t v) {
  assert(min_capacity() <= v && v <= max_capacity(),
         "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
         min_capacity(), v, max_capacity());
  Atomic::store(&_soft_max_size, v);
}

size_t ShenandoahHeap::min_capacity() const {
  return _minimum_size;
}

size_t ShenandoahHeap::initial_capacity() const {
  return _initial_size;
}

bool ShenandoahHeap::is_in(const void* p) const {
  HeapWord* heap_base = (HeapWord*) base();
  HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
  return p >= heap_base && p < last_region_end;
}

void ShenandoahHeap::maybe_uncommit(double shrink_before, size_t shrink_until) {
  assert (ShenandoahUncommit, "should be enabled");

  // Determine if there is work to do. This avoids taking the heap lock if there is
  // no work available, avoids spamming logs with superfluous logging messages,
  // and minimises the amount of work while locks are taken.

  if (committed() <= shrink_until) return;

  bool has_work = false;
  for (size_t i = 0; i < num_regions(); i++) {
    ShenandoahHeapRegion* r = get_region(i);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      has_work = true;
      break;
    }
  }

  if (has_work) {
    static const char* msg = "Concurrent uncommit";
    ShenandoahConcurrentPhase gcPhase(msg, ShenandoahPhaseTimings::conc_uncommit, true /* log_heap_usage */);
    EventMark em("%s", msg);

    op_uncommit(shrink_before, shrink_until);
  }
}

void ShenandoahHeap::op_uncommit(double shrink_before, size_t shrink_until) {
  assert (ShenandoahUncommit, "should be enabled");

  // The application allocates from the beginning of the heap, and GC allocates at
  // the end of it. It is more efficient to uncommit from the end, so that the
  // application can keep using the nearby committed regions. GC allocations are
  // much less frequent, and can therefore accept the re-committing costs.

  size_t count = 0;
  for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow
    ShenandoahHeapRegion* r = get_region(i - 1);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      ShenandoahHeapLocker locker(lock());
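      // Re-check under the heap lock: the region may have been allocated into
      // between the unlocked check above and acquiring the lock.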
      if (r->is_empty_committed()) {
        if (committed() < shrink_until + ShenandoahHeapRegion::region_size_bytes()) {
          break;
        }

        r->make_uncommitted();
        count++;
      }
    }
    SpinPause(); // allow allocators to take the lock
  }

  if (count > 0) {
    notify_heap_changed();
  }
}

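// SoftMaxHeapSize is a manageable flag and can change at runtime. Poll its
// current value and, if it changed, clamp it into [min_capacity, max_capacity]
// before installing it as the new soft max.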
bool ShenandoahHeap::check_soft_max_changed() {
  size_t new_soft_max = Atomic::load(&SoftMaxHeapSize);
  size_t old_soft_max = soft_max_capacity();
  if (new_soft_max != old_soft_max) {
    new_soft_max = MAX2(min_capacity(), new_soft_max);
    new_soft_max = MIN2(max_capacity(), new_soft_max);
    if (new_soft_max != old_soft_max) {
      log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
                   byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
                   byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
      );
      set_soft_max_capacity(new_soft_max);
      return true;
    }
  }
  return false;
}

void ShenandoahHeap::notify_heap_changed() {
  // Update monitoring counters when we took a new region. This amortizes the
  // update costs on slow path.
  monitoring_support()->notify_heap_changed();

  // This is called from allocation path, and thus should be fast.
  _heap_changed.try_set();
}

void ShenandoahHeap::set_forced_counters_update(bool value) {
  monitoring_support()->set_forced_counters_update(value);
}

void ShenandoahHeap::handle_force_counters_update() {
  monitoring_support()->handle_force_counters_update();
}

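// Slow path for GCLAB allocation: the current GCLAB cannot fit the object.
// Resizes the GCLAB based on allocation history, retires the old one, and
// allocates from a fresh one; returns nullptr to make the caller fall back
// to shared GC allocation when even the resized GCLAB would not fit.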
HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // New object should fit the GCLAB size
  size_t min_size = MAX2(size, PLAB::min_size());

  // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
  size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
  new_size = MIN2(new_size, PLAB::max_size());
  new_size = MAX2(new_size, PLAB::min_size());

  // Record new heuristic value even if we take any shortcut. This captures
  // the case when moderately-sized objects always take a shortcut. At some point,
  // heuristics should catch up with them.
  ShenandoahThreadLocalData::set_gclab_size(thread, new_size);

  if (new_size < size) {
    // New size still does not fit the object. Fall back to shared allocation.
    // This avoids retiring perfectly good GCLABs, when we encounter a large object.
    return nullptr;
  }

  // Retire current GCLAB, and allocate a new one.
  PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
  gclab->retire();

  size_t actual_size = 0;
  HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
  if (gclab_buf == nullptr) {
    return nullptr;
  }

  assert (size <= actual_size, "allocation should fit");

  // ...and clear or zap the just-allocated GCLAB, if needed.
  if (ZeroTLAB) {
    Copy::zero_to_words(gclab_buf, actual_size);
  } else if (ZapTLAB) {
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
  }
  gclab->set_buf(gclab_buf, actual_size);
  return gclab->allocate(size);
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
                                            size_t requested_size,
                                            size_t* actual_size) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
  HeapWord* res = allocate_memory(req);
  if (res != nullptr) {
    *actual_size = req.actual_size();
  } else {
    *actual_size = 0;
  }
  return res;
}

HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
                                             size_t word_size,
                                             size_t* actual_size) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
  HeapWord* res = allocate_memory(req);
  if (res != nullptr) {
    *actual_size = req.actual_size();
  } else {
    *actual_size = 0;
  }
  return res;
}

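// Central allocation entry point for both mutator and GC allocations. Mutator
// allocations are subject to pacing, optional failure injection, and the
// alloc-failure protocol with the control thread; GC allocations must never
// block, since they may arrive at a safepoint or from stack watermark machinery.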
HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
  intptr_t pacer_epoch = 0;
  bool in_new_region = false;
  HeapWord* result = nullptr;

  if (req.is_mutator_alloc()) {
    if (ShenandoahPacing) {
      pacer()->pace_for_alloc(req.size());
      pacer_epoch = pacer()->epoch();
    }

    if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
      result = allocate_memory_under_lock(req, in_new_region);
    }

    // Check that GC overhead is not exceeded.
    //
    // Shenandoah will grind along for quite a while allocating one
    // object at a time using shared (non-TLAB) allocations. This check
    // is testing that the GC overhead limit has not been exceeded.
    // This will notify the collector to start a cycle, but will raise
    // an OOME to the mutator if the last Full GCs have not made progress.
    if (result == nullptr && !req.is_lab_alloc() && get_gc_no_progress_count() > ShenandoahNoProgressThreshold) {
      control_thread()->handle_alloc_failure(req, false);
      return nullptr;
    }

    if (result == nullptr) {
      // Block until the control thread reacted, then retry allocation.
      //
      // It might happen that one of the threads requesting allocation would unblock
      // way later after GC happened, only to fail the second allocation, because
      // other threads have already depleted the free storage. In this case, a better
      // strategy is to try again, until at least one full GC has completed.
      //
      // Stop retrying and return nullptr to cause an OOMError exception if our allocation failed even after:
      //   a) We experienced a GC that had good progress, or
      //   b) We experienced at least one Full GC (whether or not it had good progress)
      //
      // TODO: Consider GLOBAL GC rather than Full GC to remediate OOM condition: https://bugs.openjdk.org/browse/JDK-8335910

      size_t original_count = shenandoah_policy()->full_gc_count();
      while ((result == nullptr) && (original_count == shenandoah_policy()->full_gc_count())) {
        control_thread()->handle_alloc_failure(req, true);
        result = allocate_memory_under_lock(req, in_new_region);
      }
      if (result != nullptr) {
        // If our allocation request has been satisfied after it initially failed, we count this as good GC progress
        notify_gc_progress();
      }
      if (log_is_enabled(Debug, gc, alloc)) {
        ResourceMark rm;
        log_debug(gc, alloc)("Thread: %s, Result: " PTR_FORMAT ", Request: %s, Size: " SIZE_FORMAT
                             ", Original: " SIZE_FORMAT ", Latest: " SIZE_FORMAT,
                             Thread::current()->name(), p2i(result), req.type_string(), req.size(),
                             original_count, get_gc_no_progress_count());
      }
    }
  } else {
    assert(req.is_gc_alloc(), "Can only accept GC allocs here");
    result = allocate_memory_under_lock(req, in_new_region);
    // Do not call handle_alloc_failure() here, because we cannot block.
    // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
  }

  if (in_new_region) {
    notify_heap_changed();
  }

  if (result != nullptr) {
    size_t requested = req.size();
    size_t actual = req.actual_size();

    assert (req.is_lab_alloc() || (requested == actual),
            "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
            ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);

    if (req.is_mutator_alloc()) {
      notify_mutator_alloc_words(actual, false);

      // If we requested more than we were granted, give the rest back to the pacer.
      // This only matters if we are in the same pacing epoch: do not try to unpace
      // over the budget for the other phase.
      if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
        pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
      }
    } else {
      increase_used(actual * HeapWordSize);
    }
  }

  return result;
}

HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
  // If we are dealing with mutator allocation, then we may need to block for safepoint.
  // We cannot block for safepoint for GC allocations, because there is a high chance
  // we are already running at safepoint or from stack watermark machinery, and we cannot
  // block again.
  ShenandoahHeapLocker locker(lock(), req.is_mutator_alloc());
  return _free_set->allocate(req, in_new_region);
}

HeapWord* ShenandoahHeap::mem_allocate(size_t size,
                                       bool*  gc_overhead_limit_was_exceeded) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
  return allocate_memory(req);
}

MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                             size_t size,
                                                             Metaspace::MetadataType mdtype) {
  MetaWord* result;

  // Inform metaspace OOM to GC heuristics if class unloading is possible.
  if (heuristics()->can_unload_classes()) {
    ShenandoahHeuristics* h = heuristics();
    h->record_metaspace_oom();
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != nullptr) {
    return result;
  }

  // Start full GC
  collect(GCCause::_metadata_GC_clear_soft_refs);

  // Retry allocation
  result = loader_data->metaspace_non_null()->allocate(size, mdtype);
  if (result != nullptr) {
    return result;
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != nullptr) {
    return result;
  }

  // Out of memory
  return nullptr;
}

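// Object closure used during evacuation: copies each live object in a
// collection set region, unless another thread has already forwarded it.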
class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  Thread* const _thread;
public:
  ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
    _heap(heap), _thread(Thread::current()) {}

  void do_object(oop p) {
    shenandoah_assert_marked(nullptr, p);
    if (!p->is_forwarded()) {
      _heap->evacuate_object(p, _thread);
    }
  }
};

class ShenandoahEvacuationTask : public WorkerTask {
private:
  ShenandoahHeap* const _sh;
  ShenandoahCollectionSet* const _cs;
  bool _concurrent;
public:
  ShenandoahEvacuationTask(ShenandoahHeap* sh,
                           ShenandoahCollectionSet* cs,
                           bool concurrent) :
    WorkerTask("Shenandoah Evacuation"),
    _sh(sh),
    _cs(cs),
    _concurrent(concurrent)
  {}

  void work(uint worker_id) {
    if (_concurrent) {
      ShenandoahConcurrentWorkerSession worker_session(worker_id);
      ShenandoahSuspendibleThreadSetJoiner stsj;
      ShenandoahEvacOOMScope oom_evac_scope;
      do_work();
    } else {
      ShenandoahParallelWorkerSession worker_session(worker_id);
      ShenandoahEvacOOMScope oom_evac_scope;
      do_work();
    }
  }

private:
  void do_work() {
    ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
    ShenandoahHeapRegion* r;
    while ((r = _cs->claim_next()) != nullptr) {
      assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
      _sh->marked_object_iterate(r, &cl);

      if (ShenandoahPacing) {
        _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
      }

      if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
        break;
      }
    }
  }
};

void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
  ShenandoahEvacuationTask task(this, _collection_set, concurrent);
  workers()->run_task(&task);
}

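// Evacuate a single object into a GCLAB or shared GC memory. Returns the
// winning copy: either ours, or the one installed by a racing evacuator.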
oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
  if (ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {
    // This thread went through the OOM during evac protocol and it is safe to return
    // the forward pointer. It must not attempt to evacuate any more.
    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");

  size_t size = p->size();

  assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");

  bool alloc_from_gclab = true;
  HeapWord* copy = nullptr;

#ifdef ASSERT
  if (ShenandoahOOMDuringEvacALot &&
      (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
    copy = nullptr;
  } else {
#endif
    if (UseTLAB) {
      copy = allocate_from_gclab(thread, size);
    }
    if (copy == nullptr) {
      ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size);
      copy = allocate_memory(req);
      alloc_from_gclab = false;
    }
#ifdef ASSERT
  }
#endif

  if (copy == nullptr) {
    control_thread()->handle_alloc_failure_evac(size);

    _oom_evac_handler.handle_out_of_memory_during_evacuation();

    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  // Copy the object:
  Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);

  // Try to install the new forwarding pointer.
  oop copy_val = cast_to_oop(copy);
  oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
  if (result == copy_val) {
    // Successfully evacuated. Our copy is now the public one!
    ContinuationGCSupport::relativize_stack_chunk(copy_val);
    shenandoah_assert_correct(nullptr, copy_val);
    return copy_val;
  } else {
    // Failed to evacuate. We need to deal with the object that is left behind. Since this
    // new allocation is certainly after TAMS, it will be considered live in the next cycle.
    // But if it happens to contain references to evacuated regions, those references would
    // not get updated for this stale copy during this cycle, and we will crash while scanning
    // it the next cycle.
    //
    // For GCLAB allocations, it is enough to rollback the allocation ptr. Either the next
    // object will overwrite this stale copy, or the filler object on LAB retirement will
    // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
    // have to explicitly overwrite the copy with the filler object. With that overwrite,
    // we have to keep the fwdptr initialized and pointing to our (stale) copy.
    if (alloc_from_gclab) {
      ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
    } else {
      fill_with_object(copy, size);
      shenandoah_assert_correct(nullptr, copy_val);
    }
    shenandoah_assert_correct(nullptr, result);
    return result;
  }
}

void ShenandoahHeap::trash_cset_regions() {
  ShenandoahHeapLocker locker(lock());

  ShenandoahCollectionSet* set = collection_set();
  ShenandoahHeapRegion* r;
  set->clear_current_index();
  while ((r = set->next()) != nullptr) {
    r->make_trash();
  }
  collection_set()->clear();
}

void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
  st->print_cr("Heap Regions:");
  st->print_cr("Region state: EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HP=pinned humongous start");
  st->print_cr("              HC=humongous continuation, CS=collection set, TR=trash, P=pinned, CSP=pinned collection set");
  st->print_cr("BTE=bottom/top/end, TAMS=top-at-mark-start");
  st->print_cr("UWM=update watermark, U=used");
  st->print_cr("T=TLAB allocs, G=GCLAB allocs");
  st->print_cr("S=shared allocs, L=live data");
  st->print_cr("CP=critical pins");

  for (size_t i = 0; i < num_regions(); i++) {
    get_region(i)->print_on(st);
  }
}

void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
  assert(start->is_humongous_start(), "reclaim regions starting with the first one");

  oop humongous_obj = cast_to_oop(start->bottom());
  size_t size = humongous_obj->size();
  size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
  size_t index = start->index() + required_regions - 1;

  assert(!start->has_live(), "liveness must be zero");

  for (size_t i = 0; i < required_regions; i++) {
    // Reclaim from tail. Otherwise, assertion fails when printing region to trace log,
    // as it expects that every region belongs to a humongous region starting with a humongous start region.
    ShenandoahHeapRegion* region = get_region(index--);

    assert(region->is_humongous(), "expect correct humongous start or continuation");
    assert(!region->is_cset(), "Humongous region should not be in collection set");

    region->make_trash_immediate();
  }
}

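// Debug-only check that each thread's GCLAB is initialized and has no words
// remaining, i.e. needs no retirement. Used in the assertion pass of
// tlabs_retire() below.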
1239 class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
1240 public:
1241   ShenandoahCheckCleanGCLABClosure() {}
1242   void do_thread(Thread* thread) {
1243     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1244     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1245     assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
1246   }
1247 };
1248 
1249 class ShenandoahRetireGCLABClosure : public ThreadClosure {
1250 private:
1251   bool const _resize;
1252 public:
1253   ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
1254   void do_thread(Thread* thread) {
1255     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1256     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1257     gclab->retire();
1258     if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1259       ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1260     }
1261   }
1262 };
1263 
1264 void ShenandoahHeap::labs_make_parsable() {
1265   assert(UseTLAB, "Only call with UseTLAB");
1266 
1267   ShenandoahRetireGCLABClosure cl(false);
1268 
1269   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1270     ThreadLocalAllocBuffer& tlab = t->tlab();
1271     tlab.make_parsable();
1272     cl.do_thread(t);
1273   }
1274 
1275   workers()->threads_do(&cl);
1276 }
1277 
1278 void ShenandoahHeap::tlabs_retire(bool resize) {
1279   assert(UseTLAB, "Only call with UseTLAB");
1280   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1281 
1282   ThreadLocalAllocStats stats;
1283 
1284   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1285     ThreadLocalAllocBuffer& tlab = t->tlab();
1286     tlab.retire(&stats);
1287     if (resize) {
1288       tlab.resize();
1289     }
1290   }
1291 
1292   stats.publish();
1293 
1294 #ifdef ASSERT
1295   ShenandoahCheckCleanGCLABClosure cl;
1296   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1297     cl.do_thread(t);
1298   }
1299   workers()->threads_do(&cl);
1300 #endif
1301 }
1302 
1303 void ShenandoahHeap::gclabs_retire(bool resize) {
1304   assert(UseTLAB, "Only call with UseTLAB");
1305   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1306 
1307   ShenandoahRetireGCLABClosure cl(resize);
1308   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1309     cl.do_thread(t);
1310   }
1311   workers()->threads_do(&cl);
1312 
1313   if (safepoint_workers() != nullptr) {
1314     safepoint_workers()->threads_do(&cl);
1315   }
1316 }
1317 
1318 // Returns size in bytes
1319 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1320   // Return the max allowed size, and let the allocation path
1321   // figure out the safe size for current allocation.
1322   return ShenandoahHeapRegion::max_tlab_size_bytes();
1323 }

size_t ShenandoahHeap::max_tlab_size() const {
  // Returns size in words
  return ShenandoahHeapRegion::max_tlab_size_words();
}

void ShenandoahHeap::collect(GCCause::Cause cause) {
  control_thread()->request_gc(cause);
}

void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
  //assert(false, "Shouldn't need to do full collections");
}

HeapWord* ShenandoahHeap::block_start(const void* addr) const {
  ShenandoahHeapRegion* r = heap_region_containing(addr);
  if (r != nullptr) {
    return r->block_start(addr);
  }
  return nullptr;
}

bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
  ShenandoahHeapRegion* r = heap_region_containing(addr);
  return r->block_is_obj(addr);
}

bool ShenandoahHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr);
}

void ShenandoahHeap::prepare_for_verify() {
  if (SafepointSynchronize::is_at_safepoint() && UseTLAB) {
    labs_make_parsable();
  }
}

void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
  if (_shenandoah_policy->is_at_shutdown()) {
    return;
  }

  if (_control_thread != nullptr) {
    tcl->do_thread(_control_thread);
  }

  workers()->threads_do(tcl);
  if (_safepoint_workers != nullptr) {
    _safepoint_workers->threads_do(tcl);
  }
}

void ShenandoahHeap::print_tracing_info() const {
  LogTarget(Info, gc, stats) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);

    phase_timings()->print_global_on(&ls);

    ls.cr();
    ls.cr();

    shenandoah_policy()->print_gc_stats(&ls);

    ls.cr();
    ls.cr();
  }
}

void ShenandoahHeap::verify(VerifyOption vo) {
  if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
    if (ShenandoahVerify) {
      verifier()->verify_generic(vo);
    } else {
      // TODO: Consider allocating verification bitmaps on demand,
      // and turn this on unconditionally.
    }
  }
}

size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
  return _free_set->capacity();
}

class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
private:
  MarkBitMap* _bitmap;
  ShenandoahScanObjectStack* _oop_stack;
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _marking_context;

  template <class T>
  void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
        // There may be dead oops in weak roots during the concurrent root phase; do not touch them.
        return;
      }
      obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);

      assert(oopDesc::is_oop(obj), "must be a valid oop");
      if (!_bitmap->is_marked(obj)) {
        _bitmap->mark(obj);
        _oop_stack->push(obj);
      }
    }
  }
public:
  ObjectIterateScanRootClosure(MarkBitMap* bitmap, ShenandoahScanObjectStack* oop_stack) :
    _bitmap(bitmap), _oop_stack(oop_stack), _heap(ShenandoahHeap::heap()),
    _marking_context(_heap->marking_context()) {}
  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

/*
 * This is public API, used in preparation of object_iterate().
 * Since we don't do a linear scan of the heap in object_iterate() (see comment below),
 * we don't need to make the heap parsable. For Shenandoah-internal linear heap scans
 * that we can control, we call SH::tlabs_retire and SH::gclabs_retire.
 */
void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
  // No-op.
}

/*
 * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
 *
 * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
 * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
 * calling oopDesc::size() would crash) or dangling reference fields (also crashes), etc.
 * Linear scanning therefore depends on having a valid marking bitmap to support it. However,
 * we only have a valid marking bitmap after successful marking. In particular, we *don't*
 * have a valid marking bitmap during marking, after aborted marking, or during/after cleanup
 * (when we just wiped the bitmap in preparation for the next marking).
 *
 * For all those reasons, we implement object iteration as a single marking traversal, reporting
 * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
 * is allowed to report dead objects, but is not required to do so.
 */
void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
  // Reset bitmap
  if (!prepare_aux_bitmap_for_iteration())
    return;

  ShenandoahScanObjectStack oop_stack;
  ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
  // Seed the stack with root scan
  scan_roots_for_iteration(&oop_stack, &oops);

  // Work through the oop stack to traverse heap
  while (!oop_stack.is_empty()) {
    oop obj = oop_stack.pop();
    assert(oopDesc::is_oop(obj), "must be a valid oop");
    cl->do_object(obj);
    obj->oop_iterate(&oops);
  }

  assert(oop_stack.is_empty(), "should be empty");
  // Reclaim bitmap
  reclaim_aux_bitmap_for_iteration();
}
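
// Example usage (a minimal sketch; CountObjectsClosure is hypothetical and for
// illustration only). object_iterate() must run at a safepoint, e.g. from a
// VM operation driving a heap dump:
//
//   class CountObjectsClosure : public ObjectClosure {
//   public:
//     size_t _count;
//     CountObjectsClosure() : _count(0) {}
//     void do_object(oop obj) { _count++; }
//   };
//
//   CountObjectsClosure cnt;
//   ShenandoahHeap::heap()->object_iterate(&cnt);
//   // cnt._count now holds the number of reachable objects reported.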

bool ShenandoahHeap::prepare_aux_bitmap_for_iteration() {
  assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");

  if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
    log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
    return false;
  }
  // Reset bitmap
  _aux_bit_map.clear();
  return true;
}

void ShenandoahHeap::scan_roots_for_iteration(ShenandoahScanObjectStack* oop_stack, ObjectIterateScanRootClosure* oops) {
  // Process GC roots according to the current GC cycle.
  // This populates the work stack with initial objects.
  // It is important to relinquish the associated locks before diving
  // into the heap dumper.
  uint n_workers = safepoint_workers() != nullptr ? safepoint_workers()->active_workers() : 1;
  ShenandoahHeapIterationRootScanner rp(n_workers);
  rp.roots_do(oops);
}

void ShenandoahHeap::reclaim_aux_bitmap_for_iteration() {
  if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
    log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
  }
}

// Closure for iterating objects in parallel.
class ShenandoahObjectIterateParScanClosure : public BasicOopIterateClosure {
private:
  MarkBitMap* _bitmap;
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _marking_context;

  template <class T>
  void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
        // There may be dead oops in weak roots during the concurrent root phase; do not touch them.
        return;
      }
      obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);

      assert(oopDesc::is_oop(obj), "Must be a valid oop");
      if (_bitmap->par_mark(obj)) {
        _queue->push(ShenandoahMarkTask(obj));
      }
    }
  }
public:
  ShenandoahObjectIterateParScanClosure(MarkBitMap* bitmap, ShenandoahObjToScanQueue* q) :
    _bitmap(bitmap), _queue(q), _heap(ShenandoahHeap::heap()),
    _marking_context(_heap->marking_context()) {}
  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

// Object iterator for parallel heap iteration.
// The root scanning phase happens in the constructor, as a preparation of
// the parallel marking queues.
// Every worker processes its own marking queue; work-stealing is used
// to balance the workload.
class ShenandoahParallelObjectIterator : public ParallelObjectIteratorImpl {
private:
  uint                         _num_workers;
  bool                         _init_ready;
  MarkBitMap*                  _aux_bit_map;
  ShenandoahHeap*              _heap;
  ShenandoahScanObjectStack    _roots_stack; // global roots stack
  ShenandoahObjToScanQueueSet* _task_queues;
public:
  ShenandoahParallelObjectIterator(uint num_workers, MarkBitMap* bitmap) :
        _num_workers(num_workers),
        _init_ready(false),
        _aux_bit_map(bitmap),
        _heap(ShenandoahHeap::heap()),
        // Initialize eagerly: the destructor reads _task_queues even when
        // bitmap preparation fails and we return before creating the queues.
        _task_queues(nullptr) {
    // Initialize bitmap
    _init_ready = _heap->prepare_aux_bitmap_for_iteration();
    if (!_init_ready) {
      return;
    }

    ObjectIterateScanRootClosure oops(_aux_bit_map, &_roots_stack);
    _heap->scan_roots_for_iteration(&_roots_stack, &oops);

    _init_ready = prepare_worker_queues();
  }

  ~ShenandoahParallelObjectIterator() {
    // Reclaim bitmap
    _heap->reclaim_aux_bitmap_for_iteration();
    // Reclaim queues for workers
    if (_task_queues != nullptr) {
      for (uint i = 0; i < _num_workers; ++i) {
        ShenandoahObjToScanQueue* q = _task_queues->queue(i);
        if (q != nullptr) {
          delete q;
          _task_queues->register_queue(i, nullptr);
        }
      }
      delete _task_queues;
      _task_queues = nullptr;
    }
  }

  virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
    if (_init_ready) {
      object_iterate_parallel(cl, worker_id, _task_queues);
    }
  }

private:
  // Divide the global roots stack into worker queues
  bool prepare_worker_queues() {
    _task_queues = new ShenandoahObjToScanQueueSet((int) _num_workers);
    // Initialize a queue for each worker
    for (uint i = 0; i < _num_workers; ++i) {
      ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
      _task_queues->register_queue(i, task_queue);
    }
    // Divide roots among the workers. Assume that object reference distribution
    // correlates with root kind; use round-robin so that every worker has the same
    // chance to process every kind of root.
    size_t roots_num = _roots_stack.size();
    if (roots_num == 0) {
      // No work to do
      return false;
    }

    for (uint j = 0; j < roots_num; j++) {
      uint stack_id = j % _num_workers;
      oop obj = _roots_stack.pop();
      _task_queues->queue(stack_id)->push(ShenandoahMarkTask(obj));
    }
    return true;
  }

  void object_iterate_parallel(ObjectClosure* cl,
                               uint worker_id,
                               ShenandoahObjToScanQueueSet* queue_set) {
    assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
    assert(queue_set != nullptr, "task queue must not be null");

    ShenandoahObjToScanQueue* q = queue_set->queue(worker_id);
    assert(q != nullptr, "object iterate queue must not be null");

    ShenandoahMarkTask t;
    ShenandoahObjectIterateParScanClosure oops(_aux_bit_map, q);

    // Work through the queue to traverse the heap.
    // Steal when there is no task in the queue.
    while (q->pop(t) || queue_set->steal(worker_id, t)) {
      oop obj = t.obj();
      assert(oopDesc::is_oop(obj), "must be a valid oop");
      cl->do_object(obj);
      obj->oop_iterate(&oops);
    }
    assert(q->is_empty(), "should be empty");
  }
};

ParallelObjectIteratorImpl* ShenandoahHeap::parallel_object_iterator(uint workers) {
  return new ShenandoahParallelObjectIterator(workers, &_aux_bit_map);
}
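
// A minimal usage sketch (illustrative only): the caller, e.g. the heap-dump
// machinery, runs the returned iterator from a worker task at a safepoint,
// passing each worker its own worker_id:
//
//   ParallelObjectIteratorImpl* iter = heap->parallel_object_iterator(nworkers);
//   // in each worker: iter->object_iterate(cl, worker_id);
//   delete iter;  // reclaims the aux bitmap and the per-worker queues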

// Keep alive an object that was loaded with AS_NO_KEEPALIVE.
void ShenandoahHeap::keep_alive(oop obj) {
  if (is_concurrent_mark_in_progress() && (obj != nullptr)) {
    ShenandoahBarrierSet::barrier_set()->enqueue(obj);
  }
}

void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
  for (size_t i = 0; i < num_regions(); i++) {
    ShenandoahHeapRegion* current = get_region(i);
    blk->heap_region_do(current);
  }
}

class ShenandoahParallelHeapRegionTask : public WorkerTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionClosure* const _blk;
  size_t const _stride;

  shenandoah_padding(0);
  volatile size_t _index;
  shenandoah_padding(1);

public:
  ShenandoahParallelHeapRegionTask(ShenandoahHeapRegionClosure* blk, size_t stride) :
          WorkerTask("Shenandoah Parallel Region Operation"),
          _heap(ShenandoahHeap::heap()), _blk(blk), _stride(stride), _index(0) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    size_t stride = _stride;

    size_t max = _heap->num_regions();
    while (Atomic::load(&_index) < max) {
      size_t cur = Atomic::fetch_then_add(&_index, stride, memory_order_relaxed);
      size_t start = cur;
      size_t end = MIN2(cur + stride, max);
      if (start >= max) break;

      for (size_t i = cur; i < end; i++) {
        ShenandoahHeapRegion* current = _heap->get_region(i);
        _blk->heap_region_do(current);
      }
    }
  }
};

void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
  assert(blk->is_thread_safe(), "Only thread-safe closures here");
  const uint active_workers = workers()->active_workers();
  const size_t n_regions = num_regions();
  size_t stride = ShenandoahParallelRegionStride;
  if (stride == 0 && active_workers > 1) {
    // Automatically derive the stride to balance the work between threads
    // evenly. Do not try to split work if below the reasonable threshold.
    constexpr size_t threshold = 4096;
    stride = n_regions <= threshold ?
            threshold :
            (n_regions + active_workers - 1) / active_workers;
  }
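  // For example, with 16384 regions and 8 active workers the derived stride is
  // (16384 + 8 - 1) / 8 = 2048 regions per claim, while a heap with 2048 regions
  // keeps the 4096-region stride and therefore falls through to the serial path
  // below, since splitting such a small amount of work is not worthwhile.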

  if (n_regions > stride && active_workers > 1) {
    ShenandoahParallelHeapRegionTask task(blk, stride);
    workers()->run_task(&task);
  } else {
    heap_region_iterate(blk);
  }
}

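// An intentionally empty handshake: executing it forces a rendezvous with
// every Java thread, which guarantees that all of them have observed any
// preceding global state change before the caller proceeds.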
class ShenandoahRendezvousClosure : public HandshakeClosure {
public:
  inline ShenandoahRendezvousClosure(const char* name) : HandshakeClosure(name) {}
  inline void do_thread(Thread* thread) {}
};

void ShenandoahHeap::rendezvous_threads(const char* name) {
  ShenandoahRendezvousClosure cl(name);
  Handshake::execute(&cl);
}

void ShenandoahHeap::recycle_trash() {
  free_set()->recycle_trash();
}

class ShenandoahResetUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
public:
  ShenandoahResetUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_active()) {
      // Reset live data and set TAMS optimistically. We would recheck these under the pause
      // anyway to capture any updates that happened in the meantime.
      r->clear_live_data();
      _ctx->capture_top_at_mark_start(r);
    }
  }

  bool is_thread_safe() { return true; }
};

void ShenandoahHeap::prepare_gc() {
  reset_mark_bitmap();

  ShenandoahResetUpdateRegionStateClosure cl;
  parallel_heap_region_iterate(&cl);
}

class ShenandoahFinalMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
  ShenandoahHeapLock* const _lock;

public:
  ShenandoahFinalMarkUpdateRegionStateClosure() :
    _ctx(ShenandoahHeap::heap()->complete_marking_context()), _lock(ShenandoahHeap::heap()->lock()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_active()) {
      // All allocations past TAMS are implicitly live, adjust the region data.
      // Bitmaps/TAMS are swapped at this point, so we need to poll the complete bitmap.
      HeapWord *tams = _ctx->top_at_mark_start(r);
      HeapWord *top = r->top();
      if (top > tams) {
        r->increase_live_data_alloc_words(pointer_delta(top, tams));
      }

      // We are about to select the collection set, make sure it knows about
      // the current pinning status. Also, this allows trashing regions whose
      // pinning status has just been dropped.
      if (r->is_pinned()) {
        if (r->pin_count() == 0) {
          ShenandoahHeapLocker locker(_lock);
          r->make_unpinned();
        }
      } else {
        if (r->pin_count() > 0) {
          ShenandoahHeapLocker locker(_lock);
          r->make_pinned();
        }
      }

      // Remember the limit for updating refs. It is guaranteed that we get no
      // from-space-refs written from here on.
      r->set_update_watermark_at_safepoint(r->top());
    } else {
      assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
      assert(_ctx->top_at_mark_start(r) == r->top(),
             "Region " SIZE_FORMAT " should have correct TAMS", r->index());
    }
  }

  bool is_thread_safe() { return true; }
};

void ShenandoahHeap::prepare_regions_and_collection_set(bool concurrent) {
  assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
  {
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_update_region_states :
                                         ShenandoahPhaseTimings::degen_gc_final_update_region_states);
    ShenandoahFinalMarkUpdateRegionStateClosure cl;
    parallel_heap_region_iterate(&cl);

    assert_pinned_region_status();
  }

  {
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::choose_cset :
                                         ShenandoahPhaseTimings::degen_gc_choose_cset);
    ShenandoahHeapLocker locker(lock());
    _collection_set->clear();
    heuristics()->choose_collection_set(_collection_set);
  }

  {
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset :
                                         ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
    ShenandoahHeapLocker locker(lock());
    _free_set->rebuild();
  }
}

void ShenandoahHeap::do_class_unloading() {
  _unloader.unload();
}

void ShenandoahHeap::stw_weak_refs(bool full_gc) {
  // Weak refs processing
  ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
                                                : ShenandoahPhaseTimings::degen_gc_weakrefs;
  ShenandoahTimingsTracker t(phase);
  ShenandoahGCWorkerPhase worker_phase(phase);
  ref_processor()->process_references(phase, workers(), false /* concurrent */);
}

void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");

  // Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to
  // make them parsable for update code to work correctly. Plus, we can compute new sizes
  // for future GCLABs here.
  if (UseTLAB) {
    ShenandoahGCPhase phase(concurrent ?
                            ShenandoahPhaseTimings::init_update_refs_manage_gclabs :
                            ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
    gclabs_retire(ResizeTLAB);
  }

  _update_refs_iterator.reset();
}

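// Push the canonical gc-state byte into every Java thread's thread-local copy,
// so the barrier fast paths observe the change. Only done when the state has
// actually changed during the current safepoint.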
void ShenandoahHeap::propagate_gc_state_to_java_threads() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
  if (_gc_state_changed) {
    _gc_state_changed = false;
    char state = gc_state();
    for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
      ShenandoahThreadLocalData::set_gc_state(t, state);
    }
  }
}

void ShenandoahHeap::set_gc_state(uint mask, bool value) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
  _gc_state.set_cond(mask, value);
  _gc_state_changed = true;
}

void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
  assert(!has_forwarded_objects(), "Not expected before/after mark phase");
  set_gc_state(MARKING, in_progress);
  ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
}

void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
  set_gc_state(EVACUATION, in_progress);
}

void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
  if (in_progress) {
    _concurrent_strong_root_in_progress.set();
  } else {
    _concurrent_strong_root_in_progress.unset();
  }
}

void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
  set_gc_state(WEAK_ROOTS, cond);
}

GCTracer* ShenandoahHeap::tracer() {
  return shenandoah_policy()->tracer();
}

size_t ShenandoahHeap::tlab_used(Thread* thread) const {
  return _free_set->used();
}

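// Atomically flip the cancellation flag from CANCELLABLE to CANCELLED. Returns
// true only for the single caller whose compare-and-swap performed the
// transition; concurrent callers observe the already-cancelled state and fail.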
bool ShenandoahHeap::try_cancel_gc() {
  jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
  return prev == CANCELLABLE;
}

void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
  if (try_cancel_gc()) {
    FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
    log_info(gc)("%s", msg.buffer());
    Events::log(Thread::current(), "%s", msg.buffer());
  }
}

uint ShenandoahHeap::max_workers() {
  return _max_workers;
}

void ShenandoahHeap::stop() {
  // The shutdown sequence should be able to terminate while GC is running.

  // Step 0. Notify policy to disable event recording and prevent visiting GC threads during shutdown.
  _shenandoah_policy->record_shutdown();

  // Step 1. Notify control thread that we are in shutdown.
  // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
  // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
  control_thread()->prepare_for_graceful_shutdown();

  // Step 2. Notify GC workers that we are cancelling GC.
  cancel_gc(GCCause::_shenandoah_stop_vm);

  // Step 3. Wait until GC worker exits normally.
  control_thread()->stop();
}

void ShenandoahHeap::stw_unload_classes(bool full_gc) {
  if (!unload_classes()) return;
  ClassUnloadingContext ctx(_workers->active_workers(),
                            true /* unregister_nmethods_during_purge */,
                            false /* lock_nmethod_free_separately */);

  // Unload classes and purge SystemDictionary.
  {
    ShenandoahPhaseTimings::Phase phase = full_gc ?
                                          ShenandoahPhaseTimings::full_gc_purge_class_unload :
                                          ShenandoahPhaseTimings::degen_gc_purge_class_unload;
    ShenandoahIsAliveSelector is_alive;
    {
      CodeCache::UnlinkingScope scope(is_alive.is_alive_closure());
      ShenandoahGCPhase gc_phase(phase);
      ShenandoahGCWorkerPhase worker_phase(phase);
      bool unloading_occurred = SystemDictionary::do_unloading(gc_timer());

      uint num_workers = _workers->active_workers();
      ShenandoahClassUnloadingTask unlink_task(phase, num_workers, unloading_occurred);
      _workers->run_task(&unlink_task);
    }
    // Release unloaded nmethods' memory.
    ClassUnloadingContext::context()->purge_and_free_nmethods();
  }

  {
    ShenandoahGCPhase phase(full_gc ?
                            ShenandoahPhaseTimings::full_gc_purge_cldg :
                            ShenandoahPhaseTimings::degen_gc_purge_cldg);
    ClassLoaderDataGraph::purge(true /* at_safepoint */);
  }
  // Resize and verify metaspace
  MetaspaceGC::compute_new_size();
  DEBUG_ONLY(MetaspaceUtils::verify();)
}

// Weak roots are either pre-evacuated (final mark) or updated (final updaterefs),
// so they should not have forwarded oops.
// However, we do need to "null" dead oops in the roots, if that cannot be done
// in concurrent cycles.
void ShenandoahHeap::stw_process_weak_roots(bool full_gc) {
  uint num_workers = _workers->active_workers();
  ShenandoahPhaseTimings::Phase timing_phase = full_gc ?
                                               ShenandoahPhaseTimings::full_gc_purge_weak_par :
                                               ShenandoahPhaseTimings::degen_gc_purge_weak_par;
  ShenandoahGCPhase phase(timing_phase);
  ShenandoahGCWorkerPhase worker_phase(timing_phase);
  // Cleanup weak roots
  if (has_forwarded_objects()) {
    ShenandoahForwardedIsAliveClosure is_alive;
    ShenandoahUpdateRefsClosure keep_alive;
    ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveClosure, ShenandoahUpdateRefsClosure>
      cleaning_task(timing_phase, &is_alive, &keep_alive, num_workers);
    _workers->run_task(&cleaning_task);
  } else {
    ShenandoahIsAliveClosure is_alive;
#ifdef ASSERT
    ShenandoahAssertNotForwardedClosure verify_cl;
    ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, ShenandoahAssertNotForwardedClosure>
      cleaning_task(timing_phase, &is_alive, &verify_cl, num_workers);
#else
    ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, DoNothingClosure>
      cleaning_task(timing_phase, &is_alive, &do_nothing_cl, num_workers);
#endif
    _workers->run_task(&cleaning_task);
  }
}

void ShenandoahHeap::parallel_cleaning(bool full_gc) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  assert(is_stw_gc_in_progress(), "Only for Degenerated and Full GC");
  ShenandoahGCPhase phase(full_gc ?
                          ShenandoahPhaseTimings::full_gc_purge :
                          ShenandoahPhaseTimings::degen_gc_purge);
  stw_weak_refs(full_gc);
  stw_process_weak_roots(full_gc);
  stw_unload_classes(full_gc);
}

void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
  set_gc_state(HAS_FORWARDED, cond);
}

void ShenandoahHeap::set_unload_classes(bool uc) {
  _unload_classes.set_cond(uc);
}

bool ShenandoahHeap::unload_classes() const {
  return _unload_classes.is_set();
}

address ShenandoahHeap::in_cset_fast_test_addr() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(heap->collection_set() != nullptr, "Sanity");
  return (address) heap->collection_set()->biased_map_address();
}

size_t ShenandoahHeap::bytes_allocated_since_gc_start() const {
  return Atomic::load(&_bytes_allocated_since_gc_start);
}

void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
  Atomic::store(&_bytes_allocated_since_gc_start, (size_t)0);
}

void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
  _degenerated_gc_in_progress.set_cond(in_progress);
}

void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
  _full_gc_in_progress.set_cond(in_progress);
}

void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
  assert (is_full_gc_in_progress(), "should be");
  _full_gc_move_in_progress.set_cond(in_progress);
}

void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
  set_gc_state(UPDATEREFS, in_progress);
}

void ShenandoahHeap::register_nmethod(nmethod* nm) {
  ShenandoahCodeRoots::register_nmethod(nm);
}

void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
  ShenandoahCodeRoots::unregister_nmethod(nm);
}

void ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
  heap_region_containing(o)->record_pin();
}

void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
  ShenandoahHeapRegion* r = heap_region_containing(o);
  assert(r != nullptr, "Sanity");
  assert(r->pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", r->index());
  r->record_unpin();
}

void ShenandoahHeap::sync_pinned_region_status() {
  ShenandoahHeapLocker locker(lock());

  for (size_t i = 0; i < num_regions(); i++) {
    ShenandoahHeapRegion *r = get_region(i);
    if (r->is_active()) {
      if (r->is_pinned()) {
        if (r->pin_count() == 0) {
          r->make_unpinned();
        }
      } else {
        if (r->pin_count() > 0) {
          r->make_pinned();
        }
      }
    }
  }

  assert_pinned_region_status();
}

#ifdef ASSERT
void ShenandoahHeap::assert_pinned_region_status() {
  for (size_t i = 0; i < num_regions(); i++) {
    ShenandoahHeapRegion* r = get_region(i);
    assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
           "Region " SIZE_FORMAT " pinning status is inconsistent", i);
  }
}
#endif

ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
  return _gc_timer;
}

void ShenandoahHeap::prepare_concurrent_roots() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  assert(!is_stw_gc_in_progress(), "Only concurrent GC");
  set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
  set_concurrent_weak_root_in_progress(true);
  if (unload_classes()) {
    _unloader.prepare();
  }
}

void ShenandoahHeap::finish_concurrent_roots() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  assert(!is_stw_gc_in_progress(), "Only concurrent GC");
  if (unload_classes()) {
    _unloader.finish();
  }
}

#ifdef ASSERT
void ShenandoahHeap::assert_gc_workers(uint nworkers) {
  assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");

  if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
    // Use ParallelGCThreads inside safepoints
    assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads (%u) within safepoint, not %u",
           ParallelGCThreads, nworkers);
  } else {
    // Use ConcGCThreads outside safepoints
    assert(nworkers == ConcGCThreads, "Use ConcGCThreads (%u) outside safepoints, not %u",
           ConcGCThreads, nworkers);
  }
}
#endif

ShenandoahVerifier* ShenandoahHeap::verifier() {
  guarantee(ShenandoahVerify, "Should be enabled");
  assert (_verifier != nullptr, "sanity");
  return _verifier;
}

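// The task is specialized at compile time on CONCURRENT, so the per-region hot
// loop below uses the matching closure and worker session without any runtime
// branching between the concurrent and stop-the-world variants.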
template<bool CONCURRENT>
class ShenandoahUpdateHeapRefsTask : public WorkerTask {
private:
  ShenandoahHeap* _heap;
  ShenandoahRegionIterator* _regions;
public:
  ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions) :
    WorkerTask("Shenandoah Update References"),
    _heap(ShenandoahHeap::heap()),
    _regions(regions) {
  }

  void work(uint worker_id) {
    if (CONCURRENT) {
      ShenandoahConcurrentWorkerSession worker_session(worker_id);
      ShenandoahSuspendibleThreadSetJoiner stsj;
      do_work<ShenandoahConcUpdateRefsClosure>(worker_id);
    } else {
      ShenandoahParallelWorkerSession worker_session(worker_id);
      do_work<ShenandoahSTWUpdateRefsClosure>(worker_id);
    }
  }

private:
  template<class T>
  void do_work(uint worker_id) {
    T cl;
    if (CONCURRENT && (worker_id == 0)) {
      // We ask the first worker to replenish the Mutator free set by moving regions previously reserved to hold the
      // results of evacuation. These reserves are no longer necessary because evacuation has completed.
      size_t cset_regions = _heap->collection_set()->count();
      // We cannot transfer any more regions than will be reclaimed when the existing collection set is recycled, because
      // we need the reclaimed collection set regions to replenish the collector reserves.
      _heap->free_set()->move_regions_from_collector_to_mutator(cset_regions);
    }
    // If !CONCURRENT, there is no value in expanding the Mutator free set.

    ShenandoahHeapRegion* r = _regions->next();
    ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
    while (r != nullptr) {
      HeapWord* update_watermark = r->get_update_watermark();
      assert (update_watermark >= r->bottom(), "sanity");
      if (r->is_active() && !r->is_cset()) {
        _heap->marked_object_oop_iterate(r, &cl, update_watermark);
      }
      if (ShenandoahPacing) {
        _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
      }
      if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
        return;
      }
      r = _regions->next();
    }
  }
};

void ShenandoahHeap::update_heap_references(bool concurrent) {
  assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");

  if (concurrent) {
    ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator);
    workers()->run_task(&task);
  } else {
    ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator);
    workers()->run_task(&task);
  }
}

class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeapLock* const _lock;

public:
  ShenandoahFinalUpdateRefsUpdateRegionStateClosure() : _lock(ShenandoahHeap::heap()->lock()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    // Drop the unnecessary "pinned" state from regions that no longer have
    // critical-pin (CP) marks, as this allows trashing them.

    if (r->is_active()) {
      if (r->is_pinned()) {
        if (r->pin_count() == 0) {
          ShenandoahHeapLocker locker(_lock);
          r->make_unpinned();
        }
      } else {
        if (r->pin_count() > 0) {
          ShenandoahHeapLocker locker(_lock);
          r->make_pinned();
        }
      }
    }
  }

  bool is_thread_safe() { return true; }
};

void ShenandoahHeap::update_heap_region_states(bool concurrent) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");

  {
    ShenandoahGCPhase phase(concurrent ?
                            ShenandoahPhaseTimings::final_update_refs_update_region_states :
                            ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
    ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl;
    parallel_heap_region_iterate(&cl);

    assert_pinned_region_status();
  }

  {
    ShenandoahGCPhase phase(concurrent ?
                            ShenandoahPhaseTimings::final_update_refs_trash_cset :
                            ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
    trash_cset_regions();
  }
}

void ShenandoahHeap::rebuild_free_set(bool concurrent) {
  {
    ShenandoahGCPhase phase(concurrent ?
                            ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
                            ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
    ShenandoahHeapLocker locker(lock());
    _free_set->rebuild();
  }
}

void ShenandoahHeap::print_extended_on(outputStream *st) const {
  print_on(st);
  st->cr();
  print_heap_regions_on(st);
}

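// Marking bitmap memory is committed in slices, each covering
// _bitmap_regions_per_slice heap regions. For example, with
// _bitmap_regions_per_slice == 8, regions 0..7 share slice 0, regions 8..15
// share slice 1, and so on; a slice may only be uncommitted once no region in
// its group is committed.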
bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
  size_t slice = r->index() / _bitmap_regions_per_slice;

  size_t regions_from = _bitmap_regions_per_slice * slice;
  size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
  for (size_t g = regions_from; g < regions_to; g++) {
    assert (g / _bitmap_regions_per_slice == slice, "same slice");
    if (skip_self && g == r->index()) continue;
    if (get_region(g)->is_committed()) {
      return true;
    }
  }
  return false;
}

bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
  shenandoah_assert_heaplocked();

  // Bitmaps in special regions do not need commits
  if (_bitmap_region_special) {
    return true;
  }

  if (is_bitmap_slice_committed(r, true)) {
    // Some other region from the group is already committed, meaning the bitmap
    // slice is already committed; exit right away.
    return true;
  }

  // Commit the bitmap slice:
  size_t slice = r->index() / _bitmap_regions_per_slice;
  size_t off = _bitmap_bytes_per_slice * slice;
  size_t len = _bitmap_bytes_per_slice;
  char* start = (char*) _bitmap_region.start() + off;

  if (!os::commit_memory(start, len, false)) {
    return false;
  }

  if (AlwaysPreTouch) {
    os::pretouch_memory(start, start + len, _pretouch_bitmap_page_size);
  }

  return true;
}

bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
  shenandoah_assert_heaplocked();

  // Bitmaps in special regions do not need uncommits
  if (_bitmap_region_special) {
    return true;
  }

  if (is_bitmap_slice_committed(r, true)) {
    // Some other region from the group is still committed, meaning the bitmap
    // slice should stay committed; exit right away.
    return true;
  }

  // Uncommit the bitmap slice:
  size_t slice = r->index() / _bitmap_regions_per_slice;
  size_t off = _bitmap_bytes_per_slice * slice;
  size_t len = _bitmap_bytes_per_slice;
  if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
    return false;
  }
  return true;
}

void ShenandoahHeap::safepoint_synchronize_begin() {
  StackWatermarkSet::safepoint_synchronize_begin();
  SuspendibleThreadSet::synchronize();
}

void ShenandoahHeap::safepoint_synchronize_end() {
  SuspendibleThreadSet::desynchronize();
}

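// Test hook: with ShenandoahAllocFailureALot enabled, roughly 5% of calls
// (os::random() % 1000 > 950) raise the injection flag, which makes a later
// allocation pretend to fail and thereby exercises the failure paths.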
void ShenandoahHeap::try_inject_alloc_failure() {
  if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
    _inject_alloc_failure.set();
    os::naked_short_sleep(1);
    if (cancelled_gc()) {
      log_info(gc)("Allocation failure was successfully injected");
    }
  }
}

bool ShenandoahHeap::should_inject_alloc_failure() {
  return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
}

void ShenandoahHeap::initialize_serviceability() {
  _memory_pool = new ShenandoahMemoryPool(this);
  _cycle_memory_manager.add_pool(_memory_pool);
  _stw_memory_manager.add_pool(_memory_pool);
}

GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(2);
  memory_managers.append(&_cycle_memory_manager);
  memory_managers.append(&_stw_memory_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(1);
  memory_pools.append(_memory_pool);
  return memory_pools;
}

MemoryUsage ShenandoahHeap::memory_usage() {
  return _memory_pool->get_memory_usage();
}

ShenandoahRegionIterator::ShenandoahRegionIterator() :
  _heap(ShenandoahHeap::heap()),
  _index(0) {}

ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
  _heap(heap),
  _index(0) {}

void ShenandoahRegionIterator::reset() {
  _index = 0;
}

bool ShenandoahRegionIterator::has_next() const {
  return _index < _heap->num_regions();
}

char ShenandoahHeap::gc_state() const {
  return _gc_state.raw_value();
}

ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
#ifdef ASSERT
  assert(_liveness_cache != nullptr, "sanity");
  assert(worker_id < _max_workers, "sanity");
  for (uint i = 0; i < num_regions(); i++) {
    assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
  }
#endif
  return _liveness_cache[worker_id];
}

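// Fold the per-worker liveness counts accumulated during marking back into
// their regions, and zero the cache so it is ready for the next cycle.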
void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
  assert(worker_id < _max_workers, "sanity");
  assert(_liveness_cache != nullptr, "sanity");
  ShenandoahLiveData* ld = _liveness_cache[worker_id];
  for (uint i = 0; i < num_regions(); i++) {
    ShenandoahLiveData live = ld[i];
    if (live > 0) {
      ShenandoahHeapRegion* r = get_region(i);
      r->increase_live_data_gc_words(live);
      ld[i] = 0;
    }
  }
}

bool ShenandoahHeap::requires_barriers(stackChunkOop obj) const {
  if (is_idle()) return false;

  // Objects allocated after marking start are implicitly alive, and don't need any
  // barriers during the marking phase.
  if (is_concurrent_mark_in_progress() &&
     !marking_context()->allocated_after_mark_start(obj)) {
    return true;
  }

  // Cannot guarantee that obj is deeply good.
  if (has_forwarded_objects()) {
    return true;
  }

  return false;
}