1 /*
   2  * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.
   4  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
   5  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   6  *
   7  * This code is free software; you can redistribute it and/or modify it
   8  * under the terms of the GNU General Public License version 2 only, as
   9  * published by the Free Software Foundation.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 #include "precompiled.hpp"
  28 #include "memory/allocation.hpp"
  29 #include "memory/universe.hpp"
  30 
  31 #include "gc/shared/classUnloadingContext.hpp"
  32 #include "gc/shared/gcArguments.hpp"
  33 #include "gc/shared/gcTimer.hpp"
  34 #include "gc/shared/gcTraceTime.inline.hpp"
  35 #include "gc/shared/locationPrinter.inline.hpp"
  36 #include "gc/shared/memAllocator.hpp"
  37 #include "gc/shared/plab.hpp"
  38 #include "gc/shared/tlab_globals.hpp"
  39 
  40 #include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
  41 #include "gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp"
  42 #include "gc/shenandoah/shenandoahAllocRequest.hpp"
  43 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  44 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  45 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  46 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  47 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
  49 #include "gc/shenandoah/shenandoahControlThread.hpp"
  50 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  51 #include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
  52 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
  53 #include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
  54 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  55 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  56 #include "gc/shenandoah/shenandoahHeapRegionClosures.hpp"
  57 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  58 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  59 #include "gc/shenandoah/shenandoahInitLogger.hpp"
  60 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  61 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
  62 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  63 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  64 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  65 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
  66 #include "gc/shenandoah/shenandoahPadding.hpp"
  67 #include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
  68 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  69 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  70 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
  71 #include "gc/shenandoah/shenandoahSTWMark.hpp"
  72 #include "gc/shenandoah/shenandoahUtils.hpp"
  73 #include "gc/shenandoah/shenandoahVerifier.hpp"
  74 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  75 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  76 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  77 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  78 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  79 #include "gc/shenandoah/mode/shenandoahGenerationalMode.hpp"
  80 #include "gc/shenandoah/mode/shenandoahIUMode.hpp"
  81 #include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
  82 #include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
  83 #include "utilities/globalDefinitions.hpp"
  84 
  85 #if INCLUDE_JFR
  86 #include "gc/shenandoah/shenandoahJfrSupport.hpp"
  87 #endif
  88 
  89 #include "classfile/systemDictionary.hpp"
  90 #include "code/codeCache.hpp"
  91 #include "memory/classLoaderMetaspace.hpp"
  92 #include "memory/metaspaceUtils.hpp"
  93 #include "oops/compressedOops.inline.hpp"
  94 #include "prims/jvmtiTagMap.hpp"
  95 #include "runtime/atomic.hpp"
  96 #include "runtime/globals.hpp"
  97 #include "runtime/interfaceSupport.inline.hpp"
  98 #include "runtime/java.hpp"
  99 #include "runtime/orderAccess.hpp"
 100 #include "runtime/safepointMechanism.hpp"
 101 #include "runtime/vmThread.hpp"
 102 #include "services/mallocTracker.hpp"
 103 #include "services/memTracker.hpp"
 104 #include "utilities/events.hpp"
 105 #include "utilities/powerOfTwo.hpp"
 106 
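     // The pre-touch tasks below walk all committed regions in parallel and touch the backing
     // memory (the heap itself and the mark bitmap, respectively) so that pages are physically
     // backed up front. They are only used when -XX:+AlwaysPreTouch is set (see initialize()).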
 107 class ShenandoahPretouchHeapTask : public WorkerTask {
 108 private:
 109   ShenandoahRegionIterator _regions;
 110   const size_t _page_size;
 111 public:
 112   ShenandoahPretouchHeapTask(size_t page_size) :
 113     WorkerTask("Shenandoah Pretouch Heap"),
 114     _page_size(page_size) {}
 115 
 116   virtual void work(uint worker_id) {
 117     ShenandoahHeapRegion* r = _regions.next();
 118     while (r != nullptr) {
 119       if (r->is_committed()) {
 120         os::pretouch_memory(r->bottom(), r->end(), _page_size);
 121       }
 122       r = _regions.next();
 123     }
 124   }
 125 };
 126 
 127 class ShenandoahPretouchBitmapTask : public WorkerTask {
 128 private:
 129   ShenandoahRegionIterator _regions;
 130   char* _bitmap_base;
 131   const size_t _bitmap_size;
 132   const size_t _page_size;
 133 public:
 134   ShenandoahPretouchBitmapTask(char* bitmap_base, size_t bitmap_size, size_t page_size) :
 135     WorkerTask("Shenandoah Pretouch Bitmap"),
 136     _bitmap_base(bitmap_base),
 137     _bitmap_size(bitmap_size),
 138     _page_size(page_size) {}
 139 
 140   virtual void work(uint worker_id) {
 141     ShenandoahHeapRegion* r = _regions.next();
 142     while (r != nullptr) {
 143       size_t start = r->index()       * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 144       size_t end   = (r->index() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 145       assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " <= " SIZE_FORMAT, end, _bitmap_size);
 146 
 147       if (r->is_committed()) {
 148         os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);
 149       }
 150 
 151       r = _regions.next();
 152     }
 153   }
 154 };
 155 
 156 jint ShenandoahHeap::initialize() {
 157   //
 158   // Figure out heap sizing
 159   //
 160 
 161   size_t init_byte_size = InitialHeapSize;
 162   size_t min_byte_size  = MinHeapSize;
 163   size_t max_byte_size  = MaxHeapSize;
 164   size_t heap_alignment = HeapAlignment;
 165 
 166   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 167 
 168   Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
 169   Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
 170 
 171   _num_regions = ShenandoahHeapRegion::region_count();
 172   assert(_num_regions == (max_byte_size / reg_size_bytes),
 173          "Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT,
 174          _num_regions, max_byte_size, reg_size_bytes);
 175 
 176   size_t num_committed_regions = init_byte_size / reg_size_bytes;
 177   num_committed_regions = MIN2(num_committed_regions, _num_regions);
 178   assert(num_committed_regions <= _num_regions, "sanity");
 179   _initial_size = num_committed_regions * reg_size_bytes;
 180 
 181   size_t num_min_regions = min_byte_size / reg_size_bytes;
 182   num_min_regions = MIN2(num_min_regions, _num_regions);
 183   assert(num_min_regions <= _num_regions, "sanity");
 184   _minimum_size = num_min_regions * reg_size_bytes;
 185 
 186   // Default to max heap size.
 187   _soft_max_size = _num_regions * reg_size_bytes;
 188 
 189   _committed = _initial_size;
 190 
 191   size_t heap_page_size   = UseLargePages ? os::large_page_size() : os::vm_page_size();
 192   size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 193   size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 194 
 195   //
 196   // Reserve and commit memory for heap
 197   //
 198 
 199   ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
 200   initialize_reserved_region(heap_rs);
 201   _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
 202   _heap_region_special = heap_rs.special();
 203 
 204   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
 205          "Misaligned heap: " PTR_FORMAT, p2i(base()));
 206   os::trace_page_sizes_for_requested_size("Heap",
 207                                           max_byte_size, heap_rs.page_size(), heap_alignment,
 208                                           heap_rs.base(), heap_rs.size());
 209 
 210 #if SHENANDOAH_OPTIMIZED_MARKTASK
 211   // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
 212   // Fail if we ever attempt to address more than we can.
 213   if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
 214     FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
 215                           "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
 216                           "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
 217                 p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
 218     vm_exit_during_initialization("Fatal Error", buf);
 219   }
 220 #endif
 221 
 222   ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
 223   if (!_heap_region_special) {
 224     os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
 225                               "Cannot commit heap memory");
 226   }
 227 
 228   BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this, _heap_region));
 229 
 230   // Now that we know the number of regions and the heap sizes, initialize the heuristics.
 231   initialize_heuristics();
 232 
 233   assert(_heap_region.byte_size() == heap_rs.size(), "Need to know reserved size for card table");
 234 
 235   //
 236   // Worker threads must be initialized after the barrier is configured
 237   //
 238   _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
 239   if (_workers == nullptr) {
 240     vm_exit_during_initialization("Failed necessary allocation.");
 241   } else {
 242     _workers->initialize_workers();
 243   }
 244 
 245   if (ParallelGCThreads > 1) {
 246     _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread", ParallelGCThreads);
 247     _safepoint_workers->initialize_workers();
 248   }
 249 
 250   //
 251   // Reserve and commit memory for bitmap(s)
 252   //
 253 
 254   size_t bitmap_size_orig = ShenandoahMarkBitMap::compute_size(heap_rs.size());
 255   _bitmap_size = align_up(bitmap_size_orig, bitmap_page_size);
 256 
 257   size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();
 258 
 259   guarantee(bitmap_bytes_per_region != 0,
 260             "Bitmap bytes per region should not be zero");
 261   guarantee(is_power_of_2(bitmap_bytes_per_region),
 262             "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
 263 
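       // The mark bitmap is committed and uncommitted in "slices". If one bitmap page spans the
       // bitmap bytes of several regions, a slice covers the whole page (several regions);
       // otherwise a slice covers exactly one region's worth of bitmap bytes.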
 264   if (bitmap_page_size > bitmap_bytes_per_region) {
 265     _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
 266     _bitmap_bytes_per_slice = bitmap_page_size;
 267   } else {
 268     _bitmap_regions_per_slice = 1;
 269     _bitmap_bytes_per_slice = bitmap_bytes_per_region;
 270   }
 271 
 272   guarantee(_bitmap_regions_per_slice >= 1,
 273             "Should have at least one region per slice: " SIZE_FORMAT,
 274             _bitmap_regions_per_slice);
 275 
 276   guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
 277             "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
 278             _bitmap_bytes_per_slice, bitmap_page_size);
 279 
 280   ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
 281   os::trace_page_sizes_for_requested_size("Mark Bitmap",
 282                                           bitmap_size_orig, bitmap.page_size(), bitmap_page_size,
 283                                           bitmap.base(),
 284                                           bitmap.size());
 285   MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
 286   _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
 287   _bitmap_region_special = bitmap.special();
 288 
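       // Commit only as much of the bitmap as is needed to cover the initially committed regions,
       // rounded up to whole slices and capped at the full bitmap size.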
 289   size_t bitmap_init_commit = _bitmap_bytes_per_slice *
 290     align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
 291   bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
 292   if (!_bitmap_region_special) {
 293     os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
 294                               "Cannot commit bitmap memory");
 295   }
 296 
 297   _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);
 298 
 299   if (ShenandoahVerify) {
 300     ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
 301     os::trace_page_sizes_for_requested_size("Verify Bitmap",
 302                                             bitmap_size_orig, verify_bitmap.page_size(), bitmap_page_size,
 303                                             verify_bitmap.base(),
 304                                             verify_bitmap.size());
 305     if (!verify_bitmap.special()) {
 306       os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
 307                                 "Cannot commit verification bitmap memory");
 308     }
 309     MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
 310     MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
 311     _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
 312     _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
 313   }
 314 
 315   // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
 316   size_t aux_bitmap_page_size = bitmap_page_size;
 317 
 318   ReservedSpace aux_bitmap(_bitmap_size, aux_bitmap_page_size);
 319   os::trace_page_sizes_for_requested_size("Aux Bitmap",
 320                                           bitmap_size_orig, aux_bitmap.page_size(), aux_bitmap_page_size,
 321                                           aux_bitmap.base(), aux_bitmap.size());
 322   MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
 323   _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
 324   _aux_bitmap_region_special = aux_bitmap.special();
 325   _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
 326 
 327   //
 328   // Create regions and region sets
 329   //
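       // Region metadata objects are padded out to the cache line size, so that adjacent
       // ShenandoahHeapRegion objects do not share a cache line (avoiding false sharing).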
 330   size_t region_align = align_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE);
 331   size_t region_storage_size_orig = region_align * _num_regions;
 332   size_t region_storage_size = align_up(region_storage_size_orig,
 333                                         MAX2(region_page_size, os::vm_allocation_granularity()));
 334 
 335   ReservedSpace region_storage(region_storage_size, region_page_size);
 336   os::trace_page_sizes_for_requested_size("Region Storage",
 337                                           region_storage_size_orig, region_storage.page_size(), region_page_size,
 338                                           region_storage.base(), region_storage.size());
 339   MemTracker::record_virtual_memory_type(region_storage.base(), mtGC);
 340   if (!region_storage.special()) {
 341     os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false,
 342                               "Cannot commit region memory");
 343   }
 344 
 345   // Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks.
 346   // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there.
 347   // If not successful, bite the bullet and allocate at whatever address is available.
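       // The cset map holds one byte per region and is accessed through a "biased" base address,
       // so a cset check can index it directly with (addr >> region_size_bytes_shift); hence the
       // reservation size is derived from the highest heap address rather than the region count.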
 348   {
 349     const size_t cset_align = MAX2<size_t>(os::vm_page_size(), os::vm_allocation_granularity());
 350     const size_t cset_size = align_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align);
 351     const size_t cset_page_size = os::vm_page_size();
 352 
 353     uintptr_t min = round_up_power_of_2(cset_align);
 354     uintptr_t max = (1u << 30u);
 355     ReservedSpace cset_rs;
 356 
 357     for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
 358       char* req_addr = (char*)addr;
 359       assert(is_aligned(req_addr, cset_align), "Should be aligned");
 360       cset_rs = ReservedSpace(cset_size, cset_align, cset_page_size, req_addr);
 361       if (cset_rs.is_reserved()) {
 362         assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
 363         _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 364         break;
 365       }
 366     }
 367 
 368     if (_collection_set == nullptr) {
 369       cset_rs = ReservedSpace(cset_size, cset_align, os::vm_page_size());
 370       _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 371     }
 372     os::trace_page_sizes_for_requested_size("Collection Set",
 373                                             cset_size, cset_rs.page_size(), cset_page_size,
 374                                             cset_rs.base(),
 375                                             cset_rs.size());
 376   }
 377 
 378   _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
 379   _affiliations = NEW_C_HEAP_ARRAY(uint8_t, _num_regions, mtGC);
 380   _free_set = new ShenandoahFreeSet(this, _num_regions);
 381 
 382   {
 383     ShenandoahHeapLocker locker(lock());
 384 
 385     for (size_t i = 0; i < _num_regions; i++) {
 386       HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
 387       bool is_committed = i < num_committed_regions;
 388       void* loc = region_storage.base() + i * region_align;
 389 
 390       ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
 391       assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");
 392 
 393       _marking_context->initialize_top_at_mark_start(r);
 394       _regions[i] = r;
 395       assert(!collection_set()->is_in(i), "New region should not be in collection set");
 396 
 397       _affiliations[i] = ShenandoahAffiliation::FREE;
 398     }
 399 
 400     // Initialize to complete
 401     _marking_context->mark_complete();
 402     size_t young_cset_regions, old_cset_regions;
 403 
 404     // We are initializing the free set; cset region tallies are ignored here.
 405     size_t first_old, last_old, num_old;
 406     _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
 407     _free_set->finish_rebuild(young_cset_regions, old_cset_regions, num_old);
 408   }
 409 
 410   if (AlwaysPreTouch) {
 411     // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
 412     // before initialize() below zeroes it with the initializing thread. For any given region,
 413     // we touch the region and the corresponding bitmaps from the same thread.
 414     ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
 415 
 416     _pretouch_heap_page_size = heap_page_size;
 417     _pretouch_bitmap_page_size = bitmap_page_size;
 418 
 419     // OS memory managers may want to coalesce back-to-back pages. Make their jobs
 420     // simpler by pre-touching contiguous spaces (heap and bitmap) separately.
 421 
 422     ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, _pretouch_bitmap_page_size);
 423     _workers->run_task(&bcl);
 424 
 425     ShenandoahPretouchHeapTask hcl(_pretouch_heap_page_size);
 426     _workers->run_task(&hcl);
 427   }
 428 
 429   //
 430   // Initialize the rest of GC subsystems
 431   //
 432 
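       // Per-worker liveness caches: one ShenandoahLiveData counter per region and worker,
       // accumulated during marking and later folded into the regions' live data.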
 433   _liveness_cache = NEW_C_HEAP_ARRAY(ShenandoahLiveData*, _max_workers, mtGC);
 434   for (uint worker = 0; worker < _max_workers; worker++) {
 435     _liveness_cache[worker] = NEW_C_HEAP_ARRAY(ShenandoahLiveData, _num_regions, mtGC);
 436     Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
 437   }
 438 
 439   // There should probably be Shenandoah-specific options for these,
 440   // just as there are G1-specific options.
 441   {
 442     ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
 443     satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
 444     satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
 445   }
 446 
 447   _monitoring_support = new ShenandoahMonitoringSupport(this);
 448   _phase_timings = new ShenandoahPhaseTimings(max_workers());
 449   ShenandoahCodeRoots::initialize();
 450 
 451   if (ShenandoahPacing) {
 452     _pacer = new ShenandoahPacer(this);
 453     _pacer->setup_for_idle();
 454   }
 455 
 456   initialize_controller();
 457 
 458   print_init_logger();
 459 
 460   return JNI_OK;
 461 }
 462 
 463 void ShenandoahHeap::initialize_controller() {
 464   _control_thread = new ShenandoahControlThread();
 465 }
 466 
 467 void ShenandoahHeap::print_init_logger() const {
 468   ShenandoahInitLogger::print();
 469 }
 470 
 471 void ShenandoahHeap::initialize_mode() {
 472   if (ShenandoahGCMode != nullptr) {
 473     if (strcmp(ShenandoahGCMode, "satb") == 0) {
 474       _gc_mode = new ShenandoahSATBMode();
 475     } else if (strcmp(ShenandoahGCMode, "iu") == 0) {
 476       _gc_mode = new ShenandoahIUMode();
 477     } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
 478       _gc_mode = new ShenandoahPassiveMode();
 479     } else if (strcmp(ShenandoahGCMode, "generational") == 0) {
 480       _gc_mode = new ShenandoahGenerationalMode();
 481     } else {
 482       vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
 483     }
 484   } else {
 485     vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
 486   }
 487   _gc_mode->initialize_flags();
 488   if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 489     vm_exit_during_initialization(
 490             err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 491                     _gc_mode->name()));
 492   }
 493   if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
 494     vm_exit_during_initialization(
 495             err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 496                     _gc_mode->name()));
 497   }
 498 }
 499 
 500 void ShenandoahHeap::initialize_heuristics() {
 501   _global_generation = new ShenandoahGlobalGeneration(mode()->is_generational(), max_workers(), max_capacity(), max_capacity());
 502   _global_generation->initialize_heuristics(mode());
 503 }
 504 
 505 #ifdef _MSC_VER
 506 #pragma warning( push )
 507 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 508 #endif
 509 
 510 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 511   CollectedHeap(),
 512   _gc_generation(nullptr),
 513   _active_generation(nullptr),
 514   _initial_size(0),
 515   _committed(0),
 516   _max_workers(MAX3(ConcGCThreads, ParallelGCThreads, 1U)),
 517   _workers(nullptr),
 518   _safepoint_workers(nullptr),
 519   _heap_region_special(false),
 520   _num_regions(0),
 521   _regions(nullptr),
 522   _affiliations(nullptr),
 523   _gc_state_changed(false),
 524   _gc_no_progress_count(0),
 525   _cancel_requested_time(0),
 526   _update_refs_iterator(this),
 527   _global_generation(nullptr),
 528   _control_thread(nullptr),
 529   _young_generation(nullptr),
 530   _old_generation(nullptr),
 531   _shenandoah_policy(policy),
 532   _gc_mode(nullptr),
 533   _free_set(nullptr),
 534   _pacer(nullptr),
 535   _verifier(nullptr),
 536   _phase_timings(nullptr),
 537   _monitoring_support(nullptr),
 538   _memory_pool(nullptr),
 539   _stw_memory_manager("Shenandoah Pauses"),
 540   _cycle_memory_manager("Shenandoah Cycles"),
 541   _gc_timer(new ConcurrentGCTimer()),
 542   _soft_ref_policy(),
 543   _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
 544   _marking_context(nullptr),
 545   _bitmap_size(0),
 546   _bitmap_regions_per_slice(0),
 547   _bitmap_bytes_per_slice(0),
 548   _bitmap_region_special(false),
 549   _aux_bitmap_region_special(false),
 550   _liveness_cache(nullptr),
 551   _collection_set(nullptr)
 552 {
 553   // Initialize GC mode early, many subsequent initialization procedures depend on it
 554   initialize_mode();
 555 }
 556 
 557 #ifdef _MSC_VER
 558 #pragma warning( pop )
 559 #endif
 560 
 561 void ShenandoahHeap::print_on(outputStream* st) const {
 562   st->print_cr("Shenandoah Heap");
 563   st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
 564                byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
 565                byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
 566                byte_size_in_proper_unit(committed()),    proper_unit_for_byte_size(committed()),
 567                byte_size_in_proper_unit(used()),         proper_unit_for_byte_size(used()));
 568   st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions",
 569                num_regions(),
 570                byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
 571                proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));
 572 
 573   st->print("Status: ");
 574   if (has_forwarded_objects())                 st->print("has forwarded objects, ");
 575   if (!mode()->is_generational()) {
 576     if (is_concurrent_mark_in_progress())      st->print("marking, ");
 577   } else {
 578     if (is_concurrent_old_mark_in_progress())    st->print("old marking, ");
 579     if (is_concurrent_young_mark_in_progress())  st->print("young marking, ");
 580   }
 581   if (is_evacuation_in_progress())             st->print("evacuating, ");
 582   if (is_update_refs_in_progress())            st->print("updating refs, ");
 583   if (is_degenerated_gc_in_progress())         st->print("degenerated gc, ");
 584   if (is_full_gc_in_progress())                st->print("full gc, ");
 585   if (is_full_gc_move_in_progress())           st->print("full gc move, ");
 586   if (is_concurrent_weak_root_in_progress())   st->print("concurrent weak roots, ");
 587   if (is_concurrent_strong_root_in_progress() &&
 588       !is_concurrent_weak_root_in_progress())  st->print("concurrent strong roots, ");
 589 
 590   if (cancelled_gc()) {
 591     st->print("cancelled");
 592   } else {
 593     st->print("not cancelled");
 594   }
 595   st->cr();
 596 
 597   st->print_cr("Reserved region:");
 598   st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
 599                p2i(reserved_region().start()),
 600                p2i(reserved_region().end()));
 601 
 602   ShenandoahCollectionSet* cset = collection_set();
 603   st->print_cr("Collection set:");
 604   if (cset != nullptr) {
 605     st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address()));
 606     st->print_cr(" - map (biased):  " PTR_FORMAT, p2i(cset->biased_map_address()));
 607   } else {
 608     st->print_cr(" (null)");
 609   }
 610 
 611   st->cr();
 612   MetaspaceUtils::print_on(st);
 613 
 614   if (Verbose) {
 615     st->cr();
 616     print_heap_regions_on(st);
 617   }
 618 }
 619 
 620 class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
 621 public:
 622   void do_thread(Thread* thread) {
 623     assert(thread != nullptr, "Sanity");
 624     assert(thread->is_Worker_thread(), "Only worker thread expected");
 625     ShenandoahThreadLocalData::initialize_gclab(thread);
 626   }
 627 };
 628 
 629 void ShenandoahHeap::post_initialize() {
 630   CollectedHeap::post_initialize();
 631 
 632   // Schedule periodic task to report on gc thread CPU utilization
 633   _mmu_tracker.initialize();
 634 
 635   MutexLocker ml(Threads_lock);
 636 
 637   ShenandoahInitWorkerGCLABClosure init_gclabs;
 638   _workers->threads_do(&init_gclabs);
 639 
 640   // The GCLAB cannot be initialized early during VM startup, as it cannot determine its max_size.
 641   // Instead, let WorkerThreads initialize the GCLAB when a new worker is created.
 642   _workers->set_initialize_gclab();
 643   if (_safepoint_workers != nullptr) {
 644     _safepoint_workers->threads_do(&init_gclabs);
 645     _safepoint_workers->set_initialize_gclab();
 646   }
 647 
 648   JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers());
 649 }
 650 
 651 ShenandoahHeuristics* ShenandoahHeap::heuristics() {
 652   return _global_generation->heuristics();
 653 }
 654 
 655 size_t ShenandoahHeap::used() const {
 656   return global_generation()->used();
 657 }
 658 
 659 size_t ShenandoahHeap::committed() const {
 660   return Atomic::load(&_committed);
 661 }
 662 
 663 void ShenandoahHeap::increase_committed(size_t bytes) {
 664   shenandoah_assert_heaplocked_or_safepoint();
 665   _committed += bytes;
 666 }
 667 
 668 void ShenandoahHeap::decrease_committed(size_t bytes) {
 669   shenandoah_assert_heaplocked_or_safepoint();
 670   _committed -= bytes;
 671 }
 672 
 673 // For tracking usage based on allocations, it should be the case that:
 674 // * The sum of regions::used == heap::used
 675 // * The sum of a generation's regions::used == generation::used
 676 // * The sum of a generation's humongous regions::free == generation::humongous_waste
 677 // These invariants are checked by the verifier on GC safepoints.
 678 //
 679 // Additional notes:
 680 // * When a mutator's allocation request causes a region to be retired, the
 681 //   free memory left in that region is considered waste. It does not contribute
 682 //   to the usage, but it _does_ contribute to the allocation rate.
 683 // * The bottom of a PLAB must be aligned on card size. In some cases this will
 684 //   require padding in front of the PLAB (a filler object). Because this padding
 685 //   is included in the region's used memory we include the padding in the usage
 686 //   accounting as waste.
 687 // * Mutator allocations are used to compute an allocation rate. They are also
 688 //   sent to the Pacer for those purposes.
 689 // * There are three sources of waste:
 690 //  1. The padding used to align a PLAB on card size
 691 //  2. The free memory left in a region that is retired because it is smaller than the minimum TLAB size
 692 //  3. The unused portion of memory in the last region of a humongous object
 693 void ShenandoahHeap::increase_used(const ShenandoahAllocRequest& req) {
 694   size_t actual_bytes = req.actual_size() * HeapWordSize;
 695   size_t wasted_bytes = req.waste() * HeapWordSize;
 696   ShenandoahGeneration* generation = generation_for(req.affiliation());
 697 
 698   if (req.is_gc_alloc()) {
 699     assert(wasted_bytes == 0 || req.type() == ShenandoahAllocRequest::_alloc_plab, "Only PLABs have waste");
 700     increase_used(generation, actual_bytes + wasted_bytes);
 701   } else {
 702     assert(req.is_mutator_alloc(), "Expected mutator alloc here");
 703     // padding and actual size both count towards allocation counter
 704     generation->increase_allocated(actual_bytes + wasted_bytes);
 705 
 706     // only actual size counts toward usage for mutator allocations
 707     increase_used(generation, actual_bytes);
 708 
 709     // notify pacer of both actual size and waste
 710     notify_mutator_alloc_words(req.actual_size(), req.waste());
 711 
 712     if (wasted_bytes > 0 && ShenandoahHeapRegion::requires_humongous(req.actual_size())) {
 713       increase_humongous_waste(generation, wasted_bytes);
 714     }
 715   }
 716 }
 717 
 718 void ShenandoahHeap::increase_humongous_waste(ShenandoahGeneration* generation, size_t bytes) {
 719   generation->increase_humongous_waste(bytes);
 720   if (!generation->is_global()) {
 721     global_generation()->increase_humongous_waste(bytes);
 722   }
 723 }
 724 
 725 void ShenandoahHeap::decrease_humongous_waste(ShenandoahGeneration* generation, size_t bytes) {
 726   generation->decrease_humongous_waste(bytes);
 727   if (!generation->is_global()) {
 728     global_generation()->decrease_humongous_waste(bytes);
 729   }
 730 }
 731 
 732 void ShenandoahHeap::increase_used(ShenandoahGeneration* generation, size_t bytes) {
 733   generation->increase_used(bytes);
 734   if (!generation->is_global()) {
 735     global_generation()->increase_used(bytes);
 736   }
 737 }
 738 
 739 void ShenandoahHeap::decrease_used(ShenandoahGeneration* generation, size_t bytes) {
 740   generation->decrease_used(bytes);
 741   if (!generation->is_global()) {
 742     global_generation()->decrease_used(bytes);
 743   }
 744 }
 745 
 746 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, size_t waste) {
 747   if (ShenandoahPacing) {
 748     control_thread()->pacing_notify_alloc(words);
 749     if (waste > 0) {
 750       pacer()->claim_for_alloc<true>(waste);
 751     }
 752   }
 753 }
 754 
 755 size_t ShenandoahHeap::capacity() const {
 756   return committed();
 757 }
 758 
 759 size_t ShenandoahHeap::max_capacity() const {
 760   return _num_regions * ShenandoahHeapRegion::region_size_bytes();
 761 }
 762 
 763 size_t ShenandoahHeap::soft_max_capacity() const {
 764   size_t v = Atomic::load(&_soft_max_size);
 765   assert(min_capacity() <= v && v <= max_capacity(),
 766          "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
 767          min_capacity(), v, max_capacity());
 768   return v;
 769 }
 770 
 771 void ShenandoahHeap::set_soft_max_capacity(size_t v) {
 772   assert(min_capacity() <= v && v <= max_capacity(),
 773          "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
 774          min_capacity(), v, max_capacity());
 775   Atomic::store(&_soft_max_size, v);
 776 }
 777 
 778 size_t ShenandoahHeap::min_capacity() const {
 779   return _minimum_size;
 780 }
 781 
 782 size_t ShenandoahHeap::initial_capacity() const {
 783   return _initial_size;
 784 }
 785 
 786 void ShenandoahHeap::maybe_uncommit(double shrink_before, size_t shrink_until) {
 787   assert (ShenandoahUncommit, "should be enabled");
 788 
 789   // Determine if there is work to do. This avoids taking the heap lock if there is
 790   // no work available, avoids spamming the logs with superfluous messages,
 791   // and minimizes the amount of work done while locks are held.
 792 
 793   if (committed() <= shrink_until) return;
 794 
 795   bool has_work = false;
 796   for (size_t i = 0; i < num_regions(); i++) {
 797     ShenandoahHeapRegion* r = get_region(i);
 798     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
 799       has_work = true;
 800       break;
 801     }
 802   }
 803 
 804   if (has_work) {
 805     static const char* msg = "Concurrent uncommit";
 806     ShenandoahConcurrentPhase gcPhase(msg, ShenandoahPhaseTimings::conc_uncommit, true /* log_heap_usage */);
 807     EventMark em("%s", msg);
 808 
 809     op_uncommit(shrink_before, shrink_until);
 810   }
 811 }
 812 
 813 void ShenandoahHeap::op_uncommit(double shrink_before, size_t shrink_until) {
 814   assert (ShenandoahUncommit, "should be enabled");
 815 
 816   // The application allocates from the beginning of the heap, while the GC allocates at
 817   // the end of it. It is more efficient to uncommit from the end, so that the application
 818   // keeps using the committed regions near the start of the heap. GC allocations are much
 819   // less frequent, and can therefore tolerate the cost of committing regions again later.
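       // Regions are scanned from the highest index downwards. The heap lock is taken per
       // candidate region and released between regions (see the SpinPause below), so that
       // allocating threads are not starved while uncommit is in progress.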
 820 
 821   size_t count = 0;
 822   for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow
 823     ShenandoahHeapRegion* r = get_region(i - 1);
 824     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
 825       ShenandoahHeapLocker locker(lock());
 826       if (r->is_empty_committed()) {
 827         if (committed() < shrink_until + ShenandoahHeapRegion::region_size_bytes()) {
 828           break;
 829         }
 830 
 831         r->make_uncommitted();
 832         count++;
 833       }
 834     }
 835     SpinPause(); // allow allocators to take the lock
 836   }
 837 
 838   if (count > 0) {
 839     notify_heap_changed();
 840   }
 841 }
 842 
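     // Pick up any change to the manageable SoftMaxHeapSize flag, clamping the new value
     // into [min_capacity, max_capacity]. Returns true if the soft max actually changed.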
 843 bool ShenandoahHeap::check_soft_max_changed() {
 844   size_t new_soft_max = Atomic::load(&SoftMaxHeapSize);
 845   size_t old_soft_max = soft_max_capacity();
 846   if (new_soft_max != old_soft_max) {
 847     new_soft_max = MAX2(min_capacity(), new_soft_max);
 848     new_soft_max = MIN2(max_capacity(), new_soft_max);
 849     if (new_soft_max != old_soft_max) {
 850       log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
 851                    byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
 852                    byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
 853       );
 854       set_soft_max_capacity(new_soft_max);
 855       return true;
 856     }
 857   }
 858   return false;
 859 }
 860 
 861 void ShenandoahHeap::notify_heap_changed() {
 862   // Update monitoring counters when we took a new region. This amortizes the
 863   // update costs on slow path.
 864   monitoring_support()->notify_heap_changed();
 865   _heap_changed.try_set();
 866 }
 867 
 868 void ShenandoahHeap::set_forced_counters_update(bool value) {
 869   monitoring_support()->set_forced_counters_update(value);
 870 }
 871 
 872 void ShenandoahHeap::handle_force_counters_update() {
 873   monitoring_support()->handle_force_counters_update();
 874 }
 875 
 876 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
 877   // New object should fit the GCLAB size
 878   size_t min_size = MAX2(size, PLAB::min_size());
 879 
 880   // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
 881   size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
 882 
 883   new_size = MIN2(new_size, PLAB::max_size());
 884   new_size = MAX2(new_size, PLAB::min_size());
 885 
 886   // Record the new heuristic value even if we take a shortcut below. This captures
 887   // the case when moderately-sized objects always take the shortcut. At some point,
 888   // the heuristics should catch up with them.
 889   log_debug(gc, free)("Set new GCLAB size: " SIZE_FORMAT, new_size);
 890   ShenandoahThreadLocalData::set_gclab_size(thread, new_size);
 891 
 892   if (new_size < size) {
 893     // New size still does not fit the object. Fall back to shared allocation.
 894     // This avoids retiring perfectly good GCLABs, when we encounter a large object.
 895     log_debug(gc, free)("New gclab size (" SIZE_FORMAT ") is too small for " SIZE_FORMAT, new_size, size);
 896     return nullptr;
 897   }
 898 
 899   // Retire current GCLAB, and allocate a new one.
 900   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
 901   gclab->retire();
 902 
 903   size_t actual_size = 0;
 904   HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
 905   if (gclab_buf == nullptr) {
 906     return nullptr;
 907   }
 908 
 909   assert (size <= actual_size, "allocation should fit");
 910 
 911   // ...and clear or zap the just-allocated GCLAB, if needed.
 912   if (ZeroTLAB) {
 913     Copy::zero_to_words(gclab_buf, actual_size);
 914   } else if (ZapTLAB) {
 915     // Skip mangling the space corresponding to the object header to
 916     // ensure that the returned space is not considered parsable by
 917     // any concurrent GC thread.
 918     size_t hdr_size = oopDesc::header_size();
 919     Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
 920   }
 921   gclab->set_buf(gclab_buf, actual_size);
 922   return gclab->allocate(size);
 923 }
 924 
 925 // Called from stubs in JIT code or interpreter
 926 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
 927                                             size_t requested_size,
 928                                             size_t* actual_size) {
 929   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
 930   HeapWord* res = allocate_memory(req);
 931   if (res != nullptr) {
 932     *actual_size = req.actual_size();
 933   } else {
 934     *actual_size = 0;
 935   }
 936   return res;
 937 }
 938 
 939 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
 940                                              size_t word_size,
 941                                              size_t* actual_size) {
 942   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
 943   HeapWord* res = allocate_memory(req);
 944   if (res != nullptr) {
 945     *actual_size = req.actual_size();
 946   } else {
 947     *actual_size = 0;
 948   }
 949   return res;
 950 }
 951 
 952 HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
 953   intptr_t pacer_epoch = 0;
 954   bool in_new_region = false;
 955   HeapWord* result = nullptr;
 956 
 957   if (req.is_mutator_alloc()) {
 958     if (ShenandoahPacing) {
 959       pacer()->pace_for_alloc(req.size());
 960       pacer_epoch = pacer()->epoch();
 961     }
 962 
 963     if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
 964       result = allocate_memory_under_lock(req, in_new_region);
 965     }
 966 
 967     // Check that the GC overhead is not exceeded.
 968     //
 969     // Shenandoah will grind along for quite a while allocating one
 970     // object at a time using shared (non-TLAB) allocations. This check
 971     // tests whether the GC overhead limit has been exceeded.
 972     // This will notify the collector to start a cycle, but will raise
 973     // an OOME to the mutator if the last Full GCs have not made progress.
 974     // gc_no_progress_count is incremented after each degenerated or full GC that fails to achieve is_good_progress().
 975     if ((result == nullptr) && !req.is_lab_alloc() && (get_gc_no_progress_count() > ShenandoahNoProgressThreshold)) {
 976       control_thread()->handle_alloc_failure(req, false);
 977       req.set_actual_size(0);
 978       return nullptr;
 979     }
 980 
 981     if (result == nullptr) {
 982       // Block until the control thread has reacted, then retry the allocation.
 983       //
 984       // It might happen that a thread requesting allocation unblocks long after
 985       // the GC happened, only to fail the second allocation, because other threads
 986       // have already depleted the free storage. In this case, a better strategy
 987       // is to try again, until at least one full GC has completed.
 988       //
 989       // Stop retrying and return nullptr (raising an OutOfMemoryError) if the allocation failed even after:
 990       //   a) We experienced a GC that had good progress, or
 991       //   b) We experienced at least one Full GC (whether or not it had good progress)
 992 
 993       size_t original_count = shenandoah_policy()->full_gc_count();
 994       while ((result == nullptr) && (original_count == shenandoah_policy()->full_gc_count())) {
 995         control_thread()->handle_alloc_failure(req, true);
 996         result = allocate_memory_under_lock(req, in_new_region);
 997       }
 998       if (result != nullptr) {
 999         // If our allocation request has been satisfied after it initially failed, we count this as good GC progress
1000         notify_gc_progress();
1001       }
1002       if (log_develop_is_enabled(Debug, gc, alloc)) {
1003         ResourceMark rm;
1004         log_debug(gc, alloc)("Thread: %s, Result: " PTR_FORMAT ", Request: %s, Size: " SIZE_FORMAT
1005                              ", Original: " SIZE_FORMAT ", Latest: " SIZE_FORMAT,
1006                              Thread::current()->name(), p2i(result), req.type_string(), req.size(),
1007                              original_count, get_gc_no_progress_count());
1008       }
1009     }
1010   } else {
1011     assert(req.is_gc_alloc(), "Can only accept GC allocs here");
1012     result = allocate_memory_under_lock(req, in_new_region);
1013     // Do not call handle_alloc_failure() here, because we cannot block.
1014     // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
1015   }
1016 
1017   if (in_new_region) {
1018     notify_heap_changed();
1019   }
1020 
1021   if (result == nullptr) {
1022     req.set_actual_size(0);
1023   }
1024 
1025   // This is called regardless of the outcome of the allocation to account
1026   // for any waste created by retiring regions with this request.
1027   increase_used(req);
1028 
1029   if (result != nullptr) {
1030     size_t requested = req.size();
1031     size_t actual = req.actual_size();
1032 
1033     assert (req.is_lab_alloc() || (requested == actual),
1034             "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
1035             ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);
1036 
1037     if (req.is_mutator_alloc()) {
1038       // If we requested more than we were granted, give the rest back to pacer.
1039       // This only matters if we are in the same pacing epoch: do not try to unpace
1040       // over the budget for the other phase.
1041       if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
1042         pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
1043       }
1044     }
1045   }
1046 
1047   return result;
1048 }
1049 
1050 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
1051   // If we are dealing with mutator allocation, then we may need to block for safepoint.
1052   // We cannot block for safepoint for GC allocations, because there is a high chance
1053   // we are already running at safepoint or from stack watermark machinery, and we cannot
1054   // block again.
1055   ShenandoahHeapLocker locker(lock(), req.is_mutator_alloc());
1056 
1057   // Make sure the old generation has room for either evacuations or promotions before trying to allocate.
1058   if (req.is_old() && !old_generation()->can_allocate(req)) {
1059     return nullptr;
1060   }
1061 
1062   // If the TLAB request size is greater than the available memory, allocate() will attempt to downsize
1063   // the request to fit within the available memory.
1064   HeapWord* result = _free_set->allocate(req, in_new_region);
1065 
1066   // Record the plab configuration for this result and register the object.
1067   if (result != nullptr && req.is_old()) {
1068     old_generation()->configure_plab_for_current_thread(req);
1069     if (req.type() == ShenandoahAllocRequest::_alloc_shared_gc) {
1070       // Register the newly allocated object while we're holding the global lock since there's no synchronization
1071       // built in to the implementation of register_object().  There are potential races when multiple independent
1072       // threads are allocating objects, some of which might span the same card region.  For example, consider
1073       // a card table's memory region within which three objects are being allocated by three different threads:
1074       //
1075       // objects being "concurrently" allocated:
1076       //    [-----a------][-----b-----][--------------c------------------]
1077       //            [---- card table memory range --------------]
1078       //
1079       // Before any objects are allocated, this card's memory range holds no objects.  Note that allocation of object a
1080       // wants to set the starts-object, first-start, and last-start attributes of the preceding card region.
1081       // Allocation of object b wants to set the starts-object, first-start, and last-start attributes of this card region.
1082       // Allocation of object c also wants to set the starts-object, first-start, and last-start attributes of this
1083       // card region.
1084       //
1085       // The thread allocating b and the thread allocating c can "race" in various ways, resulting in confusion, such as
1086       // last-start representing object b while first-start represents object c.  This is why we need to require all
1087       // register_object() invocations to be "mutually exclusive" with respect to each card's memory range.
1088       old_generation()->card_scan()->register_object(result);
1089     }
1090   }
1091 
1092   return result;
1093 }
1094 
1095 HeapWord* ShenandoahHeap::mem_allocate(size_t size,
1096                                         bool*  gc_overhead_limit_was_exceeded) {
1097   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
1098   return allocate_memory(req);
1099 }
1100 
1101 MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
1102                                                              size_t size,
1103                                                              Metaspace::MetadataType mdtype) {
1104   MetaWord* result;
1105 
1106   // Inform metaspace OOM to GC heuristics if class unloading is possible.
1107   ShenandoahHeuristics* h = global_generation()->heuristics();
1108   if (h->can_unload_classes()) {
1109     h->record_metaspace_oom();
1110   }
1111 
1112   // Expand and retry allocation
1113   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1114   if (result != nullptr) {
1115     return result;
1116   }
1117 
1118   // Start full GC
1119   collect(GCCause::_metadata_GC_clear_soft_refs);
1120 
1121   // Retry allocation
1122   result = loader_data->metaspace_non_null()->allocate(size, mdtype);
1123   if (result != nullptr) {
1124     return result;
1125   }
1126 
1127   // Expand and retry allocation
1128   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1129   if (result != nullptr) {
1130     return result;
1131   }
1132 
1133   // Out of memory
1134   return nullptr;
1135 }
1136 
1137 class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
1138 private:
1139   ShenandoahHeap* const _heap;
1140   Thread* const _thread;
1141 public:
1142   ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
1143     _heap(heap), _thread(Thread::current()) {}
1144 
1145   void do_object(oop p) {
1146     shenandoah_assert_marked(nullptr, p);
1147     if (!p->is_forwarded()) {
1148       _heap->evacuate_object(p, _thread);
1149     }
1150   }
1151 };
1152 
1153 class ShenandoahEvacuationTask : public WorkerTask {
1154 private:
1155   ShenandoahHeap* const _sh;
1156   ShenandoahCollectionSet* const _cs;
1157   bool _concurrent;
1158 public:
1159   ShenandoahEvacuationTask(ShenandoahHeap* sh,
1160                            ShenandoahCollectionSet* cs,
1161                            bool concurrent) :
1162     WorkerTask("Shenandoah Evacuation"),
1163     _sh(sh),
1164     _cs(cs),
1165     _concurrent(concurrent)
1166   {}
1167 
1168   void work(uint worker_id) {
1169     if (_concurrent) {
1170       ShenandoahConcurrentWorkerSession worker_session(worker_id);
1171       ShenandoahSuspendibleThreadSetJoiner stsj;
1172       ShenandoahEvacOOMScope oom_evac_scope;
1173       do_work();
1174     } else {
1175       ShenandoahParallelWorkerSession worker_session(worker_id);
1176       ShenandoahEvacOOMScope oom_evac_scope;
1177       do_work();
1178     }
1179   }
1180 
1181 private:
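       // Workers cooperatively drain the collection set: each claims regions, evacuates every
       // live object in the claimed region, reports progress to the pacer (if pacing is enabled),
       // and bails out early if the GC has been cancelled.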
1182   void do_work() {
1183     ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
1184     ShenandoahHeapRegion* r;
1185     while ((r =_cs->claim_next()) != nullptr) {
1186       assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
1187       _sh->marked_object_iterate(r, &cl);
1188 
1189       if (ShenandoahPacing) {
1190         _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
1191       }
1192 
1193       if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
1194         break;
1195       }
1196     }
1197   }
1198 };
1199 
1200 void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
1201   ShenandoahEvacuationTask task(this, _collection_set, concurrent);
1202   workers()->run_task(&task);
1203 }
1204 
1205 oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
1206   assert(thread == Thread::current(), "Expected thread parameter to be current thread.");
1207   if (ShenandoahThreadLocalData::is_oom_during_evac(thread)) {
1208     // This thread went through the OOM-during-evacuation protocol. It is safe to return
1209     // the forwarding pointer. It must not attempt to evacuate any other objects.
1210     return ShenandoahBarrierSet::resolve_forwarded(p);
1211   }
1212 
1213   assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
1214 
1215   ShenandoahHeapRegion* r = heap_region_containing(p);
1216   assert(!r->is_humongous(), "never evacuate humongous objects");
1217 
1218   ShenandoahAffiliation target_gen = r->affiliation();
1219   return try_evacuate_object(p, thread, r, target_gen);
1220 }
1221 
1222 oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapRegion* from_region,
1223                                                ShenandoahAffiliation target_gen) {
1224   assert(target_gen == YOUNG_GENERATION, "Only expect evacuations to young in this mode");
1225   assert(from_region->is_young(), "Only expect evacuations from young in this mode");
1226   bool alloc_from_lab = true;
1227   HeapWord* copy = nullptr;
1228   size_t size = p->size();
1229 
1230 #ifdef ASSERT
1231   if (ShenandoahOOMDuringEvacALot &&
1232       (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
1233     copy = nullptr;
1234   } else {
1235 #endif
1236     if (UseTLAB) {
1237       copy = allocate_from_gclab(thread, size);
1238     }
1239     if (copy == nullptr) {
1240       // If we failed to allocate in LAB, we'll try a shared allocation.
1241       ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, target_gen);
1242       copy = allocate_memory(req);
1243       alloc_from_lab = false;
1244     }
1245 #ifdef ASSERT
1246   }
1247 #endif
1248 
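       // Could not allocate a copy for evacuation: notify the control thread of the allocation
       // failure, enter the OOM-during-evacuation protocol, and return whatever forwardee is
       // currently published (possibly the original object itself).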
1249   if (copy == nullptr) {
1250     control_thread()->handle_alloc_failure_evac(size);
1251 
1252     _oom_evac_handler.handle_out_of_memory_during_evacuation();
1253 
1254     return ShenandoahBarrierSet::resolve_forwarded(p);
1255   }
1256 
1257   // Copy the object:
1258   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
1259 
1260   // Try to install the new forwarding pointer.
1261   oop copy_val = cast_to_oop(copy);
1262   oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
1263   if (result == copy_val) {
1264     // Successfully evacuated. Our copy is now the public one!
1265     ContinuationGCSupport::relativize_stack_chunk(copy_val);
1266     shenandoah_assert_correct(nullptr, copy_val);
1267     return copy_val;
1268   }  else {
1269     // Failed to evacuate. We need to deal with the object that is left behind. Since this
1270     // new allocation is certainly after TAMS, it will be considered live in the next cycle.
1271     // But if it happens to contain references to evacuated regions, those references would
1272     // not get updated for this stale copy during this cycle, and we will crash while scanning
1273     // it the next cycle.
1274     if (alloc_from_lab) {
1275       // For LAB allocations, it is enough to rollback the allocation ptr. Either the next
1276       // object will overwrite this stale copy, or the filler object on LAB retirement will
1277       // do this.
1278       ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
1279     } else {
1280       // For non-LAB allocations, we have no way to retract the allocation, and
1281       // have to explicitly overwrite the copy with the filler object. With that overwrite,
1282       // we have to keep the fwdptr initialized and pointing to our (stale) copy.
1283       assert(size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
1284       fill_with_object(copy, size);
1285       shenandoah_assert_correct(nullptr, copy_val);
1286       // For non-LAB allocations, the object has already been registered
1287     }
1288     shenandoah_assert_correct(nullptr, result);
1289     return result;
1290   }
1291 }
1292 
1293 void ShenandoahHeap::trash_cset_regions() {
1294   ShenandoahHeapLocker locker(lock());
1295 
1296   ShenandoahCollectionSet* set = collection_set();
1297   ShenandoahHeapRegion* r;
1298   set->clear_current_index();
1299   while ((r = set->next()) != nullptr) {
1300     r->make_trash();
1301   }
1302   collection_set()->clear();
1303 }
1304 
1305 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
1306   st->print_cr("Heap Regions:");
1307   st->print_cr("Region state: EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HP=pinned humongous start");
1308   st->print_cr("              HC=humongous continuation, CS=collection set, TR=trash, P=pinned, CSP=pinned collection set");
1309   st->print_cr("BTE=bottom/top/end, TAMS=top-at-mark-start");
1310   st->print_cr("UWM=update watermark, U=used");
1311   st->print_cr("T=TLAB allocs, G=GCLAB allocs");
1312   st->print_cr("S=shared allocs, L=live data");
1313   st->print_cr("CP=critical pins");
1314 
1315   for (size_t i = 0; i < num_regions(); i++) {
1316     get_region(i)->print_on(st);
1317   }
1318 }
1319 
1320 size_t ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
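  // A humongous object occupies a contiguous run of regions starting at "start". Compute the
  // number of regions it spans from the object size, immediately trash every one of them, and
  // return that count.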
1321   assert(start->is_humongous_start(), "reclaim regions starting with the first one");
1322 
1323   oop humongous_obj = cast_to_oop(start->bottom());
1324   size_t size = humongous_obj->size();
1325   size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
1326   size_t index = start->index() + required_regions - 1;
1327 
1328   assert(!start->has_live(), "liveness must be zero");
1329 
  for (size_t i = 0; i < required_regions; i++) {
    // Reclaim from the tail. Otherwise, an assertion fails when printing the region to the trace
    // log, because it expects every humongous continuation region to follow an intact humongous
    // start region.
    ShenandoahHeapRegion* region = get_region(index--);
1334 
1335     assert(region->is_humongous(), "expect correct humongous start or continuation");
1336     assert(!region->is_cset(), "Humongous region should not be in collection set");
1337 
1338     region->make_trash_immediate();
1339   }
1340   return required_regions;
1341 }
1342 
1343 class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
1344 public:
1345   ShenandoahCheckCleanGCLABClosure() {}
1346   void do_thread(Thread* thread) {
1347     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1348     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1349     assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
1350 
1351     if (ShenandoahHeap::heap()->mode()->is_generational()) {
1352       PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1353       assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
1354       assert(plab->words_remaining() == 0, "PLAB should not need retirement");
1355     }
1356   }
1357 };
1358 
1359 class ShenandoahRetireGCLABClosure : public ThreadClosure {
1360 private:
1361   bool const _resize;
1362 public:
1363   ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
1364   void do_thread(Thread* thread) {
1365     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1366     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1367     gclab->retire();
1368     if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1369       ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1370     }
1371 
1372     if (ShenandoahHeap::heap()->mode()->is_generational()) {
1373       PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1374       assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
1375 
1376       // There are two reasons to retire all plabs between old-gen evacuation passes.
1377       //  1. We need to make the plab memory parsable by remembered-set scanning.
      //  2. We need to establish a trustworthy UpdateWaterMark value within each old-gen heap region.
1379       ShenandoahGenerationalHeap::heap()->retire_plab(plab, thread);
1380       if (_resize && ShenandoahThreadLocalData::plab_size(thread) > 0) {
1381         ShenandoahThreadLocalData::set_plab_size(thread, 0);
1382       }
1383     }
1384   }
1385 };
1386 
1387 void ShenandoahHeap::labs_make_parsable() {
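  // Make all allocation buffers parsable: mutator TLABs are made parsable in place, and GCLABs
  // (plus PLABs in generational mode) of Java and worker threads are retired without resizing.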
1388   assert(UseTLAB, "Only call with UseTLAB");
1389 
1390   ShenandoahRetireGCLABClosure cl(false);
1391 
1392   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1393     ThreadLocalAllocBuffer& tlab = t->tlab();
1394     tlab.make_parsable();
1395     cl.do_thread(t);
1396   }
1397 
1398   workers()->threads_do(&cl);
1399 }
1400 
1401 void ShenandoahHeap::tlabs_retire(bool resize) {
1402   assert(UseTLAB, "Only call with UseTLAB");
1403   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1404 
1405   ThreadLocalAllocStats stats;
1406 
1407   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1408     ThreadLocalAllocBuffer& tlab = t->tlab();
1409     tlab.retire(&stats);
1410     if (resize) {
1411       tlab.resize();
1412     }
1413   }
1414 
1415   stats.publish();
1416 
1417 #ifdef ASSERT
1418   ShenandoahCheckCleanGCLABClosure cl;
1419   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1420     cl.do_thread(t);
1421   }
1422   workers()->threads_do(&cl);
1423 #endif
1424 }
1425 
1426 void ShenandoahHeap::gclabs_retire(bool resize) {
1427   assert(UseTLAB, "Only call with UseTLAB");
1428   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1429 
1430   ShenandoahRetireGCLABClosure cl(resize);
1431   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1432     cl.do_thread(t);
1433   }
1434   workers()->threads_do(&cl);
1435 
1436   if (safepoint_workers() != nullptr) {
1437     safepoint_workers()->threads_do(&cl);
1438   }
1439 }
1440 
1441 // Returns size in bytes
1442 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
  // Return the max allowed size, and let the allocation path
  // figure out the safe size for the current allocation.
1445   return ShenandoahHeapRegion::max_tlab_size_bytes();
1446 }
1447 
1448 size_t ShenandoahHeap::max_tlab_size() const {
1449   // Returns size in words
1450   return ShenandoahHeapRegion::max_tlab_size_words();
1451 }
1452 
1453 void ShenandoahHeap::collect_as_vm_thread(GCCause::Cause cause) {
1454   // These requests are ignored because we can't easily have Shenandoah jump into
1455   // a synchronous (degenerated or full) cycle while it is in the middle of a concurrent
1456   // cycle. We _could_ cancel the concurrent cycle and then try to run a cycle directly
1457   // on the VM thread, but this would confuse the control thread mightily and doesn't
1458   // seem worth the trouble. Instead, we will have the caller thread run (and wait for) a
1459   // concurrent cycle in the prologue of the heap inspect/dump operation. This is how
1460   // other concurrent collectors in the JVM handle this scenario as well.
1461   assert(Thread::current()->is_VM_thread(), "Should be the VM thread");
1462   guarantee(cause == GCCause::_heap_dump || cause == GCCause::_heap_inspection, "Invalid cause");
1463 }
1464 
1465 void ShenandoahHeap::collect(GCCause::Cause cause) {
1466   control_thread()->request_gc(cause);
1467 }
1468 
1469 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
1470   //assert(false, "Shouldn't need to do full collections");
1471 }
1472 
1473 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1474   ShenandoahHeapRegion* r = heap_region_containing(addr);
1475   if (r != nullptr) {
1476     return r->block_start(addr);
1477   }
1478   return nullptr;
1479 }
1480 
1481 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1482   ShenandoahHeapRegion* r = heap_region_containing(addr);
1483   return r->block_is_obj(addr);
1484 }
1485 
1486 bool ShenandoahHeap::print_location(outputStream* st, void* addr) const {
1487   return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr);
1488 }
1489 
1490 void ShenandoahHeap::prepare_for_verify() {
1491   if (SafepointSynchronize::is_at_safepoint() && UseTLAB) {
1492     labs_make_parsable();
1493   }
1494 }
1495 
1496 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1497   if (_shenandoah_policy->is_at_shutdown()) {
1498     return;
1499   }
1500 
1501   if (_control_thread != nullptr) {
1502     tcl->do_thread(_control_thread);
1503   }
1504 
1505   workers()->threads_do(tcl);
1506   if (_safepoint_workers != nullptr) {
1507     _safepoint_workers->threads_do(tcl);
1508   }
1509 }
1510 
1511 void ShenandoahHeap::print_tracing_info() const {
1512   LogTarget(Info, gc, stats) lt;
1513   if (lt.is_enabled()) {
1514     ResourceMark rm;
1515     LogStream ls(lt);
1516 
1517     phase_timings()->print_global_on(&ls);
1518 
1519     ls.cr();
1520     ls.cr();
1521 
1522     shenandoah_policy()->print_gc_stats(&ls);
1523 
1524     ls.cr();
1525     ls.cr();
1526   }
1527 }
1528 
1529 void ShenandoahHeap::set_gc_generation(ShenandoahGeneration* generation) {
1530   shenandoah_assert_control_or_vm_thread_at_safepoint();
1531   _gc_generation = generation;
1532 }
1533 
1534 // Active generation may only be set by the VM thread at a safepoint.
1535 void ShenandoahHeap::set_active_generation() {
1536   assert(Thread::current()->is_VM_thread(), "Only the VM Thread");
1537   assert(SafepointSynchronize::is_at_safepoint(), "Only at a safepoint!");
1538   assert(_gc_generation != nullptr, "Will set _active_generation to nullptr");
1539   _active_generation = _gc_generation;
1540 }
1541 
1542 void ShenandoahHeap::on_cycle_start(GCCause::Cause cause, ShenandoahGeneration* generation) {
1543   shenandoah_policy()->record_collection_cause(cause);
1544 
1545   const GCCause::Cause current = gc_cause();
1546   assert(current == GCCause::_no_gc, "Over-writing cause: %s, with: %s",
1547          GCCause::to_string(current), GCCause::to_string(cause));
1548   assert(_gc_generation == nullptr, "Over-writing _gc_generation");
1549 
1550   set_gc_cause(cause);
1551   set_gc_generation(generation);
1552 
1553   generation->heuristics()->record_cycle_start();
1554 }
1555 
1556 void ShenandoahHeap::on_cycle_end(ShenandoahGeneration* generation) {
1557   assert(gc_cause() != GCCause::_no_gc, "cause wasn't set");
1558   assert(_gc_generation != nullptr, "_gc_generation wasn't set");
1559 
1560   generation->heuristics()->record_cycle_end();
1561   if (mode()->is_generational() && generation->is_global()) {
1562     // If we just completed a GLOBAL GC, claim credit for completion of young-gen and old-gen GC as well
1563     young_generation()->heuristics()->record_cycle_end();
1564     old_generation()->heuristics()->record_cycle_end();
1565   }
1566 
1567   set_gc_generation(nullptr);
1568   set_gc_cause(GCCause::_no_gc);
1569 }
1570 
1571 void ShenandoahHeap::verify(VerifyOption vo) {
1572   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1573     if (ShenandoahVerify) {
1574       verifier()->verify_generic(vo);
1575     } else {
1576       // TODO: Consider allocating verification bitmaps on demand,
1577       // and turn this on unconditionally.
1578     }
1579   }
1580 }
1581 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1582   return _free_set->capacity();
1583 }
1584 
1585 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1586 private:
1587   MarkBitMap* _bitmap;
1588   ShenandoahScanObjectStack* _oop_stack;
1589   ShenandoahHeap* const _heap;
1590   ShenandoahMarkingContext* const _marking_context;
1591 
1592   template <class T>
1593   void do_oop_work(T* p) {
1594     T o = RawAccess<>::oop_load(p);
1595     if (!CompressedOops::is_null(o)) {
1596       oop obj = CompressedOops::decode_not_null(o);
1597       if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
1598         // There may be dead oops in weak roots in concurrent root phase, do not touch them.
1599         return;
1600       }
1601       obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
1602 
1603       assert(oopDesc::is_oop(obj), "must be a valid oop");
1604       if (!_bitmap->is_marked(obj)) {
1605         _bitmap->mark(obj);
1606         _oop_stack->push(obj);
1607       }
1608     }
1609   }
1610 public:
1611   ObjectIterateScanRootClosure(MarkBitMap* bitmap, ShenandoahScanObjectStack* oop_stack) :
1612     _bitmap(bitmap), _oop_stack(oop_stack), _heap(ShenandoahHeap::heap()),
1613     _marking_context(_heap->marking_context()) {}
1614   void do_oop(oop* p)       { do_oop_work(p); }
1615   void do_oop(narrowOop* p) { do_oop_work(p); }
1616 };
1617 
1618 /*
 * This is public API, used in preparation for object_iterate().
 * Since we don't do a linear scan of the heap in object_iterate() (see comment below), we don't
 * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can
 * control, we call SH::tlabs_retire, SH::gclabs_retire.
1623  */
1624 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
1625   // No-op.
1626 }
1627 
1628 /*
1629  * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
1630  *
1631  * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
1632  * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
1633  * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
1634  * scanning therefore depends on having a valid marking bitmap to support it. However, we only
1635  * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
1636  * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
1637  * wiped the bitmap in preparation for next marking).
1638  *
1639  * For all those reasons, we implement object iteration as a single marking traversal, reporting
1640  * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1641  * is allowed to report dead objects, but is not required to do so.
1642  */
1643 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1644   // Reset bitmap
1645   if (!prepare_aux_bitmap_for_iteration())
1646     return;
1647 
1648   ShenandoahScanObjectStack oop_stack;
1649   ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1650   // Seed the stack with root scan
1651   scan_roots_for_iteration(&oop_stack, &oops);
1652 
1653   // Work through the oop stack to traverse heap
1654   while (! oop_stack.is_empty()) {
1655     oop obj = oop_stack.pop();
1656     assert(oopDesc::is_oop(obj), "must be a valid oop");
1657     cl->do_object(obj);
1658     obj->oop_iterate(&oops);
1659   }
1660 
1661   assert(oop_stack.is_empty(), "should be empty");
1662   // Reclaim bitmap
1663   reclaim_aux_bitmap_for_iteration();
1664 }
1665 
1666 bool ShenandoahHeap::prepare_aux_bitmap_for_iteration() {
1667   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1668 
1669   if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1670     log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1671     return false;
1672   }
1673   // Reset bitmap
1674   _aux_bit_map.clear();
1675   return true;
1676 }
1677 
1678 void ShenandoahHeap::scan_roots_for_iteration(ShenandoahScanObjectStack* oop_stack, ObjectIterateScanRootClosure* oops) {
  // Process GC roots according to the current GC cycle.
  // This populates the work stack with the initial objects.
  // It is important to relinquish the associated locks before diving
  // into the heap dumper.
1683   uint n_workers = safepoint_workers() != nullptr ? safepoint_workers()->active_workers() : 1;
1684   ShenandoahHeapIterationRootScanner rp(n_workers);
1685   rp.roots_do(oops);
1686 }
1687 
1688 void ShenandoahHeap::reclaim_aux_bitmap_for_iteration() {
1689   if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
1690     log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
1691   }
1692 }
1693 
// Closure for parallel object iteration
1695 class ShenandoahObjectIterateParScanClosure : public BasicOopIterateClosure {
1696 private:
1697   MarkBitMap* _bitmap;
1698   ShenandoahObjToScanQueue* _queue;
1699   ShenandoahHeap* const _heap;
1700   ShenandoahMarkingContext* const _marking_context;
1701 
1702   template <class T>
1703   void do_oop_work(T* p) {
1704     T o = RawAccess<>::oop_load(p);
1705     if (!CompressedOops::is_null(o)) {
1706       oop obj = CompressedOops::decode_not_null(o);
1707       if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
1708         // There may be dead oops in weak roots in concurrent root phase, do not touch them.
1709         return;
1710       }
1711       obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
1712 
1713       assert(oopDesc::is_oop(obj), "Must be a valid oop");
1714       if (_bitmap->par_mark(obj)) {
1715         _queue->push(ShenandoahMarkTask(obj));
1716       }
1717     }
1718   }
1719 public:
1720   ShenandoahObjectIterateParScanClosure(MarkBitMap* bitmap, ShenandoahObjToScanQueue* q) :
1721     _bitmap(bitmap), _queue(q), _heap(ShenandoahHeap::heap()),
1722     _marking_context(_heap->marking_context()) {}
1723   void do_oop(oop* p)       { do_oop_work(p); }
1724   void do_oop(narrowOop* p) { do_oop_work(p); }
1725 };
1726 
// Object iterator for parallel heap iteration.
// The root scanning phase happens during construction, preparing the
// parallel marking queues.
// Every worker processes its own marking queue; work stealing is used
// to balance the workload.
1732 class ShenandoahParallelObjectIterator : public ParallelObjectIteratorImpl {
1733 private:
1734   uint                         _num_workers;
1735   bool                         _init_ready;
1736   MarkBitMap*                  _aux_bit_map;
1737   ShenandoahHeap*              _heap;
1738   ShenandoahScanObjectStack    _roots_stack; // global roots stack
1739   ShenandoahObjToScanQueueSet* _task_queues;
1740 public:
1741   ShenandoahParallelObjectIterator(uint num_workers, MarkBitMap* bitmap) :
1742         _num_workers(num_workers),
1743         _init_ready(false),
1744         _aux_bit_map(bitmap),
1745         _heap(ShenandoahHeap::heap()) {
1746     // Initialize bitmap
1747     _init_ready = _heap->prepare_aux_bitmap_for_iteration();
1748     if (!_init_ready) {
1749       return;
1750     }
1751 
1752     ObjectIterateScanRootClosure oops(_aux_bit_map, &_roots_stack);
1753     _heap->scan_roots_for_iteration(&_roots_stack, &oops);
1754 
1755     _init_ready = prepare_worker_queues();
1756   }
1757 
1758   ~ShenandoahParallelObjectIterator() {
1759     // Reclaim bitmap
1760     _heap->reclaim_aux_bitmap_for_iteration();
1761     // Reclaim queue for workers
    if (_task_queues != nullptr) {
1763       for (uint i = 0; i < _num_workers; ++i) {
1764         ShenandoahObjToScanQueue* q = _task_queues->queue(i);
1765         if (q != nullptr) {
1766           delete q;
1767           _task_queues->register_queue(i, nullptr);
1768         }
1769       }
1770       delete _task_queues;
1771       _task_queues = nullptr;
1772     }
1773   }
1774 
1775   virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
1776     if (_init_ready) {
1777       object_iterate_parallel(cl, worker_id, _task_queues);
1778     }
1779   }
1780 
1781 private:
1782   // Divide global root_stack into worker queues
1783   bool prepare_worker_queues() {
1784     _task_queues = new ShenandoahObjToScanQueueSet((int) _num_workers);
    // Initialize a queue for every worker
1786     for (uint i = 0; i < _num_workers; ++i) {
1787       ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
1788       _task_queues->register_queue(i, task_queue);
1789     }
    // Divide roots among the workers. Assume that the distribution of object references
    // is related to the root kind, so use round-robin to give every worker the same chance
    // to process every kind of root.
1793     size_t roots_num = _roots_stack.size();
1794     if (roots_num == 0) {
1795       // No work to do
1796       return false;
1797     }
1798 
1799     for (uint j = 0; j < roots_num; j++) {
1800       uint stack_id = j % _num_workers;
1801       oop obj = _roots_stack.pop();
1802       _task_queues->queue(stack_id)->push(ShenandoahMarkTask(obj));
1803     }
1804     return true;
1805   }
1806 
1807   void object_iterate_parallel(ObjectClosure* cl,
1808                                uint worker_id,
1809                                ShenandoahObjToScanQueueSet* queue_set) {
1810     assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1811     assert(queue_set != nullptr, "task queue must not be null");
1812 
1813     ShenandoahObjToScanQueue* q = queue_set->queue(worker_id);
1814     assert(q != nullptr, "object iterate queue must not be null");
1815 
1816     ShenandoahMarkTask t;
1817     ShenandoahObjectIterateParScanClosure oops(_aux_bit_map, q);
1818 
1819     // Work through the queue to traverse heap.
1820     // Steal when there is no task in queue.
1821     while (q->pop(t) || queue_set->steal(worker_id, t)) {
1822       oop obj = t.obj();
1823       assert(oopDesc::is_oop(obj), "must be a valid oop");
1824       cl->do_object(obj);
1825       obj->oop_iterate(&oops);
1826     }
1827     assert(q->is_empty(), "should be empty");
1828   }
1829 };
1830 
1831 ParallelObjectIteratorImpl* ShenandoahHeap::parallel_object_iterator(uint workers) {
1832   return new ShenandoahParallelObjectIterator(workers, &_aux_bit_map);
1833 }
1834 
1835 // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
1836 void ShenandoahHeap::keep_alive(oop obj) {
1837   if (is_concurrent_mark_in_progress() && (obj != nullptr)) {
1838     ShenandoahBarrierSet::barrier_set()->enqueue(obj);
1839   }
1840 }
1841 
1842 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1843   for (size_t i = 0; i < num_regions(); i++) {
1844     ShenandoahHeapRegion* current = get_region(i);
1845     blk->heap_region_do(current);
1846   }
1847 }
1848 
1849 class ShenandoahParallelHeapRegionTask : public WorkerTask {
1850 private:
1851   ShenandoahHeap* const _heap;
1852   ShenandoahHeapRegionClosure* const _blk;
1853   size_t const _stride;
1854 
1855   shenandoah_padding(0);
1856   volatile size_t _index;
1857   shenandoah_padding(1);
1858 
1859 public:
1860   ShenandoahParallelHeapRegionTask(ShenandoahHeapRegionClosure* blk, size_t stride) :
1861           WorkerTask("Shenandoah Parallel Region Operation"),
1862           _heap(ShenandoahHeap::heap()), _blk(blk), _stride(stride), _index(0) {}
1863 
1864   void work(uint worker_id) {
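    // Workers repeatedly claim a batch of _stride regions by atomically advancing the shared
    // index, then apply the closure to every region in the claimed batch.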
1865     ShenandoahParallelWorkerSession worker_session(worker_id);
1866     size_t stride = _stride;
1867 
1868     size_t max = _heap->num_regions();
1869     while (Atomic::load(&_index) < max) {
1870       size_t cur = Atomic::fetch_then_add(&_index, stride, memory_order_relaxed);
1871       size_t start = cur;
1872       size_t end = MIN2(cur + stride, max);
1873       if (start >= max) break;
1874 
1875       for (size_t i = cur; i < end; i++) {
1876         ShenandoahHeapRegion* current = _heap->get_region(i);
1877         _blk->heap_region_do(current);
1878       }
1879     }
1880   }
1881 };
1882 
1883 void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1884   assert(blk->is_thread_safe(), "Only thread-safe closures here");
1885   const uint active_workers = workers()->active_workers();
1886   const size_t n_regions = num_regions();
1887   size_t stride = ShenandoahParallelRegionStride;
1888   if (stride == 0 && active_workers > 1) {
1889     // Automatically derive the stride to balance the work between threads
1890     // evenly. Do not try to split work if below the reasonable threshold.
1891     constexpr size_t threshold = 4096;
1892     stride = n_regions <= threshold ?
1893             threshold :
1894             (n_regions + active_workers - 1) / active_workers;
1895   }
1896 
1897   if (n_regions > stride && active_workers > 1) {
1898     ShenandoahParallelHeapRegionTask task(blk, stride);
1899     workers()->run_task(&task);
1900   } else {
1901     heap_region_iterate(blk);
1902   }
1903 }
1904 
1905 class ShenandoahRendezvousClosure : public HandshakeClosure {
1906 public:
1907   inline ShenandoahRendezvousClosure() : HandshakeClosure("ShenandoahRendezvous") {}
1908   inline void do_thread(Thread* thread) {}
1909 };
1910 
1911 void ShenandoahHeap::rendezvous_threads() {
1912   ShenandoahRendezvousClosure cl;
1913   Handshake::execute(&cl);
1914 }
1915 
1916 void ShenandoahHeap::recycle_trash() {
1917   free_set()->recycle_trash();
1918 }
1919 
1920 void ShenandoahHeap::do_class_unloading() {
1921   _unloader.unload();
1922   if (mode()->is_generational()) {
1923     old_generation()->set_parsable(false);
1924   }
1925 }
1926 
1927 void ShenandoahHeap::stw_weak_refs(bool full_gc) {
1928   // Weak refs processing
1929   ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
1930                                                 : ShenandoahPhaseTimings::degen_gc_weakrefs;
1931   ShenandoahTimingsTracker t(phase);
1932   ShenandoahGCWorkerPhase worker_phase(phase);
1933   shenandoah_assert_generations_reconciled();
1934   gc_generation()->ref_processor()->process_references(phase, workers(), false /* concurrent */);
1935 }
1936 
1937 void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
1938   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
1939 
1940   // Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to
1941   // make them parsable for update code to work correctly. Plus, we can compute new sizes
1942   // for future GCLABs here.
1943   if (UseTLAB) {
1944     ShenandoahGCPhase phase(concurrent ?
1945                             ShenandoahPhaseTimings::init_update_refs_manage_gclabs :
1946                             ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
1947     gclabs_retire(ResizeTLAB);
1948   }
1949 
1950   _update_refs_iterator.reset();
1951 }
1952 
1953 void ShenandoahHeap::propagate_gc_state_to_java_threads() {
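  // Java threads keep a thread-local copy of the gc state. If the canonical state changed since
  // it was last distributed, copy the new value into every Java thread's thread-local data while
  // the world is stopped.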
1954   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
1955   if (_gc_state_changed) {
1956     _gc_state_changed = false;
1957     char state = gc_state();
1958     for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1959       ShenandoahThreadLocalData::set_gc_state(t, state);
1960     }
1961   }
1962 }
1963 
1964 void ShenandoahHeap::set_gc_state(uint mask, bool value) {
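  // Record the state change; _gc_state_changed makes the next call to
  // propagate_gc_state_to_java_threads() distribute the new value to the Java threads.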
1965   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
1966   _gc_state.set_cond(mask, value);
1967   _gc_state_changed = true;
1968 }
1969 
1970 void ShenandoahHeap::set_concurrent_young_mark_in_progress(bool in_progress) {
1971   uint mask;
1972   assert(!has_forwarded_objects(), "Young marking is not concurrent with evacuation");
1973   if (!in_progress && is_concurrent_old_mark_in_progress()) {
1974     assert(mode()->is_generational(), "Only generational GC has old marking");
1975     assert(_gc_state.is_set(MARKING), "concurrent_old_marking_in_progress implies MARKING");
1976     // If old-marking is in progress when we turn off YOUNG_MARKING, leave MARKING (and OLD_MARKING) on
1977     mask = YOUNG_MARKING;
1978   } else {
1979     mask = MARKING | YOUNG_MARKING;
1980   }
1981   set_gc_state(mask, in_progress);
1982   manage_satb_barrier(in_progress);
1983 }
1984 
1985 void ShenandoahHeap::set_concurrent_old_mark_in_progress(bool in_progress) {
1986 #ifdef ASSERT
1987   // has_forwarded_objects() iff UPDATEREFS or EVACUATION
1988   bool has_forwarded = has_forwarded_objects();
1989   bool updating_or_evacuating = _gc_state.is_set(UPDATEREFS | EVACUATION);
1990   bool evacuating = _gc_state.is_set(EVACUATION);
1991   assert ((has_forwarded == updating_or_evacuating) || (evacuating && !has_forwarded && collection_set()->is_empty()),
1992           "Updating or evacuating iff has forwarded objects, or if evacuation phase is promoting in place without forwarding");
1993 #endif
1994   if (!in_progress && is_concurrent_young_mark_in_progress()) {
1995     // If young-marking is in progress when we turn off OLD_MARKING, leave MARKING (and YOUNG_MARKING) on
1996     assert(_gc_state.is_set(MARKING), "concurrent_young_marking_in_progress implies MARKING");
1997     set_gc_state(OLD_MARKING, in_progress);
1998   } else {
1999     set_gc_state(MARKING | OLD_MARKING, in_progress);
2000   }
2001   manage_satb_barrier(in_progress);
2002 }
2003 
2004 bool ShenandoahHeap::is_prepare_for_old_mark_in_progress() const {
2005   return old_generation()->is_preparing_for_mark();
2006 }
2007 
2008 void ShenandoahHeap::manage_satb_barrier(bool active) {
2009   if (is_concurrent_mark_in_progress()) {
2010     // Ignore request to deactivate barrier while concurrent mark is in progress.
2011     // Do not attempt to re-activate the barrier if it is already active.
2012     if (active && !ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
2013       ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
2014     }
2015   } else {
2016     // No concurrent marking is in progress so honor request to deactivate,
2017     // but only if the barrier is already active.
2018     if (!active && ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
2019       ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
2020     }
2021   }
2022 }
2023 
2024 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
2025   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
2026   set_gc_state(EVACUATION, in_progress);
2027 }
2028 
2029 void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
2030   if (in_progress) {
2031     _concurrent_strong_root_in_progress.set();
2032   } else {
2033     _concurrent_strong_root_in_progress.unset();
2034   }
2035 }
2036 
2037 void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
2038   set_gc_state(WEAK_ROOTS, cond);
2039 }
2040 
2041 GCTracer* ShenandoahHeap::tracer() {
2042   return shenandoah_policy()->tracer();
2043 }
2044 
2045 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
2046   return _free_set->used();
2047 }
2048 
2049 bool ShenandoahHeap::try_cancel_gc() {
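  // Atomically flip the cancellation flag from CANCELLABLE to CANCELLED. Only the thread that
  // performs the transition sees CANCELLABLE as the previous value and reports success.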
2050   jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
2051   return prev == CANCELLABLE;
2052 }
2053 
2054 void ShenandoahHeap::cancel_concurrent_mark() {
2055   if (mode()->is_generational()) {
2056     young_generation()->cancel_marking();
2057     old_generation()->cancel_marking();
2058   }
2059 
2060   global_generation()->cancel_marking();
2061 
2062   ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
2063 }
2064 
2065 void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
2066   if (try_cancel_gc()) {
2067     FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
2068     log_info(gc)("%s", msg.buffer());
2069     Events::log(Thread::current(), "%s", msg.buffer());
2070     _cancel_requested_time = os::elapsedTime();
2071   }
2072 }
2073 
2074 uint ShenandoahHeap::max_workers() {
2075   return _max_workers;
2076 }
2077 
2078 void ShenandoahHeap::stop() {
2079   // The shutdown sequence should be able to terminate when GC is running.
2080 
2081   // Step 0. Notify policy to disable event recording.
2082   _shenandoah_policy->record_shutdown();
2083 
2084   // Step 0a. Stop reporting on gc thread cpu utilization
2085   mmu_tracker()->stop();
2086 
2087   // Step 1. Notify control thread that we are in shutdown.
2088   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
2089   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
2090   control_thread()->prepare_for_graceful_shutdown();
2091 
2092   // Step 2. Notify GC workers that we are cancelling GC.
2093   cancel_gc(GCCause::_shenandoah_stop_vm);
2094 
2095   // Step 3. Wait until GC worker exits normally.
2096   control_thread()->stop();
2097 }
2098 
2099 void ShenandoahHeap::stw_unload_classes(bool full_gc) {
2100   if (!unload_classes()) return;
2101   ClassUnloadingContext ctx(_workers->active_workers(),
2102                             true /* unregister_nmethods_during_purge */,
2103                             false /* lock_codeblob_free_separately */);
2104 
2105   // Unload classes and purge SystemDictionary.
2106   {
2107     ShenandoahPhaseTimings::Phase phase = full_gc ?
2108                                           ShenandoahPhaseTimings::full_gc_purge_class_unload :
2109                                           ShenandoahPhaseTimings::degen_gc_purge_class_unload;
2110     ShenandoahIsAliveSelector is_alive;
2111     {
2112       CodeCache::UnlinkingScope scope(is_alive.is_alive_closure());
2113       ShenandoahGCPhase gc_phase(phase);
2114       ShenandoahGCWorkerPhase worker_phase(phase);
2115       bool unloading_occurred = SystemDictionary::do_unloading(gc_timer());
2116 
2117       uint num_workers = _workers->active_workers();
2118       ShenandoahClassUnloadingTask unlink_task(phase, num_workers, unloading_occurred);
2119       _workers->run_task(&unlink_task);
2120     }
    // Release the memory of unloaded nmethods.
2122     ClassUnloadingContext::context()->purge_and_free_nmethods();
2123   }
2124 
2125   {
2126     ShenandoahGCPhase phase(full_gc ?
2127                             ShenandoahPhaseTimings::full_gc_purge_cldg :
2128                             ShenandoahPhaseTimings::degen_gc_purge_cldg);
2129     ClassLoaderDataGraph::purge(true /* at_safepoint */);
2130   }
2131   // Resize and verify metaspace
2132   MetaspaceGC::compute_new_size();
2133   DEBUG_ONLY(MetaspaceUtils::verify();)
2134 }
2135 
2136 // Weak roots are either pre-evacuated (final mark) or updated (final updaterefs),
2137 // so they should not have forwarded oops.
// However, we do need to "null" dead oops in the roots, if that cannot be done
// in concurrent cycles.
2140 void ShenandoahHeap::stw_process_weak_roots(bool full_gc) {
2141   uint num_workers = _workers->active_workers();
2142   ShenandoahPhaseTimings::Phase timing_phase = full_gc ?
2143                                                ShenandoahPhaseTimings::full_gc_purge_weak_par :
2144                                                ShenandoahPhaseTimings::degen_gc_purge_weak_par;
2145   ShenandoahGCPhase phase(timing_phase);
2146   ShenandoahGCWorkerPhase worker_phase(timing_phase);
2147   // Cleanup weak roots
2148   if (has_forwarded_objects()) {
2149     ShenandoahForwardedIsAliveClosure is_alive;
2150     ShenandoahUpdateRefsClosure keep_alive;
2151     ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveClosure, ShenandoahUpdateRefsClosure>
2152       cleaning_task(timing_phase, &is_alive, &keep_alive, num_workers);
2153     _workers->run_task(&cleaning_task);
2154   } else {
2155     ShenandoahIsAliveClosure is_alive;
2156 #ifdef ASSERT
2157     ShenandoahAssertNotForwardedClosure verify_cl;
2158     ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, ShenandoahAssertNotForwardedClosure>
2159       cleaning_task(timing_phase, &is_alive, &verify_cl, num_workers);
2160 #else
2161     ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, DoNothingClosure>
2162       cleaning_task(timing_phase, &is_alive, &do_nothing_cl, num_workers);
2163 #endif
2164     _workers->run_task(&cleaning_task);
2165   }
2166 }
2167 
2168 void ShenandoahHeap::parallel_cleaning(bool full_gc) {
2169   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2170   assert(is_stw_gc_in_progress(), "Only for Degenerated and Full GC");
2171   ShenandoahGCPhase phase(full_gc ?
2172                           ShenandoahPhaseTimings::full_gc_purge :
2173                           ShenandoahPhaseTimings::degen_gc_purge);
2174   stw_weak_refs(full_gc);
2175   stw_process_weak_roots(full_gc);
2176   stw_unload_classes(full_gc);
2177 }
2178 
2179 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
2180   set_gc_state(HAS_FORWARDED, cond);
2181 }
2182 
2183 void ShenandoahHeap::set_unload_classes(bool uc) {
2184   _unload_classes.set_cond(uc);
2185 }
2186 
2187 bool ShenandoahHeap::unload_classes() const {
2188   return _unload_classes.is_set();
2189 }
2190 
2191 address ShenandoahHeap::in_cset_fast_test_addr() {
2192   ShenandoahHeap* heap = ShenandoahHeap::heap();
2193   assert(heap->collection_set() != nullptr, "Sanity");
2194   return (address) heap->collection_set()->biased_map_address();
2195 }
2196 
2197 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
2198   if (mode()->is_generational()) {
2199     young_generation()->reset_bytes_allocated_since_gc_start();
2200     old_generation()->reset_bytes_allocated_since_gc_start();
2201   }
2202 
2203   global_generation()->reset_bytes_allocated_since_gc_start();
2204 }
2205 
2206 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
2207   _degenerated_gc_in_progress.set_cond(in_progress);
2208 }
2209 
2210 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2211   _full_gc_in_progress.set_cond(in_progress);
2212 }
2213 
2214 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2215   assert (is_full_gc_in_progress(), "should be");
2216   _full_gc_move_in_progress.set_cond(in_progress);
2217 }
2218 
2219 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2220   set_gc_state(UPDATEREFS, in_progress);
2221 }
2222 
2223 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2224   ShenandoahCodeRoots::register_nmethod(nm);
2225 }
2226 
2227 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
2228   ShenandoahCodeRoots::unregister_nmethod(nm);
2229 }
2230 
2231 void ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
2232   heap_region_containing(o)->record_pin();
2233 }
2234 
2235 void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
2236   ShenandoahHeapRegion* r = heap_region_containing(o);
2237   assert(r != nullptr, "Sanity");
2238   assert(r->pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", r->index());
2239   r->record_unpin();
2240 }
2241 
2242 void ShenandoahHeap::sync_pinned_region_status() {
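  // Object pinning only adjusts per-region pin counters. Here, under the heap lock, region states
  // are reconciled with those counters: pinned regions whose count dropped to zero are unpinned,
  // and active regions that acquired pins are made pinned.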
2243   ShenandoahHeapLocker locker(lock());
2244 
2245   for (size_t i = 0; i < num_regions(); i++) {
2246     ShenandoahHeapRegion *r = get_region(i);
2247     if (r->is_active()) {
2248       if (r->is_pinned()) {
2249         if (r->pin_count() == 0) {
2250           r->make_unpinned();
2251         }
2252       } else {
2253         if (r->pin_count() > 0) {
2254           r->make_pinned();
2255         }
2256       }
2257     }
2258   }
2259 
2260   assert_pinned_region_status();
2261 }
2262 
2263 #ifdef ASSERT
2264 void ShenandoahHeap::assert_pinned_region_status() {
2265   for (size_t i = 0; i < num_regions(); i++) {
2266     ShenandoahHeapRegion* r = get_region(i);
2267     shenandoah_assert_generations_reconciled();
2268     if (gc_generation()->contains(r)) {
2269       assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
2270              "Region " SIZE_FORMAT " pinning status is inconsistent", i);
2271     }
2272   }
2273 }
2274 #endif
2275 
2276 ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
2277   return _gc_timer;
2278 }
2279 
2280 void ShenandoahHeap::prepare_concurrent_roots() {
2281   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2282   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2283   set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
2284   set_concurrent_weak_root_in_progress(true);
2285   if (unload_classes()) {
2286     _unloader.prepare();
2287   }
2288 }
2289 
2290 void ShenandoahHeap::finish_concurrent_roots() {
2291   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2292   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2293   if (unload_classes()) {
2294     _unloader.finish();
2295   }
2296 }
2297 
2298 #ifdef ASSERT
2299 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
2300   assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
2301 
2302   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
2303     // Use ParallelGCThreads inside safepoints
2304     assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads (%u) within safepoint, not %u",
2305            ParallelGCThreads, nworkers);
2306   } else {
2307     // Use ConcGCThreads outside safepoints
    assert(nworkers == ConcGCThreads, "Use ConcGCThreads (%u) outside safepoints, not %u",
2309            ConcGCThreads, nworkers);
2310   }
2311 }
2312 #endif
2313 
2314 ShenandoahVerifier* ShenandoahHeap::verifier() {
2315   guarantee(ShenandoahVerify, "Should be enabled");
2316   assert (_verifier != nullptr, "sanity");
2317   return _verifier;
2318 }
2319 
2320 template<bool CONCURRENT>
2321 class ShenandoahUpdateHeapRefsTask : public WorkerTask {
2322 private:
2323   ShenandoahHeap* _heap;
2324   ShenandoahRegionIterator* _regions;
2325 public:
2326   explicit ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions) :
2327     WorkerTask("Shenandoah Update References"),
2328     _heap(ShenandoahHeap::heap()),
2329     _regions(regions) {
2330   }
2331 
2332   void work(uint worker_id) {
2333     if (CONCURRENT) {
2334       ShenandoahConcurrentWorkerSession worker_session(worker_id);
2335       ShenandoahSuspendibleThreadSetJoiner stsj;
2336       do_work<ShenandoahConcUpdateRefsClosure>(worker_id);
2337     } else {
2338       ShenandoahParallelWorkerSession worker_session(worker_id);
2339       do_work<ShenandoahSTWUpdateRefsClosure>(worker_id);
2340     }
2341   }
2342 
2343 private:
2344   template<class T>
2345   void do_work(uint worker_id) {
2346     if (CONCURRENT && (worker_id == 0)) {
2347       // We ask the first worker to replenish the Mutator free set by moving regions previously reserved to hold the
2348       // results of evacuation.  These reserves are no longer necessary because evacuation has completed.
2349       size_t cset_regions = _heap->collection_set()->count();
2350 
2351       // Now that evacuation is done, we can reassign any regions that had been reserved to hold the results of evacuation
2352       // to the mutator free set.  At the end of GC, we will have cset_regions newly evacuated fully empty regions from
2353       // which we will be able to replenish the Collector free set and the OldCollector free set in preparation for the
2354       // next GC cycle.
2355       _heap->free_set()->move_regions_from_collector_to_mutator(cset_regions);
2356     }
2357     // If !CONCURRENT, there's no value in expanding Mutator free set
2358     T cl;
2359     ShenandoahHeapRegion* r = _regions->next();
2360     while (r != nullptr) {
2361       HeapWord* update_watermark = r->get_update_watermark();
2362       assert (update_watermark >= r->bottom(), "sanity");
2363       if (r->is_active() && !r->is_cset()) {
2364         _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2365         if (ShenandoahPacing) {
2366           _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
2367         }
2368       }
2369       if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
2370         return;
2371       }
2372       r = _regions->next();
2373     }
2374   }
2375 };
2376 
2377 void ShenandoahHeap::update_heap_references(bool concurrent) {
2378   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2379 
2380   if (concurrent) {
2381     ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator);
2382     workers()->run_task(&task);
2383   } else {
2384     ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator);
2385     workers()->run_task(&task);
2386   }
2387 }
2388 
2389 void ShenandoahHeap::update_heap_region_states(bool concurrent) {
2390   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2391   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2392 
2393   {
2394     ShenandoahGCPhase phase(concurrent ?
2395                             ShenandoahPhaseTimings::final_update_refs_update_region_states :
2396                             ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
2397 
2398     final_update_refs_update_region_states();
2399 
2400     assert_pinned_region_status();
2401   }
2402 
2403   {
2404     ShenandoahGCPhase phase(concurrent ?
2405                             ShenandoahPhaseTimings::final_update_refs_trash_cset :
2406                             ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
2407     trash_cset_regions();
2408   }
2409 }
2410 
2411 void ShenandoahHeap::final_update_refs_update_region_states() {
2412   ShenandoahSynchronizePinnedRegionStates cl;
2413   parallel_heap_region_iterate(&cl);
2414 }
2415 
2416 void ShenandoahHeap::rebuild_free_set(bool concurrent) {
2417   ShenandoahGCPhase phase(concurrent ?
2418                           ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
2419                           ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
2420   ShenandoahHeapLocker locker(lock());
2421   size_t young_cset_regions, old_cset_regions;
2422   size_t first_old_region, last_old_region, old_region_count;
2423   _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count);
2424   // If there are no old regions, first_old_region will be greater than last_old_region
2425   assert((first_old_region > last_old_region) ||
2426          ((last_old_region + 1 - first_old_region >= old_region_count) &&
2427           get_region(first_old_region)->is_old() && get_region(last_old_region)->is_old()),
2428          "sanity: old_region_count: " SIZE_FORMAT ", first_old_region: " SIZE_FORMAT ", last_old_region: " SIZE_FORMAT,
2429          old_region_count, first_old_region, last_old_region);
2430 
2431   if (mode()->is_generational()) {
2432 #ifdef ASSERT
2433     if (ShenandoahVerify) {
2434       verifier()->verify_before_rebuilding_free_set();
2435     }
2436 #endif
2437 
2438     // The computation of bytes_of_allocation_runway_before_gc_trigger is quite conservative so consider all of this
2439     // available for transfer to old. Note that transfer of humongous regions does not impact available.
2440     ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
2441     size_t allocation_runway = gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions);
2442     gen_heap->compute_old_generation_balance(allocation_runway, old_cset_regions);
2443 
2444     // Total old_available may have been expanded to hold anticipated promotions.  We trigger if the fragmented available
2445     // memory represents more than 16 regions worth of data.  Note that fragmentation may increase when we promote regular
2446     // regions in place when many of these regular regions have an abundant amount of available memory within them.  Fragmentation
2447     // will decrease as promote-by-copy consumes the available memory within these partially consumed regions.
2448     //
2449     // We consider old-gen to have excessive fragmentation if more than 12.5% of old-gen is free memory that resides
2450     // within partially consumed regions of memory.
2451   }
2452   // Rebuild free set based on adjusted generation sizes.
2453   _free_set->finish_rebuild(young_cset_regions, old_cset_regions, old_region_count);
2454 
2455   if (mode()->is_generational()) {
2456     ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
2457     ShenandoahOldGeneration* old_gen = gen_heap->old_generation();
2458     old_gen->heuristics()->evaluate_triggers(first_old_region, last_old_region, old_region_count, num_regions());
2459   }
2460 }
2461 
2462 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2463   print_on(st);
2464   st->cr();
2465   print_heap_regions_on(st);
2466 }
2467 
2468 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
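  // Marking bitmap memory is committed in slices that each cover _bitmap_regions_per_slice
  // regions. Report whether any region sharing r's slice is committed; with skip_self, r itself
  // is excluded so callers can tell whether the slice is still needed when r changes state.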
2469   size_t slice = r->index() / _bitmap_regions_per_slice;
2470 
2471   size_t regions_from = _bitmap_regions_per_slice * slice;
2472   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2473   for (size_t g = regions_from; g < regions_to; g++) {
2474     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2475     if (skip_self && g == r->index()) continue;
2476     if (get_region(g)->is_committed()) {
2477       return true;
2478     }
2479   }
2480   return false;
2481 }
2482 
2483 bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2484   shenandoah_assert_heaplocked();
2485 
2486   // Bitmaps in special regions do not need commits
2487   if (_bitmap_region_special) {
2488     return true;
2489   }
2490 
2491   if (is_bitmap_slice_committed(r, true)) {
    // Some other region from the group is already committed, meaning the bitmap
    // slice is already committed; exit right away.
2494     return true;
2495   }
2496 
2497   // Commit the bitmap slice:
2498   size_t slice = r->index() / _bitmap_regions_per_slice;
2499   size_t off = _bitmap_bytes_per_slice * slice;
2500   size_t len = _bitmap_bytes_per_slice;
2501   char* start = (char*) _bitmap_region.start() + off;
2502 
2503   if (!os::commit_memory(start, len, false)) {
2504     return false;
2505   }
2506 
2507   if (AlwaysPreTouch) {
2508     os::pretouch_memory(start, start + len, _pretouch_bitmap_page_size);
2509   }
2510 
2511   return true;
2512 }
2513 
2514 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2515   shenandoah_assert_heaplocked();
2516 
2517   // Bitmaps in special regions do not need uncommits
2518   if (_bitmap_region_special) {
2519     return true;
2520   }
2521 
2522   if (is_bitmap_slice_committed(r, true)) {
    // Some other region from the group is still committed, meaning the bitmap
    // slice should stay committed; exit right away.
2525     return true;
2526   }
2527 
2528   // Uncommit the bitmap slice:
2529   size_t slice = r->index() / _bitmap_regions_per_slice;
2530   size_t off = _bitmap_bytes_per_slice * slice;
2531   size_t len = _bitmap_bytes_per_slice;
2532   if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
2533     return false;
2534   }
2535   return true;
2536 }
2537 
2538 void ShenandoahHeap::safepoint_synchronize_begin() {
2539   SuspendibleThreadSet::synchronize();
2540 }
2541 
2542 void ShenandoahHeap::safepoint_synchronize_end() {
2543   SuspendibleThreadSet::desynchronize();
2544 }
2545 
2546 void ShenandoahHeap::try_inject_alloc_failure() {
2547   if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2548     _inject_alloc_failure.set();
2549     os::naked_short_sleep(1);
2550     if (cancelled_gc()) {
2551       log_info(gc)("Allocation failure was successfully injected");
2552     }
2553   }
2554 }
2555 
2556 bool ShenandoahHeap::should_inject_alloc_failure() {
2557   return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2558 }
2559 
2560 void ShenandoahHeap::initialize_serviceability() {
2561   _memory_pool = new ShenandoahMemoryPool(this);
2562   _cycle_memory_manager.add_pool(_memory_pool);
2563   _stw_memory_manager.add_pool(_memory_pool);
2564 }
2565 
2566 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2567   GrowableArray<GCMemoryManager*> memory_managers(2);
2568   memory_managers.append(&_cycle_memory_manager);
2569   memory_managers.append(&_stw_memory_manager);
2570   return memory_managers;
2571 }
2572 
2573 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2574   GrowableArray<MemoryPool*> memory_pools(1);
2575   memory_pools.append(_memory_pool);
2576   return memory_pools;
2577 }
2578 
2579 MemoryUsage ShenandoahHeap::memory_usage() {
2580   return MemoryUsage(_initial_size, used(), committed(), max_capacity());
2581 }
2582 
2583 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2584   _heap(ShenandoahHeap::heap()),
2585   _index(0) {}
2586 
2587 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2588   _heap(heap),
2589   _index(0) {}
2590 
2591 void ShenandoahRegionIterator::reset() {
2592   _index = 0;
2593 }
2594 
2595 bool ShenandoahRegionIterator::has_next() const {
2596   return _index < _heap->num_regions();
2597 }
2598 
2599 char ShenandoahHeap::gc_state() const {
2600   return _gc_state.raw_value();
2601 }
2602 
2603 ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
2604 #ifdef ASSERT
2605   assert(_liveness_cache != nullptr, "sanity");
2606   assert(worker_id < _max_workers, "sanity");
2607   for (uint i = 0; i < num_regions(); i++) {
2608     assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
2609   }
2610 #endif
2611   return _liveness_cache[worker_id];
2612 }
2613 
2614 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
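  // Fold the live-data counts this worker accumulated during marking into the owning regions,
  // and zero the cache entries so the buffer can be reused.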
2615   assert(worker_id < _max_workers, "sanity");
2616   assert(_liveness_cache != nullptr, "sanity");
2617   ShenandoahLiveData* ld = _liveness_cache[worker_id];
2618   for (uint i = 0; i < num_regions(); i++) {
2619     ShenandoahLiveData live = ld[i];
2620     if (live > 0) {
2621       ShenandoahHeapRegion* r = get_region(i);
2622       r->increase_live_data_gc_words(live);
2623       ld[i] = 0;
2624     }
2625   }
2626 }
2627 
2628 bool ShenandoahHeap::requires_barriers(stackChunkOop obj) const {
2629   if (is_idle()) return false;
2630 
  // Objects allocated after marking start are implicitly alive and don't need any barriers during
  // the marking phase.
2633   if (is_concurrent_mark_in_progress() &&
2634      !marking_context()->allocated_after_mark_start(obj)) {
2635     return true;
2636   }
2637 
  // Cannot guarantee that obj is deeply good.
2639   if (has_forwarded_objects()) {
2640     return true;
2641   }
2642 
2643   return false;
2644 }
2645 
2646 ShenandoahGeneration* ShenandoahHeap::generation_for(ShenandoahAffiliation affiliation) const {
2647   if (!mode()->is_generational()) {
2648     return global_generation();
2649   } else if (affiliation == YOUNG_GENERATION) {
2650     return young_generation();
2651   } else if (affiliation == OLD_GENERATION) {
2652     return old_generation();
2653   }
2654 
2655   ShouldNotReachHere();
2656   return nullptr;
2657 }
2658 
2659 void ShenandoahHeap::log_heap_status(const char* msg) const {
2660   if (mode()->is_generational()) {
2661     young_generation()->log_status(msg);
2662     old_generation()->log_status(msg);
2663   } else {
2664     global_generation()->log_status(msg);
2665   }
2666 }