1 /*
   2  * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.
   4  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
   5  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   6  *
   7  * This code is free software; you can redistribute it and/or modify it
   8  * under the terms of the GNU General Public License version 2 only, as
   9  * published by the Free Software Foundation.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 #include "precompiled.hpp"
  28 #include "memory/allocation.hpp"
  29 #include "memory/universe.hpp"
  30 
  31 #include "gc/shared/classUnloadingContext.hpp"
  32 #include "gc/shared/fullGCForwarding.hpp"
  33 #include "gc/shared/gcArguments.hpp"
  34 #include "gc/shared/gcTimer.hpp"
  35 #include "gc/shared/gcTraceTime.inline.hpp"
  36 #include "gc/shared/locationPrinter.inline.hpp"
  37 #include "gc/shared/memAllocator.hpp"
  38 #include "gc/shared/plab.hpp"
  39 #include "gc/shared/tlab_globals.hpp"
  40 
  41 #include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
  42 #include "gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp"
  43 #include "gc/shenandoah/shenandoahAllocRequest.hpp"
  44 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  45 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  46 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  47 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  48 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
  50 #include "gc/shenandoah/shenandoahControlThread.hpp"
  51 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  52 #include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
  53 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
  54 #include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
  55 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  56 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  57 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  58 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  59 #include "gc/shenandoah/shenandoahInitLogger.hpp"
  60 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  61 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
  62 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  63 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  64 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
  65 #include "gc/shenandoah/shenandoahPadding.hpp"
  66 #include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
  67 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  68 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  69 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
  70 #include "gc/shenandoah/shenandoahSTWMark.hpp"
  71 #include "gc/shenandoah/shenandoahUtils.hpp"
  72 #include "gc/shenandoah/shenandoahVerifier.hpp"
  73 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  74 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  75 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  76 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  77 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  78 #include "gc/shenandoah/mode/shenandoahGenerationalMode.hpp"
  79 #include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
  80 #include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
  81 #include "utilities/globalDefinitions.hpp"
  82 
  83 #if INCLUDE_JFR
  84 #include "gc/shenandoah/shenandoahJfrSupport.hpp"
  85 #endif
  86 
  87 #include "cds/archiveHeapWriter.hpp"
  88 #include "classfile/systemDictionary.hpp"
  89 #include "code/codeCache.hpp"
  90 #include "memory/classLoaderMetaspace.hpp"
  91 #include "memory/metaspaceUtils.hpp"
  92 #include "nmt/mallocTracker.hpp"
  93 #include "nmt/memTracker.hpp"
  94 #include "oops/compressedOops.inline.hpp"
  95 #include "prims/jvmtiTagMap.hpp"
  96 #include "runtime/atomic.hpp"
  97 #include "runtime/globals.hpp"
  98 #include "runtime/interfaceSupport.inline.hpp"
  99 #include "runtime/java.hpp"
 100 #include "runtime/orderAccess.hpp"
 101 #include "runtime/safepointMechanism.hpp"
 102 #include "runtime/stackWatermarkSet.hpp"
 103 #include "runtime/vmThread.hpp"
 104 #include "utilities/events.hpp"
 105 #include "utilities/powerOfTwo.hpp"
 106 
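// The two pretouch tasks below are used by the AlwaysPreTouch path in ShenandoahHeap::initialize():
// they walk all regions in parallel and touch the committed heap and mark bitmap storage, so the OS
// backs that memory with real pages before the heap is used.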
 107 class ShenandoahPretouchHeapTask : public WorkerTask {
 108 private:
 109   ShenandoahRegionIterator _regions;
 110   const size_t _page_size;
 111 public:
 112   ShenandoahPretouchHeapTask(size_t page_size) :
 113     WorkerTask("Shenandoah Pretouch Heap"),
 114     _page_size(page_size) {}
 115 
 116   virtual void work(uint worker_id) {
 117     ShenandoahHeapRegion* r = _regions.next();
 118     while (r != nullptr) {
 119       if (r->is_committed()) {
 120         os::pretouch_memory(r->bottom(), r->end(), _page_size);
 121       }
 122       r = _regions.next();
 123     }
 124   }
 125 };
 126 
 127 class ShenandoahPretouchBitmapTask : public WorkerTask {
 128 private:
 129   ShenandoahRegionIterator _regions;
 130   char* _bitmap_base;
 131   const size_t _bitmap_size;
 132   const size_t _page_size;
 133 public:
 134   ShenandoahPretouchBitmapTask(char* bitmap_base, size_t bitmap_size, size_t page_size) :
 135     WorkerTask("Shenandoah Pretouch Bitmap"),
 136     _bitmap_base(bitmap_base),
 137     _bitmap_size(bitmap_size),
 138     _page_size(page_size) {}
 139 
 140   virtual void work(uint worker_id) {
 141     ShenandoahHeapRegion* r = _regions.next();
 142     while (r != nullptr) {
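      // start/end are the byte offsets of this region's slice of the mark bitmap: each region maps to
      // region_size_bytes() / heap_map_factor() bytes of bitmap storage.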
 143       size_t start = r->index()       * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 144       size_t end   = (r->index() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " <= " SIZE_FORMAT, end, _bitmap_size);
 146 
 147       if (r->is_committed()) {
 148         os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);
 149       }
 150 
 151       r = _regions.next();
 152     }
 153   }
 154 };
 155 
 156 jint ShenandoahHeap::initialize() {
 157   //
 158   // Figure out heap sizing
 159   //
 160 
 161   size_t init_byte_size = InitialHeapSize;
 162   size_t min_byte_size  = MinHeapSize;
 163   size_t max_byte_size  = MaxHeapSize;
 164   size_t heap_alignment = HeapAlignment;
 165 
 166   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 167 
 168   Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
 169   Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
 170 
 171   _num_regions = ShenandoahHeapRegion::region_count();
 172   assert(_num_regions == (max_byte_size / reg_size_bytes),
 173          "Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT,
 174          _num_regions, max_byte_size, reg_size_bytes);
 175 
 176   size_t num_committed_regions = init_byte_size / reg_size_bytes;
 177   num_committed_regions = MIN2(num_committed_regions, _num_regions);
 178   assert(num_committed_regions <= _num_regions, "sanity");
 179   _initial_size = num_committed_regions * reg_size_bytes;
 180 
 181   size_t num_min_regions = min_byte_size / reg_size_bytes;
 182   num_min_regions = MIN2(num_min_regions, _num_regions);
 183   assert(num_min_regions <= _num_regions, "sanity");
 184   _minimum_size = num_min_regions * reg_size_bytes;
 185 
 186   // Default to max heap size.
 187   _soft_max_size = _num_regions * reg_size_bytes;
 188 
 189   _committed = _initial_size;
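  // Illustrative sizing example (hypothetical numbers): with an 8g max heap, a 2g initial heap and
  // 4m regions, _num_regions is 2048 and num_committed_regions is 512, so _initial_size and
  // _committed start at 2g while _soft_max_size defaults to the full 8g.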
 190 
 191   size_t heap_page_size   = UseLargePages ? os::large_page_size() : os::vm_page_size();
 192   size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 193   size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 194 
 195   //
 196   // Reserve and commit memory for heap
 197   //
 198 
 199   ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
 200   initialize_reserved_region(heap_rs);
 201   _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
 202   _heap_region_special = heap_rs.special();
 203 
 204   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
 205          "Misaligned heap: " PTR_FORMAT, p2i(base()));
 206   os::trace_page_sizes_for_requested_size("Heap",
 207                                           max_byte_size, heap_alignment,
 208                                           heap_rs.base(),
 209                                           heap_rs.size(), heap_rs.page_size());
 210 
 211 #if SHENANDOAH_OPTIMIZED_MARKTASK
 212   // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
 213   // Fail if we ever attempt to address more than we can.
 214   if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
 215     FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
 216                           "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
 217                           "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
 218                 p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
 219     vm_exit_during_initialization("Fatal Error", buf);
 220   }
 221 #endif
 222 
 223   ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
 224   if (!_heap_region_special) {
 225     os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
 226                               "Cannot commit heap memory");
 227   }
 228 
 229   BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this, _heap_region));
 230 
  // Now that we know the number of regions and heap sizes, initialize the heuristics.
 232   initialize_heuristics();
 233 
 234   assert(_heap_region.byte_size() == heap_rs.size(), "Need to know reserved size for card table");
 235 
 236   //
 237   // Worker threads must be initialized after the barrier is configured
 238   //
 239   _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
 240   if (_workers == nullptr) {
 241     vm_exit_during_initialization("Failed necessary allocation.");
 242   } else {
 243     _workers->initialize_workers();
 244   }
 245 
 246   if (ParallelGCThreads > 1) {
 247     _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread", ParallelGCThreads);
 248     _safepoint_workers->initialize_workers();
 249   }
 250 
 251   //
 252   // Reserve and commit memory for bitmap(s)
 253   //
 254 
 255   size_t bitmap_size_orig = ShenandoahMarkBitMap::compute_size(heap_rs.size());
 256   _bitmap_size = align_up(bitmap_size_orig, bitmap_page_size);
 257 
 258   size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();
 259 
 260   guarantee(bitmap_bytes_per_region != 0,
 261             "Bitmap bytes per region should not be zero");
 262   guarantee(is_power_of_2(bitmap_bytes_per_region),
 263             "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
 264 
 265   if (bitmap_page_size > bitmap_bytes_per_region) {
 266     _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
 267     _bitmap_bytes_per_slice = bitmap_page_size;
 268   } else {
 269     _bitmap_regions_per_slice = 1;
 270     _bitmap_bytes_per_slice = bitmap_bytes_per_region;
 271   }
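  // Illustrative arithmetic (hypothetical numbers, assuming a heap_map_factor of 64): with 32m regions,
  // bitmap_bytes_per_region is 512k. With 4k small pages one bitmap "slice" is a single region (512k);
  // with 1g large pages one slice spans 2048 regions, so bitmap commits happen in 1g chunks.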
 272 
 273   guarantee(_bitmap_regions_per_slice >= 1,
 274             "Should have at least one region per slice: " SIZE_FORMAT,
 275             _bitmap_regions_per_slice);
 276 
 277   guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
 278             "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
 279             _bitmap_bytes_per_slice, bitmap_page_size);
 280 
 281   ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
 282   os::trace_page_sizes_for_requested_size("Mark Bitmap",
 283                                           bitmap_size_orig, bitmap_page_size,
 284                                           bitmap.base(),
 285                                           bitmap.size(), bitmap.page_size());
 286   MemTracker::record_virtual_memory_tag(bitmap.base(), mtGC);
 287   _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
 288   _bitmap_region_special = bitmap.special();
 289 
 290   size_t bitmap_init_commit = _bitmap_bytes_per_slice *
 291     align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
 292   bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
 293   if (!_bitmap_region_special) {
 294     os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
 295                               "Cannot commit bitmap memory");
 296   }
 297 
 298   _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);
 299 
 300   if (ShenandoahVerify) {
 301     ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
 302     os::trace_page_sizes_for_requested_size("Verify Bitmap",
 303                                             bitmap_size_orig, bitmap_page_size,
 304                                             verify_bitmap.base(),
 305                                             verify_bitmap.size(), verify_bitmap.page_size());
 306     if (!verify_bitmap.special()) {
 307       os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
 308                                 "Cannot commit verification bitmap memory");
 309     }
 310     MemTracker::record_virtual_memory_tag(verify_bitmap.base(), mtGC);
 311     MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
 312     _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
 313     _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
 314   }
 315 
 316   // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
 317   size_t aux_bitmap_page_size = bitmap_page_size;
 318 
 319   ReservedSpace aux_bitmap(_bitmap_size, aux_bitmap_page_size);
 320   os::trace_page_sizes_for_requested_size("Aux Bitmap",
 321                                           bitmap_size_orig, aux_bitmap_page_size,
 322                                           aux_bitmap.base(),
 323                                           aux_bitmap.size(), aux_bitmap.page_size());
 324   MemTracker::record_virtual_memory_tag(aux_bitmap.base(), mtGC);
 325   _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
 326   _aux_bitmap_region_special = aux_bitmap.special();
 327   _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
 328 
 329   //
 330   // Create regions and region sets
 331   //
 332   size_t region_align = align_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE);
 333   size_t region_storage_size_orig = region_align * _num_regions;
 334   size_t region_storage_size = align_up(region_storage_size_orig,
 335                                         MAX2(region_page_size, os::vm_allocation_granularity()));
 336 
 337   ReservedSpace region_storage(region_storage_size, region_page_size);
 338   os::trace_page_sizes_for_requested_size("Region Storage",
 339                                           region_storage_size_orig, region_page_size,
 340                                           region_storage.base(),
 341                                           region_storage.size(), region_storage.page_size());
 342   MemTracker::record_virtual_memory_tag(region_storage.base(), mtGC);
 343   if (!region_storage.special()) {
 344     os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false,
 345                               "Cannot commit region memory");
 346   }
 347 
 348   // Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks.
 349   // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there.
  // If not successful, bite the bullet and allocate at whatever address.
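  // For context: the collection set is kept as a byte map indexed by (addr >> region_size_bytes_shift),
  // and a "biased" base pointer (see the vanilla/biased map addresses in print_on()) lets the in-cset
  // test compile down to a single byte load; a low map address keeps that biased base cheap to encode.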
 351   {
 352     const size_t cset_align = MAX2<size_t>(os::vm_page_size(), os::vm_allocation_granularity());
 353     const size_t cset_size = align_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align);
 354     const size_t cset_page_size = os::vm_page_size();
 355 
 356     uintptr_t min = round_up_power_of_2(cset_align);
 357     uintptr_t max = (1u << 30u);
 358     ReservedSpace cset_rs;
 359 
 360     for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
 361       char* req_addr = (char*)addr;
 362       assert(is_aligned(req_addr, cset_align), "Should be aligned");
 363       cset_rs = ReservedSpace(cset_size, cset_align, cset_page_size, req_addr);
 364       if (cset_rs.is_reserved()) {
 365         assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
 366         _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 367         break;
 368       }
 369     }
 370 
 371     if (_collection_set == nullptr) {
 372       cset_rs = ReservedSpace(cset_size, cset_align, os::vm_page_size());
 373       _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 374     }
 375     os::trace_page_sizes_for_requested_size("Collection Set",
 376                                             cset_size, cset_page_size,
 377                                             cset_rs.base(),
 378                                             cset_rs.size(), cset_rs.page_size());
 379   }
 380 
 381   _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
 382   _affiliations = NEW_C_HEAP_ARRAY(uint8_t, _num_regions, mtGC);
 383   _free_set = new ShenandoahFreeSet(this, _num_regions);
 384 
 385   {
 386     ShenandoahHeapLocker locker(lock());
 387 
 388     for (size_t i = 0; i < _num_regions; i++) {
 389       HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
 390       bool is_committed = i < num_committed_regions;
 391       void* loc = region_storage.base() + i * region_align;
 392 
 393       ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
 394       assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");
 395 
 396       _marking_context->initialize_top_at_mark_start(r);
 397       _regions[i] = r;
 398       assert(!collection_set()->is_in(i), "New region should not be in collection set");
 399 
 400       _affiliations[i] = ShenandoahAffiliation::FREE;
 401     }
 402 
 403     // Initialize to complete
 404     _marking_context->mark_complete();
 405     size_t young_cset_regions, old_cset_regions;
 406 
 407     // We are initializing free set.  We ignore cset region tallies.
 408     size_t first_old, last_old, num_old;
 409     _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
 410     _free_set->finish_rebuild(young_cset_regions, old_cset_regions, num_old);
 411   }
 412 
 413   if (AlwaysPreTouch) {
 414     // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before initialize() below zeroes it with the initializing thread. For any given region,
 416     // we touch the region and the corresponding bitmaps from the same thread.
 417     ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
 418 
 419     _pretouch_heap_page_size = heap_page_size;
 420     _pretouch_bitmap_page_size = bitmap_page_size;
 421 
 422     // OS memory managers may want to coalesce back-to-back pages. Make their jobs
    // simpler by pre-touching contiguous spaces (heap and bitmap) separately.
 424 
 425     ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, _pretouch_bitmap_page_size);
 426     _workers->run_task(&bcl);
 427 
 428     ShenandoahPretouchHeapTask hcl(_pretouch_heap_page_size);
 429     _workers->run_task(&hcl);
 430   }
 431 
 432   //
 433   // Initialize the rest of GC subsystems
 434   //
 435 
 436   _liveness_cache = NEW_C_HEAP_ARRAY(ShenandoahLiveData*, _max_workers, mtGC);
 437   for (uint worker = 0; worker < _max_workers; worker++) {
 438     _liveness_cache[worker] = NEW_C_HEAP_ARRAY(ShenandoahLiveData, _num_regions, mtGC);
 439     Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
 440   }
 441 
 442   // There should probably be Shenandoah-specific options for these,
 443   // just as there are G1-specific options.
 444   {
 445     ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
 446     satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
 447     satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
 448   }
 449 
 450   _monitoring_support = new ShenandoahMonitoringSupport(this);
 451   _phase_timings = new ShenandoahPhaseTimings(max_workers());
 452   ShenandoahCodeRoots::initialize();
 453 
 454   if (ShenandoahPacing) {
 455     _pacer = new ShenandoahPacer(this);
 456     _pacer->setup_for_idle();
 457   }
 458 
 459   initialize_controller();
 460 
 461   print_init_logger();
 462 
 463   FullGCForwarding::initialize(_heap_region);
 464 
 465   return JNI_OK;
 466 }
 467 
 468 void ShenandoahHeap::initialize_controller() {
 469   _control_thread = new ShenandoahControlThread();
 470 }
 471 
 472 void ShenandoahHeap::print_init_logger() const {
 473   ShenandoahInitLogger::print();
 474 }
 475 
 476 void ShenandoahHeap::initialize_mode() {
 477   if (ShenandoahGCMode != nullptr) {
 478     if (strcmp(ShenandoahGCMode, "satb") == 0) {
 479       _gc_mode = new ShenandoahSATBMode();
 480     } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
 481       _gc_mode = new ShenandoahPassiveMode();
 482     } else if (strcmp(ShenandoahGCMode, "generational") == 0) {
 483       _gc_mode = new ShenandoahGenerationalMode();
 484     } else {
 485       vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
 486     }
 487   } else {
 488     vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
 489   }
 490   _gc_mode->initialize_flags();
 491   if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 492     vm_exit_during_initialization(
 493             err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 494                     _gc_mode->name()));
 495   }
 496   if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
 497     vm_exit_during_initialization(
 498             err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 499                     _gc_mode->name()));
 500   }
 501 }
 502 
 503 void ShenandoahHeap::initialize_heuristics() {
 504   _global_generation = new ShenandoahGlobalGeneration(mode()->is_generational(), max_workers(), max_capacity(), max_capacity());
 505   _global_generation->initialize_heuristics(mode());
 506 }
 507 
 508 #ifdef _MSC_VER
 509 #pragma warning( push )
 510 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 511 #endif
 512 
 513 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 514   CollectedHeap(),
 515   _gc_generation(nullptr),
 516   _active_generation(nullptr),
 517   _initial_size(0),
 518   _committed(0),
 519   _max_workers(MAX3(ConcGCThreads, ParallelGCThreads, 1U)),
 520   _workers(nullptr),
 521   _safepoint_workers(nullptr),
 522   _heap_region_special(false),
 523   _num_regions(0),
 524   _regions(nullptr),
 525   _affiliations(nullptr),
 526   _gc_state_changed(false),
 527   _gc_no_progress_count(0),
 528   _cancel_requested_time(0),
 529   _update_refs_iterator(this),
 530   _global_generation(nullptr),
 531   _control_thread(nullptr),
 532   _young_generation(nullptr),
 533   _old_generation(nullptr),
 534   _shenandoah_policy(policy),
 535   _gc_mode(nullptr),
 536   _free_set(nullptr),
 537   _pacer(nullptr),
 538   _verifier(nullptr),
 539   _phase_timings(nullptr),
 540   _mmu_tracker(),
 541   _monitoring_support(nullptr),
 542   _memory_pool(nullptr),
 543   _stw_memory_manager("Shenandoah Pauses"),
 544   _cycle_memory_manager("Shenandoah Cycles"),
 545   _gc_timer(new ConcurrentGCTimer()),
 546   _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
 547   _marking_context(nullptr),
 548   _bitmap_size(0),
 549   _bitmap_regions_per_slice(0),
 550   _bitmap_bytes_per_slice(0),
 551   _bitmap_region_special(false),
 552   _aux_bitmap_region_special(false),
 553   _liveness_cache(nullptr),
 554   _collection_set(nullptr)
 555 {
  // Initialize GC mode early, since many subsequent initialization procedures depend on it
 557   initialize_mode();
 558 }
 559 
 560 #ifdef _MSC_VER
 561 #pragma warning( pop )
 562 #endif
 563 
 564 void ShenandoahHeap::print_on(outputStream* st) const {
 565   st->print_cr("Shenandoah Heap");
 566   st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
 567                byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
 568                byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
 569                byte_size_in_proper_unit(committed()),    proper_unit_for_byte_size(committed()),
 570                byte_size_in_proper_unit(used()),         proper_unit_for_byte_size(used()));
 571   st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions",
 572                num_regions(),
 573                byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
 574                proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));
 575 
 576   st->print("Status: ");
 577   if (has_forwarded_objects())                 st->print("has forwarded objects, ");
 578   if (!mode()->is_generational()) {
    if (is_concurrent_mark_in_progress())      st->print("marking, ");
 580   } else {
 581     if (is_concurrent_old_mark_in_progress())    st->print("old marking, ");
 582     if (is_concurrent_young_mark_in_progress())  st->print("young marking, ");
 583   }
 584   if (is_evacuation_in_progress())             st->print("evacuating, ");
 585   if (is_update_refs_in_progress())            st->print("updating refs, ");
 586   if (is_degenerated_gc_in_progress())         st->print("degenerated gc, ");
 587   if (is_full_gc_in_progress())                st->print("full gc, ");
 588   if (is_full_gc_move_in_progress())           st->print("full gc move, ");
 589   if (is_concurrent_weak_root_in_progress())   st->print("concurrent weak roots, ");
 590   if (is_concurrent_strong_root_in_progress() &&
 591       !is_concurrent_weak_root_in_progress())  st->print("concurrent strong roots, ");
 592 
 593   if (cancelled_gc()) {
 594     st->print("cancelled");
 595   } else {
 596     st->print("not cancelled");
 597   }
 598   st->cr();
 599 
 600   st->print_cr("Reserved region:");
 601   st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
 602                p2i(reserved_region().start()),
 603                p2i(reserved_region().end()));
 604 
 605   ShenandoahCollectionSet* cset = collection_set();
 606   st->print_cr("Collection set:");
 607   if (cset != nullptr) {
 608     st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address()));
 609     st->print_cr(" - map (biased):  " PTR_FORMAT, p2i(cset->biased_map_address()));
 610   } else {
 611     st->print_cr(" (null)");
 612   }
 613 
 614   st->cr();
 615   MetaspaceUtils::print_on(st);
 616 
 617   if (Verbose) {
 618     st->cr();
 619     print_heap_regions_on(st);
 620   }
 621 }
 622 
 623 class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
 624 public:
 625   void do_thread(Thread* thread) {
 626     assert(thread != nullptr, "Sanity");
 627     assert(thread->is_Worker_thread(), "Only worker thread expected");
 628     ShenandoahThreadLocalData::initialize_gclab(thread);
 629   }
 630 };
 631 
 632 void ShenandoahHeap::post_initialize() {
 633   CollectedHeap::post_initialize();
 634   _mmu_tracker.initialize();
 635 
 636   MutexLocker ml(Threads_lock);
 637 
 638   ShenandoahInitWorkerGCLABClosure init_gclabs;
 639   _workers->threads_do(&init_gclabs);
 640 
  // The gclab cannot be initialized early during VM startup, as it cannot determine its max_size.
  // Now, we let WorkerThreads initialize the gclab when a new worker is created.
 643   _workers->set_initialize_gclab();
 644   if (_safepoint_workers != nullptr) {
 645     _safepoint_workers->threads_do(&init_gclabs);
 646     _safepoint_workers->set_initialize_gclab();
 647   }
 648 
 649   JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers();)
 650 }
 651 
 652 ShenandoahHeuristics* ShenandoahHeap::heuristics() {
 653   return _global_generation->heuristics();
 654 }
 655 
 656 size_t ShenandoahHeap::used() const {
 657   return global_generation()->used();
 658 }
 659 
 660 size_t ShenandoahHeap::committed() const {
 661   return Atomic::load(&_committed);
 662 }
 663 
 664 void ShenandoahHeap::increase_committed(size_t bytes) {
 665   shenandoah_assert_heaplocked_or_safepoint();
 666   _committed += bytes;
 667 }
 668 
 669 void ShenandoahHeap::decrease_committed(size_t bytes) {
 670   shenandoah_assert_heaplocked_or_safepoint();
 671   _committed -= bytes;
 672 }
 673 
 674 // For tracking usage based on allocations, it should be the case that:
 675 // * The sum of regions::used == heap::used
 676 // * The sum of a generation's regions::used == generation::used
 677 // * The sum of a generation's humongous regions::free == generation::humongous_waste
 678 // These invariants are checked by the verifier on GC safepoints.
 679 //
 680 // Additional notes:
 681 // * When a mutator's allocation request causes a region to be retired, the
 682 //   free memory left in that region is considered waste. It does not contribute
 683 //   to the usage, but it _does_ contribute to allocation rate.
 684 // * The bottom of a PLAB must be aligned on card size. In some cases this will
 685 //   require padding in front of the PLAB (a filler object). Because this padding
 686 //   is included in the region's used memory we include the padding in the usage
 687 //   accounting as waste.
// * Mutator allocations are used to compute an allocation rate. They are also
//   sent to the Pacer for the same purpose.
// * There are three sources of waste:
//  1. The padding used to align a PLAB on card size
//  2. The free memory left in a region that is retired because it is smaller than the minimum TLAB size
 693 //  3. The unused portion of memory in the last region of a humongous object
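// Illustrative example (hypothetical numbers, following the rules above): a mutator allocation that
// retires a region with 16k of free space left treats that 16k as waste: it counts towards the
// allocation rate (and is reported to the pacer) but not towards usage; only the object's actual
// size is added to the generation's used. The unused tail of a humongous object's last region is
// accounted as humongous waste instead.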
 694 void ShenandoahHeap::increase_used(const ShenandoahAllocRequest& req) {
 695   size_t actual_bytes = req.actual_size() * HeapWordSize;
 696   size_t wasted_bytes = req.waste() * HeapWordSize;
 697   ShenandoahGeneration* generation = generation_for(req.affiliation());
 698 
 699   if (req.is_gc_alloc()) {
 700     assert(wasted_bytes == 0 || req.type() == ShenandoahAllocRequest::_alloc_plab, "Only PLABs have waste");
 701     increase_used(generation, actual_bytes + wasted_bytes);
 702   } else {
 703     assert(req.is_mutator_alloc(), "Expected mutator alloc here");
 704     // padding and actual size both count towards allocation counter
 705     generation->increase_allocated(actual_bytes + wasted_bytes);
 706 
 707     // only actual size counts toward usage for mutator allocations
 708     increase_used(generation, actual_bytes);
 709 
 710     // notify pacer of both actual size and waste
 711     notify_mutator_alloc_words(req.actual_size(), req.waste());
 712 
 713     if (wasted_bytes > 0 && ShenandoahHeapRegion::requires_humongous(req.actual_size())) {
      increase_humongous_waste(generation, wasted_bytes);
 715     }
 716   }
 717 }
 718 
 719 void ShenandoahHeap::increase_humongous_waste(ShenandoahGeneration* generation, size_t bytes) {
 720   generation->increase_humongous_waste(bytes);
 721   if (!generation->is_global()) {
 722     global_generation()->increase_humongous_waste(bytes);
 723   }
 724 }
 725 
 726 void ShenandoahHeap::decrease_humongous_waste(ShenandoahGeneration* generation, size_t bytes) {
 727   generation->decrease_humongous_waste(bytes);
 728   if (!generation->is_global()) {
 729     global_generation()->decrease_humongous_waste(bytes);
 730   }
 731 }
 732 
 733 void ShenandoahHeap::increase_used(ShenandoahGeneration* generation, size_t bytes) {
 734   generation->increase_used(bytes);
 735   if (!generation->is_global()) {
 736     global_generation()->increase_used(bytes);
 737   }
 738 }
 739 
 740 void ShenandoahHeap::decrease_used(ShenandoahGeneration* generation, size_t bytes) {
 741   generation->decrease_used(bytes);
 742   if (!generation->is_global()) {
 743     global_generation()->decrease_used(bytes);
 744   }
 745 }
 746 
 747 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, size_t waste) {
 748   if (ShenandoahPacing) {
 749     control_thread()->pacing_notify_alloc(words);
 750     if (waste > 0) {
 751       pacer()->claim_for_alloc<true>(waste);
 752     }
 753   }
 754 }
 755 
 756 size_t ShenandoahHeap::capacity() const {
 757   return committed();
 758 }
 759 
 760 size_t ShenandoahHeap::max_capacity() const {
 761   return _num_regions * ShenandoahHeapRegion::region_size_bytes();
 762 }
 763 
 764 size_t ShenandoahHeap::soft_max_capacity() const {
 765   size_t v = Atomic::load(&_soft_max_size);
 766   assert(min_capacity() <= v && v <= max_capacity(),
 767          "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
 768          min_capacity(), v, max_capacity());
 769   return v;
 770 }
 771 
 772 void ShenandoahHeap::set_soft_max_capacity(size_t v) {
 773   assert(min_capacity() <= v && v <= max_capacity(),
 774          "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
 775          min_capacity(), v, max_capacity());
 776   Atomic::store(&_soft_max_size, v);
 777 }
 778 
 779 size_t ShenandoahHeap::min_capacity() const {
 780   return _minimum_size;
 781 }
 782 
 783 size_t ShenandoahHeap::initial_capacity() const {
 784   return _initial_size;
 785 }
 786 
 787 bool ShenandoahHeap::is_in(const void* p) const {
 788   if (is_in_reserved(p)) {
 789     if (is_full_gc_move_in_progress()) {
      // Full GC move is running, and we do not have consistent region
      // information yet. But we know the pointer is in the heap.
 792       return true;
 793     }
 794     // Now check if we point to a live section in active region.
 795     ShenandoahHeapRegion* r = heap_region_containing(p);
 796     return (r->is_active() && p < r->top());
 797   } else {
 798     return false;
 799   }
 800 }
 801 
 802 void ShenandoahHeap::maybe_uncommit(double shrink_before, size_t shrink_until) {
 803   assert (ShenandoahUncommit, "should be enabled");
 804 
 805   // Determine if there is work to do. This avoids taking heap lock if there is
 806   // no work available, avoids spamming logs with superfluous logging messages,
 807   // and minimises the amount of work while locks are taken.
 808 
 809   if (committed() <= shrink_until) return;
 810 
 811   bool has_work = false;
 812   for (size_t i = 0; i < num_regions(); i++) {
 813     ShenandoahHeapRegion* r = get_region(i);
 814     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
 815       has_work = true;
 816       break;
 817     }
 818   }
 819 
 820   if (has_work) {
 821     static const char* msg = "Concurrent uncommit";
 822     ShenandoahConcurrentPhase gcPhase(msg, ShenandoahPhaseTimings::conc_uncommit, true /* log_heap_usage */);
 823     EventMark em("%s", msg);
 824 
 825     op_uncommit(shrink_before, shrink_until);
 826   }
 827 }
 828 
 829 void ShenandoahHeap::op_uncommit(double shrink_before, size_t shrink_until) {
 830   assert (ShenandoahUncommit, "should be enabled");
 831 
  // The application allocates from the beginning of the heap, while the GC allocates at
  // the end of it. It is more efficient to uncommit from the end, so that the application
  // can keep using the committed regions near the bottom of the heap. GC allocations are
  // much less frequent, and can therefore absorb the commit costs.
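  // Consequently, the loop below walks regions from the highest index downwards, re-checks emptiness
  // under the heap lock, and stops once uncommitting another region would drop committed() below
  // shrink_until.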
 836 
 837   size_t count = 0;
 838   for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow
 839     ShenandoahHeapRegion* r = get_region(i - 1);
 840     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
 841       ShenandoahHeapLocker locker(lock());
 842       if (r->is_empty_committed()) {
 843         if (committed() < shrink_until + ShenandoahHeapRegion::region_size_bytes()) {
 844           break;
 845         }
 846 
 847         r->make_uncommitted();
 848         count++;
 849       }
 850     }
 851     SpinPause(); // allow allocators to take the lock
 852   }
 853 
 854   if (count > 0) {
 855     notify_heap_changed();
 856   }
 857 }
 858 
 859 bool ShenandoahHeap::check_soft_max_changed() {
 860   size_t new_soft_max = Atomic::load(&SoftMaxHeapSize);
 861   size_t old_soft_max = soft_max_capacity();
 862   if (new_soft_max != old_soft_max) {
 863     new_soft_max = MAX2(min_capacity(), new_soft_max);
 864     new_soft_max = MIN2(max_capacity(), new_soft_max);
 865     if (new_soft_max != old_soft_max) {
 866       log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
 867                    byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
 868                    byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
 869       );
 870       set_soft_max_capacity(new_soft_max);
 871       return true;
 872     }
 873   }
 874   return false;
 875 }
 876 
 877 void ShenandoahHeap::notify_heap_changed() {
  // Update monitoring counters when we take a new region. This amortizes the
 879   // update costs on slow path.
 880   monitoring_support()->notify_heap_changed();
 881   _heap_changed.set();
 882 }
 883 
 884 void ShenandoahHeap::set_forced_counters_update(bool value) {
 885   monitoring_support()->set_forced_counters_update(value);
 886 }
 887 
 888 void ShenandoahHeap::handle_force_counters_update() {
 889   monitoring_support()->handle_force_counters_update();
 890 }
 891 
 892 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
 893   // New object should fit the GCLAB size
 894   size_t min_size = MAX2(size, PLAB::min_size());
 895 
 896   // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
 897   size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
 898 
 899   new_size = MIN2(new_size, PLAB::max_size());
 900   new_size = MAX2(new_size, PLAB::min_size());
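  // Illustrative example (hypothetical numbers): a thread whose GCLAB was 128k words doubles it to
  // 256k words, clamped into [PLAB::min_size(), PLAB::max_size()]. If even the clamped size cannot
  // fit the requested object, we return nullptr below and the caller falls back to a shared
  // allocation instead of retiring a perfectly good GCLAB.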
 901 
  // Record the new heuristic value even if we take a shortcut. This captures
  // the case where moderately-sized objects always take the shortcut. At some point,
  // the heuristics should catch up with them.
 905   log_debug(gc, free)("Set new GCLAB size: " SIZE_FORMAT, new_size);
 906   ShenandoahThreadLocalData::set_gclab_size(thread, new_size);
 907 
 908   if (new_size < size) {
 909     // New size still does not fit the object. Fall back to shared allocation.
 910     // This avoids retiring perfectly good GCLABs, when we encounter a large object.
 911     log_debug(gc, free)("New gclab size (" SIZE_FORMAT ") is too small for " SIZE_FORMAT, new_size, size);
 912     return nullptr;
 913   }
 914 
 915   // Retire current GCLAB, and allocate a new one.
 916   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
 917   gclab->retire();
 918 
 919   size_t actual_size = 0;
 920   HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
 921   if (gclab_buf == nullptr) {
 922     return nullptr;
 923   }
 924 
 925   assert (size <= actual_size, "allocation should fit");
 926 
  // ...and clear or zap the just-allocated GCLAB, if needed.
 928   if (ZeroTLAB) {
 929     Copy::zero_to_words(gclab_buf, actual_size);
 930   } else if (ZapTLAB) {
 931     // Skip mangling the space corresponding to the object header to
 932     // ensure that the returned space is not considered parsable by
 933     // any concurrent GC thread.
 934     size_t hdr_size = oopDesc::header_size();
 935     Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
 936   }
 937   gclab->set_buf(gclab_buf, actual_size);
 938   return gclab->allocate(size);
 939 }
 940 
 941 // Called from stubs in JIT code or interpreter
 942 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
 943                                             size_t requested_size,
 944                                             size_t* actual_size) {
 945   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
 946   HeapWord* res = allocate_memory(req);
 947   if (res != nullptr) {
 948     *actual_size = req.actual_size();
 949   } else {
 950     *actual_size = 0;
 951   }
 952   return res;
 953 }
 954 
 955 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
 956                                              size_t word_size,
 957                                              size_t* actual_size) {
 958   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
 959   HeapWord* res = allocate_memory(req);
 960   if (res != nullptr) {
 961     *actual_size = req.actual_size();
 962   } else {
 963     *actual_size = 0;
 964   }
 965   return res;
 966 }
 967 
 968 HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
 969   intptr_t pacer_epoch = 0;
 970   bool in_new_region = false;
 971   HeapWord* result = nullptr;
 972 
 973   if (req.is_mutator_alloc()) {
 974     if (ShenandoahPacing) {
 975       pacer()->pace_for_alloc(req.size());
 976       pacer_epoch = pacer()->epoch();
 977     }
 978 
 979     if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
 980       result = allocate_memory_under_lock(req, in_new_region);
 981     }
 982 
    // Check that the GC overhead limit has not been exceeded.
    //
    // Shenandoah will grind along for quite a while allocating one
    // object at a time using shared (non-TLAB) allocations. If the limit
    // has been exceeded, this notifies the collector to start a cycle,
    // but raises an OOME to the mutator if the last Full GCs have not
    // made progress.
    // gc_no_progress_count is incremented after each degenerated or Full GC that fails to achieve is_good_progress().
 991     if (result == nullptr && !req.is_lab_alloc() && get_gc_no_progress_count() > ShenandoahNoProgressThreshold) {
 992       control_thread()->handle_alloc_failure(req, false);
 993       req.set_actual_size(0);
 994       return nullptr;
 995     }
 996 
 997     if (result == nullptr) {
 998       // Block until control thread reacted, then retry allocation.
 999       //
1000       // It might happen that one of the threads requesting allocation would unblock
1001       // way later after GC happened, only to fail the second allocation, because
1002       // other threads have already depleted the free storage. In this case, a better
1003       // strategy is to try again, until at least one full GC has completed.
1004       //
      // Stop retrying and return nullptr, causing an OutOfMemoryError, if the allocation still fails after:
1006       //   a) We experienced a GC that had good progress, or
1007       //   b) We experienced at least one Full GC (whether or not it had good progress)
1008 
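      // Hence the retry loop below keys off shenandoah_policy()->full_gc_count(): keep retrying while
      // no new Full GC has completed since the failure was first observed.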
1009       size_t original_count = shenandoah_policy()->full_gc_count();
1010       while ((result == nullptr) && (original_count == shenandoah_policy()->full_gc_count())) {
1011         control_thread()->handle_alloc_failure(req, true);
1012         result = allocate_memory_under_lock(req, in_new_region);
1013       }
1014       if (result != nullptr) {
        // If our allocation request has been satisfied after it initially failed, we count this as good GC progress
1016         notify_gc_progress();
1017       }
1018       if (log_develop_is_enabled(Debug, gc, alloc)) {
1019         ResourceMark rm;
1020         log_debug(gc, alloc)("Thread: %s, Result: " PTR_FORMAT ", Request: %s, Size: " SIZE_FORMAT
1021                              ", Original: " SIZE_FORMAT ", Latest: " SIZE_FORMAT,
1022                              Thread::current()->name(), p2i(result), req.type_string(), req.size(),
1023                              original_count, get_gc_no_progress_count());
1024       }
1025     }
1026   } else {
1027     assert(req.is_gc_alloc(), "Can only accept GC allocs here");
1028     result = allocate_memory_under_lock(req, in_new_region);
1029     // Do not call handle_alloc_failure() here, because we cannot block.
1030     // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
1031   }
1032 
1033   if (in_new_region) {
1034     notify_heap_changed();
1035   }
1036 
1037   if (result == nullptr) {
1038     req.set_actual_size(0);
1039   }
1040 
1041   // This is called regardless of the outcome of the allocation to account
1042   // for any waste created by retiring regions with this request.
1043   increase_used(req);
1044 
1045   if (result != nullptr) {
1046     size_t requested = req.size();
1047     size_t actual = req.actual_size();
1048 
1049     assert (req.is_lab_alloc() || (requested == actual),
1050             "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
1051             ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);
1052 
1053     if (req.is_mutator_alloc()) {
1054       // If we requested more than we were granted, give the rest back to pacer.
1055       // This only matters if we are in the same pacing epoch: do not try to unpace
1056       // over the budget for the other phase.
1057       if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
1058         pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
1059       }
1060     }
1061   }
1062 
1063   return result;
1064 }
1065 
1066 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
1067   // If we are dealing with mutator allocation, then we may need to block for safepoint.
1068   // We cannot block for safepoint for GC allocations, because there is a high chance
1069   // we are already running at safepoint or from stack watermark machinery, and we cannot
1070   // block again.
1071   ShenandoahHeapLocker locker(lock(), req.is_mutator_alloc());
1072 
1073   // Make sure the old generation has room for either evacuations or promotions before trying to allocate.
1074   if (req.is_old() && !old_generation()->can_allocate(req)) {
1075     return nullptr;
1076   }
1077 
1078   // If TLAB request size is greater than available, allocate() will attempt to downsize request to fit within available
1079   // memory.
1080   HeapWord* result = _free_set->allocate(req, in_new_region);
1081 
1082   // Record the plab configuration for this result and register the object.
1083   if (result != nullptr && req.is_old()) {
1084     old_generation()->configure_plab_for_current_thread(req);
1085     if (req.type() == ShenandoahAllocRequest::_alloc_shared_gc) {
1086       // Register the newly allocated object while we're holding the global lock since there's no synchronization
1087       // built in to the implementation of register_object().  There are potential races when multiple independent
1088       // threads are allocating objects, some of which might span the same card region.  For example, consider
1089       // a card table's memory region within which three objects are being allocated by three different threads:
1090       //
1091       // objects being "concurrently" allocated:
1092       //    [-----a------][-----b-----][--------------c------------------]
1093       //            [---- card table memory range --------------]
1094       //
1095       // Before any objects are allocated, this card's memory range holds no objects.  Note that allocation of object a
1096       // wants to set the starts-object, first-start, and last-start attributes of the preceding card region.
1097       // Allocation of object b wants to set the starts-object, first-start, and last-start attributes of this card region.
1098       // Allocation of object c also wants to set the starts-object, first-start, and last-start attributes of this
1099       // card region.
1100       //
1101       // The thread allocating b and the thread allocating c can "race" in various ways, resulting in confusion, such as
1102       // last-start representing object b while first-start represents object c.  This is why we need to require all
1103       // register_object() invocations to be "mutually exclusive" with respect to each card's memory range.
1104       old_generation()->card_scan()->register_object(result);
1105     }
1106   }
1107 
1108   return result;
1109 }
1110 
1111 HeapWord* ShenandoahHeap::mem_allocate(size_t size,
1112                                         bool*  gc_overhead_limit_was_exceeded) {
1113   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
1114   return allocate_memory(req);
1115 }
1116 
1117 MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
1118                                                              size_t size,
1119                                                              Metaspace::MetadataType mdtype) {
1120   MetaWord* result;
1121 
1122   // Inform metaspace OOM to GC heuristics if class unloading is possible.
1123   ShenandoahHeuristics* h = global_generation()->heuristics();
1124   if (h->can_unload_classes()) {
1125     h->record_metaspace_oom();
1126   }
1127 
1128   // Expand and retry allocation
1129   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1130   if (result != nullptr) {
1131     return result;
1132   }
1133 
1134   // Start full GC
1135   collect(GCCause::_metadata_GC_clear_soft_refs);
1136 
1137   // Retry allocation
1138   result = loader_data->metaspace_non_null()->allocate(size, mdtype);
1139   if (result != nullptr) {
1140     return result;
1141   }
1142 
1143   // Expand and retry allocation
1144   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1145   if (result != nullptr) {
1146     return result;
1147   }
1148 
1149   // Out of memory
1150   return nullptr;
1151 }
1152 
1153 class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
1154 private:
1155   ShenandoahHeap* const _heap;
1156   Thread* const _thread;
1157 public:
1158   ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
1159     _heap(heap), _thread(Thread::current()) {}
1160 
1161   void do_object(oop p) {
1162     shenandoah_assert_marked(nullptr, p);
1163     if (!p->is_forwarded()) {
1164       _heap->evacuate_object(p, _thread);
1165     }
1166   }
1167 };
1168 
1169 class ShenandoahEvacuationTask : public WorkerTask {
1170 private:
1171   ShenandoahHeap* const _sh;
1172   ShenandoahCollectionSet* const _cs;
1173   bool _concurrent;
1174 public:
1175   ShenandoahEvacuationTask(ShenandoahHeap* sh,
1176                            ShenandoahCollectionSet* cs,
1177                            bool concurrent) :
1178     WorkerTask("Shenandoah Evacuation"),
1179     _sh(sh),
1180     _cs(cs),
1181     _concurrent(concurrent)
1182   {}
1183 
1184   void work(uint worker_id) {
1185     if (_concurrent) {
1186       ShenandoahConcurrentWorkerSession worker_session(worker_id);
1187       ShenandoahSuspendibleThreadSetJoiner stsj;
1188       ShenandoahEvacOOMScope oom_evac_scope;
1189       do_work();
1190     } else {
1191       ShenandoahParallelWorkerSession worker_session(worker_id);
1192       ShenandoahEvacOOMScope oom_evac_scope;
1193       do_work();
1194     }
1195   }
1196 
1197 private:
1198   void do_work() {
1199     ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
1200     ShenandoahHeapRegion* r;
1201     while ((r =_cs->claim_next()) != nullptr) {
1202       assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
1203       _sh->marked_object_iterate(r, &cl);
1204 
1205       if (ShenandoahPacing) {
1206         _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
1207       }
1208 
1209       if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
1210         break;
1211       }
1212     }
1213   }
1214 };
1215 
1216 void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
1217   ShenandoahEvacuationTask task(this, _collection_set, concurrent);
1218   workers()->run_task(&task);
1219 }
1220 
1221 oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
1222   assert(thread == Thread::current(), "Expected thread parameter to be current thread.");
1223   if (ShenandoahThreadLocalData::is_oom_during_evac(thread)) {
1224     // This thread went through the OOM during evac protocol. It is safe to return
1225     // the forward pointer. It must not attempt to evacuate any other objects.
1226     return ShenandoahBarrierSet::resolve_forwarded(p);
1227   }
1228 
1229   assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
1230 
1231   ShenandoahHeapRegion* r = heap_region_containing(p);
1232   assert(!r->is_humongous(), "never evacuate humongous objects");
1233 
1234   ShenandoahAffiliation target_gen = r->affiliation();
1235   return try_evacuate_object(p, thread, r, target_gen);
1236 }
1237 
1238 oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapRegion* from_region,
1239                                                ShenandoahAffiliation target_gen) {
1240   assert(target_gen == YOUNG_GENERATION, "Only expect evacuations to young in this mode");
1241   assert(from_region->is_young(), "Only expect evacuations from young in this mode");
1242   bool alloc_from_lab = true;
1243   HeapWord* copy = nullptr;
1244   size_t size = ShenandoahForwarding::size(p);
1245 
1246 #ifdef ASSERT
1247   if (ShenandoahOOMDuringEvacALot &&
1248       (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
1249     copy = nullptr;
1250   } else {
1251 #endif
1252     if (UseTLAB) {
1253       copy = allocate_from_gclab(thread, size);
1254     }
1255     if (copy == nullptr) {
1256       // If we failed to allocate in LAB, we'll try a shared allocation.
1257       ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, target_gen);
1258       copy = allocate_memory(req);
1259       alloc_from_lab = false;
1260     }
1261 #ifdef ASSERT
1262   }
1263 #endif
1264 
1265   if (copy == nullptr) {
1266     control_thread()->handle_alloc_failure_evac(size);
1267 
1268     _oom_evac_handler.handle_out_of_memory_during_evacuation();
1269 
1270     return ShenandoahBarrierSet::resolve_forwarded(p);
1271   }
1272 
1273   // Copy the object:
1274   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
1275 
1276   // Try to install the new forwarding pointer.
1277   oop copy_val = cast_to_oop(copy);
1278   oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
1279   if (result == copy_val) {
1280     // Successfully evacuated. Our copy is now the public one!
1281     ContinuationGCSupport::relativize_stack_chunk(copy_val);
1282     shenandoah_assert_correct(nullptr, copy_val);
1283     return copy_val;
1284   } else {
1285     // Failed to evacuate. We need to deal with the object that is left behind. Since this
1286     // new allocation is certainly after TAMS, it will be considered live in the next cycle.
1287     // But if it happens to contain references to evacuated regions, those references would
1288     // not get updated for this stale copy during this cycle, and we will crash while scanning
1289     // it the next cycle.
1290     if (alloc_from_lab) {
1291       // For LAB allocations, it is enough to rollback the allocation ptr. Either the next
1292       // object will overwrite this stale copy, or the filler object on LAB retirement will
1293       // do this.
1294       ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
1295     } else {
1296       // For non-LAB allocations, we have no way to retract the allocation, and
1297       // have to explicitly overwrite the copy with the filler object. With that overwrite,
1298       // we have to keep the fwdptr initialized and pointing to our (stale) copy.
1299       assert(size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
1300       fill_with_object(copy, size);
1301       shenandoah_assert_correct(nullptr, copy_val);
1302       // For non-LAB allocations, the object has already been registered
1303     }
1304     shenandoah_assert_correct(nullptr, result);
1305     return result;
1306   }
1307 }
1308 
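// Turn every region in the current collection set into trash, under the heap lock.
// The trashed regions are recycled into free regions later (see recycle_trash()).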
1309 void ShenandoahHeap::trash_cset_regions() {
1310   ShenandoahHeapLocker locker(lock());
1311 
1312   ShenandoahCollectionSet* set = collection_set();
1313   ShenandoahHeapRegion* r;
1314   set->clear_current_index();
1315   while ((r = set->next()) != nullptr) {
1316     r->make_trash();
1317   }
1318   collection_set()->clear();
1319 }
1320 
1321 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
1322   st->print_cr("Heap Regions:");
1323   st->print_cr("Region state: EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HP=pinned humongous start");
1324   st->print_cr("              HC=humongous continuation, CS=collection set, TR=trash, P=pinned, CSP=pinned collection set");
1325   st->print_cr("BTE=bottom/top/end, TAMS=top-at-mark-start");
1326   st->print_cr("UWM=update watermark, U=used");
1327   st->print_cr("T=TLAB allocs, G=GCLAB allocs");
1328   st->print_cr("S=shared allocs, L=live data");
1329   st->print_cr("CP=critical pins");
1330 
1331   for (size_t i = 0; i < num_regions(); i++) {
1332     get_region(i)->print_on(st);
1333   }
1334 }
1335 
1336 size_t ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
1337   assert(start->is_humongous_start(), "reclaim regions starting with the first one");
1338 
1339   oop humongous_obj = cast_to_oop(start->bottom());
1340   size_t size = humongous_obj->size();
1341   size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
1342   size_t index = start->index() + required_regions - 1;
1343 
1344   assert(!start->has_live(), "liveness must be zero");
1345 
1346   for (size_t i = 0; i < required_regions; i++) {
1347     // Reclaim from the tail. Otherwise, an assertion fails when printing a region to the trace log,
1348     // because it expects every humongous continuation to follow its humongous start region.
1349     ShenandoahHeapRegion* region = get_region(index--);
1350 
1351     assert(region->is_humongous(), "expect correct humongous start or continuation");
1352     assert(!region->is_cset(), "Humongous region should not be in collection set");
1353 
1354     region->make_trash_immediate();
1355   }
1356   return required_regions;
1357 }
1358 
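// Debug-only check: verifies that per-thread GCLABs (and PLABs in generational mode)
// have already been fully retired and need no further retirement.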
1359 class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
1360 public:
1361   ShenandoahCheckCleanGCLABClosure() {}
1362   void do_thread(Thread* thread) {
1363     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1364     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1365     assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
1366 
1367     if (ShenandoahHeap::heap()->mode()->is_generational()) {
1368       PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1369       assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
1370       assert(plab->words_remaining() == 0, "PLAB should not need retirement");
1371     }
1372   }
1373 };
1374 
1375 class ShenandoahRetireGCLABClosure : public ThreadClosure {
1376 private:
1377   bool const _resize;
1378 public:
1379   ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
1380   void do_thread(Thread* thread) {
1381     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1382     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1383     gclab->retire();
1384     if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1385       ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1386     }
1387 
1388     if (ShenandoahHeap::heap()->mode()->is_generational()) {
1389       PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1390       assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
1391 
1392       // There are two reasons to retire all PLABs between old-gen evacuation passes:
1393       //  1. We need to make the PLAB memory parsable by remembered-set scanning.
1394       //  2. We need to establish a trustworthy update watermark value within each old-gen heap region.
1395       ShenandoahGenerationalHeap::heap()->retire_plab(plab, thread);
1396       if (_resize && ShenandoahThreadLocalData::plab_size(thread) > 0) {
1397         ShenandoahThreadLocalData::set_plab_size(thread, 0);
1398       }
1399     }
1400   }
1401 };
1402 
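// Make all thread-local allocation buffers parsable: TLABs of Java threads are made
// parsable in place, while GCLABs (and PLABs) are retired without resizing, for both
// Java threads and GC worker threads.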
1403 void ShenandoahHeap::labs_make_parsable() {
1404   assert(UseTLAB, "Only call with UseTLAB");
1405 
1406   ShenandoahRetireGCLABClosure cl(false);
1407 
1408   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1409     ThreadLocalAllocBuffer& tlab = t->tlab();
1410     tlab.make_parsable();
1411     cl.do_thread(t);
1412   }
1413 
1414   workers()->threads_do(&cl);
1415 }
1416 
1417 void ShenandoahHeap::tlabs_retire(bool resize) {
1418   assert(UseTLAB, "Only call with UseTLAB");
1419   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1420 
1421   ThreadLocalAllocStats stats;
1422 
1423   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1424     ThreadLocalAllocBuffer& tlab = t->tlab();
1425     tlab.retire(&stats);
1426     if (resize) {
1427       tlab.resize();
1428     }
1429   }
1430 
1431   stats.publish();
1432 
1433 #ifdef ASSERT
1434   ShenandoahCheckCleanGCLABClosure cl;
1435   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1436     cl.do_thread(t);
1437   }
1438   workers()->threads_do(&cl);
1439 #endif
1440 }
1441 
1442 void ShenandoahHeap::gclabs_retire(bool resize) {
1443   assert(UseTLAB, "Only call with UseTLAB");
1444   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1445 
1446   ShenandoahRetireGCLABClosure cl(resize);
1447   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1448     cl.do_thread(t);
1449   }
1450   workers()->threads_do(&cl);
1451 
1452   if (safepoint_workers() != nullptr) {
1453     safepoint_workers()->threads_do(&cl);
1454   }
1455 }
1456 
1457 // Returns size in bytes
1458 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1459   // Return the max allowed size, and let the allocation path
1460   // figure out the safe size for current allocation.
1461   return ShenandoahHeapRegion::max_tlab_size_bytes();
1462 }
1463 
1464 size_t ShenandoahHeap::max_tlab_size() const {
1465   // Returns size in words
1466   return ShenandoahHeapRegion::max_tlab_size_words();
1467 }
1468 
1469 void ShenandoahHeap::collect(GCCause::Cause cause) {
1470   control_thread()->request_gc(cause);
1471 }
1472 
1473 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
1474   //assert(false, "Shouldn't need to do full collections");
1475 }
1476 
1477 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1478   ShenandoahHeapRegion* r = heap_region_containing(addr);
1479   if (r != nullptr) {
1480     return r->block_start(addr);
1481   }
1482   return nullptr;
1483 }
1484 
1485 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1486   ShenandoahHeapRegion* r = heap_region_containing(addr);
1487   return r->block_is_obj(addr);
1488 }
1489 
1490 bool ShenandoahHeap::print_location(outputStream* st, void* addr) const {
1491   return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr);
1492 }
1493 
1494 void ShenandoahHeap::prepare_for_verify() {
1495   if (SafepointSynchronize::is_at_safepoint() && UseTLAB) {
1496     labs_make_parsable();
1497   }
1498 }
1499 
1500 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1501   if (_shenandoah_policy->is_at_shutdown()) {
1502     return;
1503   }
1504 
1505   if (_control_thread != nullptr) {
1506     tcl->do_thread(_control_thread);
1507   }
1508 
1509   workers()->threads_do(tcl);
1510   if (_safepoint_workers != nullptr) {
1511     _safepoint_workers->threads_do(tcl);
1512   }
1513 }
1514 
1515 void ShenandoahHeap::print_tracing_info() const {
1516   LogTarget(Info, gc, stats) lt;
1517   if (lt.is_enabled()) {
1518     ResourceMark rm;
1519     LogStream ls(lt);
1520 
1521     phase_timings()->print_global_on(&ls);
1522 
1523     ls.cr();
1524     ls.cr();
1525 
1526     shenandoah_policy()->print_gc_stats(&ls);
1527 
1528     ls.cr();
1529     ls.cr();
1530   }
1531 }
1532 
1533 void ShenandoahHeap::set_gc_generation(ShenandoahGeneration* generation) {
1534   shenandoah_assert_control_or_vm_thread_at_safepoint();
1535   _gc_generation = generation;
1536 }
1537 
1538 // Active generation may only be set by the VM thread at a safepoint.
1539 void ShenandoahHeap::set_active_generation() {
1540   assert(Thread::current()->is_VM_thread(), "Only the VM Thread");
1541   assert(SafepointSynchronize::is_at_safepoint(), "Only at a safepoint!");
1542   assert(_gc_generation != nullptr, "Will set _active_generation to nullptr");
1543   _active_generation = _gc_generation;
1544 }
1545 
1546 void ShenandoahHeap::on_cycle_start(GCCause::Cause cause, ShenandoahGeneration* generation) {
1547   shenandoah_policy()->record_collection_cause(cause);
1548 
1549   assert(gc_cause() == GCCause::_no_gc, "Over-writing cause");
1550   assert(_gc_generation == nullptr, "Over-writing _gc_generation");
1551 
1552   set_gc_cause(cause);
1553   set_gc_generation(generation);
1554 
1555   generation->heuristics()->record_cycle_start();
1556 }
1557 
1558 void ShenandoahHeap::on_cycle_end(ShenandoahGeneration* generation) {
1559   assert(gc_cause() != GCCause::_no_gc, "cause wasn't set");
1560   assert(_gc_generation != nullptr, "_gc_generation wasn't set");
1561 
1562   generation->heuristics()->record_cycle_end();
1563   if (mode()->is_generational() && generation->is_global()) {
1564     // If we just completed a GLOBAL GC, claim credit for completion of young-gen and old-gen GC as well
1565     young_generation()->heuristics()->record_cycle_end();
1566     old_generation()->heuristics()->record_cycle_end();
1567   }
1568 
1569   set_gc_generation(nullptr);
1570   set_gc_cause(GCCause::_no_gc);
1571 }
1572 
1573 void ShenandoahHeap::verify(VerifyOption vo) {
1574   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1575     if (ShenandoahVerify) {
1576       verifier()->verify_generic(vo);
1577     } else {
1578       // TODO: Consider allocating verification bitmaps on demand,
1579       // and turn this on unconditionally.
1580     }
1581   }
1582 }

1583 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1584   return _free_set->capacity();
1585 }
1586 
1587 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1588 private:
1589   MarkBitMap* _bitmap;
1590   ShenandoahScanObjectStack* _oop_stack;
1591   ShenandoahHeap* const _heap;
1592   ShenandoahMarkingContext* const _marking_context;
1593 
1594   template <class T>
1595   void do_oop_work(T* p) {
1596     T o = RawAccess<>::oop_load(p);
1597     if (!CompressedOops::is_null(o)) {
1598       oop obj = CompressedOops::decode_not_null(o);
1599       if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
1600         // There may be dead oops in weak roots during the concurrent root phase; do not touch them.
1601         return;
1602       }
1603       obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
1604 
1605       assert(oopDesc::is_oop(obj), "must be a valid oop");
1606       if (!_bitmap->is_marked(obj)) {
1607         _bitmap->mark(obj);
1608         _oop_stack->push(obj);
1609       }
1610     }
1611   }
1612 public:
1613   ObjectIterateScanRootClosure(MarkBitMap* bitmap, ShenandoahScanObjectStack* oop_stack) :
1614     _bitmap(bitmap), _oop_stack(oop_stack), _heap(ShenandoahHeap::heap()),
1615     _marking_context(_heap->marking_context()) {}
1616   void do_oop(oop* p)       { do_oop_work(p); }
1617   void do_oop(narrowOop* p) { do_oop_work(p); }
1618 };
1619 
1620 /*
1621  * This is public API, used in preparation of object_iterate().
1622  * Since we don't do linear scan of heap in object_iterate() (see comment below), we don't
1623  * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can
1624  * control, we call SH::tlabs_retire, SH::gclabs_retire.
1625  */
1626 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
1627   // No-op.
1628 }
1629 
1630 /*
1631  * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
1632  *
1633  * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
1634  * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
1635  * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
1636  * scanning therefore depends on having a valid marking bitmap to support it. However, we only
1637  * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
1638  * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
1639  * wiped the bitmap in preparation for next marking).
1640  *
1641  * For all those reasons, we implement object iteration as a single marking traversal, reporting
1642  * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1643  * is allowed to report dead objects, but is not required to do so.
1644  */
1645 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1646   // Reset bitmap
1647   if (!prepare_aux_bitmap_for_iteration())
1648     return;
1649 
1650   ShenandoahScanObjectStack oop_stack;
1651   ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1652   // Seed the stack with root scan
1653   scan_roots_for_iteration(&oop_stack, &oops);
1654 
1655   // Work through the oop stack to traverse heap
1656   while (! oop_stack.is_empty()) {
1657     oop obj = oop_stack.pop();
1658     assert(oopDesc::is_oop(obj), "must be a valid oop");
1659     cl->do_object(obj);
1660     obj->oop_iterate(&oops);
1661   }
1662 
1663   assert(oop_stack.is_empty(), "should be empty");
1664   // Reclaim bitmap
1665   reclaim_aux_bitmap_for_iteration();
1666 }
1667 
1668 bool ShenandoahHeap::prepare_aux_bitmap_for_iteration() {
1669   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1670 
1671   if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1672     log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1673     return false;
1674   }
1675   // Reset bitmap
1676   _aux_bit_map.clear();
1677   return true;
1678 }
1679 
1680 void ShenandoahHeap::scan_roots_for_iteration(ShenandoahScanObjectStack* oop_stack, ObjectIterateScanRootClosure* oops) {
1681   // Process GC roots according to the current GC cycle.
1682   // This populates the work stack with the initial objects.
1683   // It is important to relinquish the associated locks before diving
1684   // into the heap dumper.
1685   uint n_workers = safepoint_workers() != nullptr ? safepoint_workers()->active_workers() : 1;
1686   ShenandoahHeapIterationRootScanner rp(n_workers);
1687   rp.roots_do(oops);
1688 }
1689 
1690 void ShenandoahHeap::reclaim_aux_bitmap_for_iteration() {
1691   if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
1692     log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
1693   }
1694 }
1695 
1696 // Closure for parallel iteration of objects.
1697 class ShenandoahObjectIterateParScanClosure : public BasicOopIterateClosure {
1698 private:
1699   MarkBitMap* _bitmap;
1700   ShenandoahObjToScanQueue* _queue;
1701   ShenandoahHeap* const _heap;
1702   ShenandoahMarkingContext* const _marking_context;
1703 
1704   template <class T>
1705   void do_oop_work(T* p) {
1706     T o = RawAccess<>::oop_load(p);
1707     if (!CompressedOops::is_null(o)) {
1708       oop obj = CompressedOops::decode_not_null(o);
1709       if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
1710         // There may be dead oops in weak roots during the concurrent root phase; do not touch them.
1711         return;
1712       }
1713       obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
1714 
1715       assert(oopDesc::is_oop(obj), "Must be a valid oop");
1716       if (_bitmap->par_mark(obj)) {
1717         _queue->push(ShenandoahMarkTask(obj));
1718       }
1719     }
1720   }
1721 public:
1722   ShenandoahObjectIterateParScanClosure(MarkBitMap* bitmap, ShenandoahObjToScanQueue* q) :
1723     _bitmap(bitmap), _queue(q), _heap(ShenandoahHeap::heap()),
1724     _marking_context(_heap->marking_context()) {}
1725   void do_oop(oop* p)       { do_oop_work(p); }
1726   void do_oop(narrowOop* p) { do_oop_work(p); }
1727 };
1728 
1729 // Object iterator for parallel heap iteration.
1730 // The root scanning phase happens during construction, as preparation of the
1731 // parallel marking queues.
1732 // Every worker processes its own marking queue; work stealing is used
1733 // to balance the workload.
1734 class ShenandoahParallelObjectIterator : public ParallelObjectIteratorImpl {
1735 private:
1736   uint                         _num_workers;
1737   bool                         _init_ready;
1738   MarkBitMap*                  _aux_bit_map;
1739   ShenandoahHeap*              _heap;
1740   ShenandoahScanObjectStack    _roots_stack; // global roots stack
1741   ShenandoahObjToScanQueueSet* _task_queues;
1742 public:
1743   ShenandoahParallelObjectIterator(uint num_workers, MarkBitMap* bitmap) :
1744         _num_workers(num_workers),
1745         _init_ready(false),
1746         _aux_bit_map(bitmap),
1747         _heap(ShenandoahHeap::heap()) {
1748     // Initialize bitmap
1749     _init_ready = _heap->prepare_aux_bitmap_for_iteration();
1750     if (!_init_ready) {
1751       return;
1752     }
1753 
1754     ObjectIterateScanRootClosure oops(_aux_bit_map, &_roots_stack);
1755     _heap->scan_roots_for_iteration(&_roots_stack, &oops);
1756 
1757     _init_ready = prepare_worker_queues();
1758   }
1759 
1760   ~ShenandoahParallelObjectIterator() {
1761     // Reclaim bitmap
1762     _heap->reclaim_aux_bitmap_for_iteration();
1763     // Reclaim queue for workers
1764     if (_task_queues != nullptr) {
1765       for (uint i = 0; i < _num_workers; ++i) {
1766         ShenandoahObjToScanQueue* q = _task_queues->queue(i);
1767         if (q != nullptr) {
1768           delete q;
1769           _task_queues->register_queue(i, nullptr);
1770         }
1771       }
1772       delete _task_queues;
1773       _task_queues = nullptr;
1774     }
1775   }
1776 
1777   virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
1778     if (_init_ready) {
1779       object_iterate_parallel(cl, worker_id, _task_queues);
1780     }
1781   }
1782 
1783 private:
1784   // Divide global root_stack into worker queues
1785   bool prepare_worker_queues() {
1786     _task_queues = new ShenandoahObjToScanQueueSet((int) _num_workers);
1787     // Initialize a queue for every worker.
1788     for (uint i = 0; i < _num_workers; ++i) {
1789       ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
1790       _task_queues->register_queue(i, task_queue);
1791     }
1792     // Divide roots among the workers. Assuming that the distribution of object references
1793     // is related to the root kind, use round-robin so that every worker has the same chance
1794     // to process every kind of root.
1795     size_t roots_num = _roots_stack.size();
1796     if (roots_num == 0) {
1797       // No work to do
1798       return false;
1799     }
1800 
1801     for (uint j = 0; j < roots_num; j++) {
1802       uint stack_id = j % _num_workers;
1803       oop obj = _roots_stack.pop();
1804       _task_queues->queue(stack_id)->push(ShenandoahMarkTask(obj));
1805     }
1806     return true;
1807   }
1808 
1809   void object_iterate_parallel(ObjectClosure* cl,
1810                                uint worker_id,
1811                                ShenandoahObjToScanQueueSet* queue_set) {
1812     assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1813     assert(queue_set != nullptr, "task queue must not be null");
1814 
1815     ShenandoahObjToScanQueue* q = queue_set->queue(worker_id);
1816     assert(q != nullptr, "object iterate queue must not be null");
1817 
1818     ShenandoahMarkTask t;
1819     ShenandoahObjectIterateParScanClosure oops(_aux_bit_map, q);
1820 
1821     // Work through the queue to traverse heap.
1822     // Steal when there is no task in queue.
1823     while (q->pop(t) || queue_set->steal(worker_id, t)) {
1824       oop obj = t.obj();
1825       assert(oopDesc::is_oop(obj), "must be a valid oop");
1826       cl->do_object(obj);
1827       obj->oop_iterate(&oops);
1828     }
1829     assert(q->is_empty(), "should be empty");
1830   }
1831 };
1832 
1833 ParallelObjectIteratorImpl* ShenandoahHeap::parallel_object_iterator(uint workers) {
1834   return new ShenandoahParallelObjectIterator(workers, &_aux_bit_map);
1835 }
1836 
1837 // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
1838 void ShenandoahHeap::keep_alive(oop obj) {
1839   if (is_concurrent_mark_in_progress() && (obj != nullptr)) {
1840     ShenandoahBarrierSet::barrier_set()->enqueue(obj);
1841   }
1842 }
1843 
1844 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1845   for (size_t i = 0; i < num_regions(); i++) {
1846     ShenandoahHeapRegion* current = get_region(i);
1847     blk->heap_region_do(current);
1848   }
1849 }
1850 
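// Parallel heap region iteration: workers claim chunks of '_stride' consecutive regions
// via an atomic fetch-and-add on a shared index and apply the closure to each region
// in the claimed chunk.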
1851 class ShenandoahParallelHeapRegionTask : public WorkerTask {
1852 private:
1853   ShenandoahHeap* const _heap;
1854   ShenandoahHeapRegionClosure* const _blk;
1855   size_t const _stride;
1856 
1857   shenandoah_padding(0);
1858   volatile size_t _index;
1859   shenandoah_padding(1);
1860 
1861 public:
1862   ShenandoahParallelHeapRegionTask(ShenandoahHeapRegionClosure* blk, size_t stride) :
1863           WorkerTask("Shenandoah Parallel Region Operation"),
1864           _heap(ShenandoahHeap::heap()), _blk(blk), _stride(stride), _index(0) {}
1865 
1866   void work(uint worker_id) {
1867     ShenandoahParallelWorkerSession worker_session(worker_id);
1868     size_t stride = _stride;
1869 
1870     size_t max = _heap->num_regions();
1871     while (Atomic::load(&_index) < max) {
1872       size_t cur = Atomic::fetch_then_add(&_index, stride, memory_order_relaxed);
1873       size_t start = cur;
1874       size_t end = MIN2(cur + stride, max);
1875       if (start >= max) break;
1876 
1877       for (size_t i = cur; i < end; i++) {
1878         ShenandoahHeapRegion* current = _heap->get_region(i);
1879         _blk->heap_region_do(current);
1880       }
1881     }
1882   }
1883 };
1884 
1885 void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1886   assert(blk->is_thread_safe(), "Only thread-safe closures here");
1887   const uint active_workers = workers()->active_workers();
1888   const size_t n_regions = num_regions();
1889   size_t stride = ShenandoahParallelRegionStride;
1890   if (stride == 0 && active_workers > 1) {
1891     // Automatically derive the stride to balance the work between threads
1892     // evenly. Do not try to split work if below the reasonable threshold.
1893     constexpr size_t threshold = 4096;
1894     stride = n_regions <= threshold ?
1895             threshold :
1896             (n_regions + active_workers - 1) / active_workers;
1897   }
1898 
1899   if (n_regions > stride && active_workers > 1) {
1900     ShenandoahParallelHeapRegionTask task(blk, stride);
1901     workers()->run_task(&task);
1902   } else {
1903     heap_region_iterate(blk);
1904   }
1905 }
1906 
1907 class ShenandoahRendezvousClosure : public HandshakeClosure {
1908 public:
1909   inline ShenandoahRendezvousClosure(const char* name) : HandshakeClosure(name) {}
1910   inline void do_thread(Thread* thread) {}
1911 };
1912 
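// Execute an empty handshake with all Java threads. The closure body does nothing; the value
// is in the handshake itself, which guarantees every Java thread has passed a handshake-safe
// point before this method returns.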
1913 void ShenandoahHeap::rendezvous_threads(const char* name) {
1914   ShenandoahRendezvousClosure cl(name);
1915   Handshake::execute(&cl);
1916 }
1917 
1918 void ShenandoahHeap::recycle_trash() {
1919   free_set()->recycle_trash();
1920 }
1921 
1922 void ShenandoahHeap::do_class_unloading() {
1923   _unloader.unload();
1924   if (mode()->is_generational()) {
1925     old_generation()->set_parsable(false);
1926   }
1927 }
1928 
1929 void ShenandoahHeap::stw_weak_refs(bool full_gc) {
1930   // Weak refs processing
1931   ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
1932                                                 : ShenandoahPhaseTimings::degen_gc_weakrefs;
1933   ShenandoahTimingsTracker t(phase);
1934   ShenandoahGCWorkerPhase worker_phase(phase);
1935   shenandoah_assert_generations_reconciled();
1936   gc_generation()->ref_processor()->process_references(phase, workers(), false /* concurrent */);
1937 }
1938 
1939 void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
1940   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
1941 
1942   // Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to
1943   // make them parsable for update code to work correctly. Plus, we can compute new sizes
1944   // for future GCLABs here.
1945   if (UseTLAB) {
1946     ShenandoahGCPhase phase(concurrent ?
1947                             ShenandoahPhaseTimings::init_update_refs_manage_gclabs :
1948                             ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
1949     gclabs_retire(ResizeTLAB);
1950   }
1951 
1952   _update_refs_iterator.reset();
1953 }
1954 
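// Push the heap-global gc_state into every Java thread's thread-local copy. This runs at a
// safepoint and only does work if set_gc_state() recorded a change since the last propagation.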
1955 void ShenandoahHeap::propagate_gc_state_to_java_threads() {
1956   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
1957   if (_gc_state_changed) {
1958     _gc_state_changed = false;
1959     char state = gc_state();
1960     for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1961       ShenandoahThreadLocalData::set_gc_state(t, state);
1962     }
1963   }
1964 }
1965 
1966 void ShenandoahHeap::set_gc_state(uint mask, bool value) {
1967   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
1968   _gc_state.set_cond(mask, value);
1969   _gc_state_changed = true;
1970   // Check that if concurrent weak root processing is in progress, then the active generation isn't null.
1971   assert(!is_concurrent_weak_root_in_progress() || active_generation() != nullptr, "Error");
1972   shenandoah_assert_generations_reconciled();
1973 }
1974 
1975 void ShenandoahHeap::set_concurrent_young_mark_in_progress(bool in_progress) {
1976   uint mask;
1977   assert(!has_forwarded_objects(), "Young marking is not concurrent with evacuation");
1978   if (!in_progress && is_concurrent_old_mark_in_progress()) {
1979     assert(mode()->is_generational(), "Only generational GC has old marking");
1980     assert(_gc_state.is_set(MARKING), "concurrent_old_marking_in_progress implies MARKING");
1981     // If old-marking is in progress when we turn off YOUNG_MARKING, leave MARKING (and OLD_MARKING) on
1982     mask = YOUNG_MARKING;
1983   } else {
1984     mask = MARKING | YOUNG_MARKING;
1985   }
1986   set_gc_state(mask, in_progress);
1987   manage_satb_barrier(in_progress);
1988 }
1989 
1990 void ShenandoahHeap::set_concurrent_old_mark_in_progress(bool in_progress) {
1991 #ifdef ASSERT
1992   // has_forwarded_objects() iff UPDATEREFS or EVACUATION
1993   bool has_forwarded = has_forwarded_objects();
1994   bool updating_or_evacuating = _gc_state.is_set(UPDATEREFS | EVACUATION);
1995   bool evacuating = _gc_state.is_set(EVACUATION);
1996   assert ((has_forwarded == updating_or_evacuating) || (evacuating && !has_forwarded && collection_set()->is_empty()),
1997           "Updating or evacuating iff has forwarded objects, or if evacuation phase is promoting in place without forwarding");
1998 #endif
1999   if (!in_progress && is_concurrent_young_mark_in_progress()) {
2000     // If young-marking is in progress when we turn off OLD_MARKING, leave MARKING (and YOUNG_MARKING) on
2001     assert(_gc_state.is_set(MARKING), "concurrent_young_marking_in_progress implies MARKING");
2002     set_gc_state(OLD_MARKING, in_progress);
2003   } else {
2004     set_gc_state(MARKING | OLD_MARKING, in_progress);
2005   }
2006   manage_satb_barrier(in_progress);
2007 }
2008 
2009 bool ShenandoahHeap::is_prepare_for_old_mark_in_progress() const {
2010   return old_generation()->is_preparing_for_mark();
2011 }
2012 
2013 void ShenandoahHeap::manage_satb_barrier(bool active) {
2014   if (is_concurrent_mark_in_progress()) {
2015     // Ignore request to deactivate barrier while concurrent mark is in progress.
2016     // Do not attempt to re-activate the barrier if it is already active.
2017     if (active && !ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
2018       ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
2019     }
2020   } else {
2021     // No concurrent marking is in progress so honor request to deactivate,
2022     // but only if the barrier is already active.
2023     if (!active && ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
2024       ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
2025     }
2026   }
2027 }
2028 
2029 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
2030   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
2031   set_gc_state(EVACUATION, in_progress);
2032 }
2033 
2034 void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
2035   if (in_progress) {
2036     _concurrent_strong_root_in_progress.set();
2037   } else {
2038     _concurrent_strong_root_in_progress.unset();
2039   }
2040 }
2041 
2042 void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
2043   set_gc_state(WEAK_ROOTS, cond);
2044 }
2045 
2046 GCTracer* ShenandoahHeap::tracer() {
2047   return shenandoah_policy()->tracer();
2048 }
2049 
2050 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
2051   return _free_set->used();
2052 }
2053 
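// Atomically flip the cancellation flag from CANCELLABLE to CANCELLED. Returns true only for
// the single caller that actually performed the transition.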
2054 bool ShenandoahHeap::try_cancel_gc() {
2055   jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
2056   return prev == CANCELLABLE;
2057 }
2058 
2059 void ShenandoahHeap::cancel_concurrent_mark() {
2060   if (mode()->is_generational()) {
2061     young_generation()->cancel_marking();
2062     old_generation()->cancel_marking();
2063   }
2064 
2065   global_generation()->cancel_marking();
2066 
2067   ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
2068 }
2069 
2070 void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
2071   if (try_cancel_gc()) {
2072     FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
2073     log_info(gc)("%s", msg.buffer());
2074     Events::log(Thread::current(), "%s", msg.buffer());
2075     _cancel_requested_time = os::elapsedTime();
2076   }
2077 }
2078 
2079 uint ShenandoahHeap::max_workers() {
2080   return _max_workers;
2081 }
2082 
2083 void ShenandoahHeap::stop() {
2084   // The shutdown sequence should be able to terminate when GC is running.
2085 
2086   // Step 0. Notify policy to disable event recording and prevent visiting gc threads during shutdown
2087   _shenandoah_policy->record_shutdown();
2088 
2089   // Step 1. Notify control thread that we are in shutdown.
2090   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
2091   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
2092   control_thread()->prepare_for_graceful_shutdown();
2093 
2094   // Step 2. Notify GC workers that we are cancelling GC.
2095   cancel_gc(GCCause::_shenandoah_stop_vm);
2096 
2097   // Step 3. Wait until GC worker exits normally.
2098   control_thread()->stop();
2099 }
2100 
2101 void ShenandoahHeap::stw_unload_classes(bool full_gc) {
2102   if (!unload_classes()) return;
2103   ClassUnloadingContext ctx(_workers->active_workers(),
2104                             true /* unregister_nmethods_during_purge */,
2105                             false /* lock_nmethod_free_separately */);
2106 
2107   // Unload classes and purge SystemDictionary.
2108   {
2109     ShenandoahPhaseTimings::Phase phase = full_gc ?
2110                                           ShenandoahPhaseTimings::full_gc_purge_class_unload :
2111                                           ShenandoahPhaseTimings::degen_gc_purge_class_unload;
2112     ShenandoahIsAliveSelector is_alive;
2113     {
2114       CodeCache::UnlinkingScope scope(is_alive.is_alive_closure());
2115       ShenandoahGCPhase gc_phase(phase);
2116       ShenandoahGCWorkerPhase worker_phase(phase);
2117       bool unloading_occurred = SystemDictionary::do_unloading(gc_timer());
2118 
2119       uint num_workers = _workers->active_workers();
2120       ShenandoahClassUnloadingTask unlink_task(phase, num_workers, unloading_occurred);
2121       _workers->run_task(&unlink_task);
2122     }
2123     // Release the memory of unloaded nmethods.
2124     ClassUnloadingContext::context()->purge_and_free_nmethods();
2125   }
2126 
2127   {
2128     ShenandoahGCPhase phase(full_gc ?
2129                             ShenandoahPhaseTimings::full_gc_purge_cldg :
2130                             ShenandoahPhaseTimings::degen_gc_purge_cldg);
2131     ClassLoaderDataGraph::purge(true /* at_safepoint */);
2132   }
2133   // Resize and verify metaspace
2134   MetaspaceGC::compute_new_size();
2135   DEBUG_ONLY(MetaspaceUtils::verify();)
2136 }
2137 
2138 // Weak roots are either pre-evacuated (final mark) or updated (final updaterefs),
2139 // so they should not have forwarded oops.
2140 // However, we do need to "null out" dead oops in the roots, if that could not be done
2141 // in concurrent cycles.
2142 void ShenandoahHeap::stw_process_weak_roots(bool full_gc) {
2143   uint num_workers = _workers->active_workers();
2144   ShenandoahPhaseTimings::Phase timing_phase = full_gc ?
2145                                                ShenandoahPhaseTimings::full_gc_purge_weak_par :
2146                                                ShenandoahPhaseTimings::degen_gc_purge_weak_par;
2147   ShenandoahGCPhase phase(timing_phase);
2148   ShenandoahGCWorkerPhase worker_phase(timing_phase);
2149   // Cleanup weak roots
2150   if (has_forwarded_objects()) {
2151     ShenandoahForwardedIsAliveClosure is_alive;
2152     ShenandoahNonConcUpdateRefsClosure keep_alive;
2153     ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveClosure, ShenandoahNonConcUpdateRefsClosure>
2154       cleaning_task(timing_phase, &is_alive, &keep_alive, num_workers);
2155     _workers->run_task(&cleaning_task);
2156   } else {
2157     ShenandoahIsAliveClosure is_alive;
2158 #ifdef ASSERT
2159     ShenandoahAssertNotForwardedClosure verify_cl;
2160     ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, ShenandoahAssertNotForwardedClosure>
2161       cleaning_task(timing_phase, &is_alive, &verify_cl, num_workers);
2162 #else
2163     ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, DoNothingClosure>
2164       cleaning_task(timing_phase, &is_alive, &do_nothing_cl, num_workers);
2165 #endif
2166     _workers->run_task(&cleaning_task);
2167   }
2168 }
2169 
2170 void ShenandoahHeap::parallel_cleaning(bool full_gc) {
2171   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2172   assert(is_stw_gc_in_progress(), "Only for Degenerated and Full GC");
2173   ShenandoahGCPhase phase(full_gc ?
2174                           ShenandoahPhaseTimings::full_gc_purge :
2175                           ShenandoahPhaseTimings::degen_gc_purge);
2176   stw_weak_refs(full_gc);
2177   stw_process_weak_roots(full_gc);
2178   stw_unload_classes(full_gc);
2179 }
2180 
2181 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
2182   set_gc_state(HAS_FORWARDED, cond);
2183 }
2184 
2185 void ShenandoahHeap::set_unload_classes(bool uc) {
2186   _unload_classes.set_cond(uc);
2187 }
2188 
2189 bool ShenandoahHeap::unload_classes() const {
2190   return _unload_classes.is_set();
2191 }
2192 
2193 address ShenandoahHeap::in_cset_fast_test_addr() {
2194   ShenandoahHeap* heap = ShenandoahHeap::heap();
2195   assert(heap->collection_set() != nullptr, "Sanity");
2196   return (address) heap->collection_set()->biased_map_address();
2197 }
2198 
2199 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
2200   if (mode()->is_generational()) {
2201     young_generation()->reset_bytes_allocated_since_gc_start();
2202     old_generation()->reset_bytes_allocated_since_gc_start();
2203   }
2204 
2205   global_generation()->reset_bytes_allocated_since_gc_start();
2206 }
2207 
2208 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
2209   _degenerated_gc_in_progress.set_cond(in_progress);
2210 }
2211 
2212 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2213   _full_gc_in_progress.set_cond(in_progress);
2214 }
2215 
2216 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2217   assert (is_full_gc_in_progress(), "should be");
2218   _full_gc_move_in_progress.set_cond(in_progress);
2219 }
2220 
2221 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2222   set_gc_state(UPDATEREFS, in_progress);
2223 }
2224 
2225 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2226   ShenandoahCodeRoots::register_nmethod(nm);
2227 }
2228 
2229 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
2230   ShenandoahCodeRoots::unregister_nmethod(nm);
2231 }
2232 
2233 void ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
2234   heap_region_containing(o)->record_pin();
2235 }
2236 
2237 void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
2238   ShenandoahHeapRegion* r = heap_region_containing(o);
2239   assert(r != nullptr, "Sanity");
2240   assert(r->pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", r->index());
2241   r->record_unpin();
2242 }
2243 
2244 void ShenandoahHeap::sync_pinned_region_status() {
2245   ShenandoahHeapLocker locker(lock());
2246 
2247   for (size_t i = 0; i < num_regions(); i++) {
2248     ShenandoahHeapRegion *r = get_region(i);
2249     if (r->is_active()) {
2250       if (r->is_pinned()) {
2251         if (r->pin_count() == 0) {
2252           r->make_unpinned();
2253         }
2254       } else {
2255         if (r->pin_count() > 0) {
2256           r->make_pinned();
2257         }
2258       }
2259     }
2260   }
2261 
2262   assert_pinned_region_status();
2263 }
2264 
2265 #ifdef ASSERT
2266 void ShenandoahHeap::assert_pinned_region_status() {
2267   for (size_t i = 0; i < num_regions(); i++) {
2268     ShenandoahHeapRegion* r = get_region(i);
2269     shenandoah_assert_generations_reconciled();
2270     if (gc_generation()->contains(r)) {
2271       assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
2272              "Region " SIZE_FORMAT " pinning status is inconsistent", i);
2273     }
2274   }
2275 }
2276 #endif
2277 
2278 ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
2279   return _gc_timer;
2280 }
2281 
2282 void ShenandoahHeap::prepare_concurrent_roots() {
2283   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2284   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2285   set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
2286   set_concurrent_weak_root_in_progress(true);
2287   if (unload_classes()) {
2288     _unloader.prepare();
2289   }
2290 }
2291 
2292 void ShenandoahHeap::finish_concurrent_roots() {
2293   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2294   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2295   if (unload_classes()) {
2296     _unloader.finish();
2297   }
2298 }
2299 
2300 #ifdef ASSERT
2301 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
2302   assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
2303 
2304   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
2305     // Use ParallelGCThreads inside safepoints
2306     assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads (%u) within safepoint, not %u",
2307            ParallelGCThreads, nworkers);
2308   } else {
2309     // Use ConcGCThreads outside safepoints
2310     assert(nworkers == ConcGCThreads, "Use ConcGCThreads (%u) outside safepoints, not %u",
2311            ConcGCThreads, nworkers);
2312   }
2313 }
2314 #endif
2315 
2316 ShenandoahVerifier* ShenandoahHeap::verifier() {
2317   guarantee(ShenandoahVerify, "Should be enabled");
2318   assert (_verifier != nullptr, "sanity");
2319   return _verifier;
2320 }
2321 
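// Update-references worker task: walks heap regions handed out by the shared region iterator
// and updates references in live objects up to each region's update watermark. The CONCURRENT
// template parameter selects the concurrent vs. stop-the-world closure and worker session.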
2322 template<bool CONCURRENT>
2323 class ShenandoahUpdateHeapRefsTask : public WorkerTask {
2324 private:
2325   ShenandoahHeap* _heap;
2326   ShenandoahRegionIterator* _regions;
2327 public:
2328   explicit ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions) :
2329     WorkerTask("Shenandoah Update References"),
2330     _heap(ShenandoahHeap::heap()),
2331     _regions(regions) {
2332   }
2333 
2334   void work(uint worker_id) {
2335     if (CONCURRENT) {
2336       ShenandoahConcurrentWorkerSession worker_session(worker_id);
2337       ShenandoahSuspendibleThreadSetJoiner stsj;
2338       do_work<ShenandoahConcUpdateRefsClosure>(worker_id);
2339     } else {
2340       ShenandoahParallelWorkerSession worker_session(worker_id);
2341       do_work<ShenandoahNonConcUpdateRefsClosure>(worker_id);
2342     }
2343   }
2344 
2345 private:
2346   template<class T>
2347   void do_work(uint worker_id) {
2348     if (CONCURRENT && (worker_id == 0)) {
2349       // We ask the first worker to replenish the Mutator free set by moving regions previously reserved to hold the
2350       // results of evacuation.  These reserves are no longer necessary because evacuation has completed.
2351       size_t cset_regions = _heap->collection_set()->count();
2352 
2353       // Now that evacuation is done, we can reassign any regions that had been reserved to hold the results of evacuation
2354       // to the mutator free set.  At the end of GC, we will have cset_regions newly evacuated fully empty regions from
2355       // which we will be able to replenish the Collector free set and the OldCollector free set in preparation for the
2356       // next GC cycle.
2357       _heap->free_set()->move_regions_from_collector_to_mutator(cset_regions);
2358     }
2359     // If !CONCURRENT, there's no value in expanding Mutator free set
2360     T cl;
2361     ShenandoahHeapRegion* r = _regions->next();
2362     while (r != nullptr) {
2363       HeapWord* update_watermark = r->get_update_watermark();
2364       assert (update_watermark >= r->bottom(), "sanity");
2365       if (r->is_active() && !r->is_cset()) {
2366         _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2367         if (ShenandoahPacing) {
2368           _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
2369         }
2370       }
2371       if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
2372         return;
2373       }
2374       r = _regions->next();
2375     }
2376   }
2377 };
2378 
2379 void ShenandoahHeap::update_heap_references(bool concurrent) {
2380   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2381 
2382   if (concurrent) {
2383     ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator);
2384     workers()->run_task(&task);
2385   } else {
2386     ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator);
2387     workers()->run_task(&task);
2388   }
2389 }
2390 
2391 ShenandoahSynchronizePinnedRegionStates::ShenandoahSynchronizePinnedRegionStates() : _lock(ShenandoahHeap::heap()->lock()) { }
2392 
2393 void ShenandoahSynchronizePinnedRegionStates::heap_region_do(ShenandoahHeapRegion* r) {
2394   // Drop "pinned" state from regions that no longer have a pinned count. Put
2395   // regions with a pinned count into the "pinned" state.
2396   if (r->is_active()) {
2397     if (r->is_pinned()) {
2398       if (r->pin_count() == 0) {
2399         ShenandoahHeapLocker locker(_lock);
2400         r->make_unpinned();
2401       }
2402     } else {
2403       if (r->pin_count() > 0) {
2404         ShenandoahHeapLocker locker(_lock);
2405         r->make_pinned();
2406       }
2407     }
2408   }
2409 }
2410 
2411 void ShenandoahHeap::update_heap_region_states(bool concurrent) {
2412   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2413   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2414 
2415   {
2416     ShenandoahGCPhase phase(concurrent ?
2417                             ShenandoahPhaseTimings::final_update_refs_update_region_states :
2418                             ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
2419 
2420     final_update_refs_update_region_states();
2421 
2422     assert_pinned_region_status();
2423   }
2424 
2425   {
2426     ShenandoahGCPhase phase(concurrent ?
2427                             ShenandoahPhaseTimings::final_update_refs_trash_cset :
2428                             ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
2429     trash_cset_regions();
2430   }
2431 }
2432 
2433 void ShenandoahHeap::final_update_refs_update_region_states() {
2434   ShenandoahSynchronizePinnedRegionStates cl;
2435   parallel_heap_region_iterate(&cl);
2436 }
2437 
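// Rebuild the free set after a cycle, under the heap lock: count the collection set regions
// that are being recycled, rebalance the young/old budget in generational mode, then finish
// the rebuild and, in generational mode, re-evaluate old-generation triggers.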
2438 void ShenandoahHeap::rebuild_free_set(bool concurrent) {
2439   ShenandoahGCPhase phase(concurrent ?
2440                           ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
2441                           ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
2442   ShenandoahHeapLocker locker(lock());
2443   size_t young_cset_regions, old_cset_regions;
2444   size_t first_old_region, last_old_region, old_region_count;
2445   _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count);
2446   // If there are no old regions, first_old_region will be greater than last_old_region
2447   assert((first_old_region > last_old_region) ||
2448          ((last_old_region + 1 - first_old_region >= old_region_count) &&
2449           get_region(first_old_region)->is_old() && get_region(last_old_region)->is_old()),
2450          "sanity: old_region_count: " SIZE_FORMAT ", first_old_region: " SIZE_FORMAT ", last_old_region: " SIZE_FORMAT,
2451          old_region_count, first_old_region, last_old_region);
2452 
2453   if (mode()->is_generational()) {
2454 #ifdef ASSERT
2455     if (ShenandoahVerify) {
2456       verifier()->verify_before_rebuilding_free_set();
2457     }
2458 #endif
2459 
2460     // The computation of bytes_of_allocation_runway_before_gc_trigger is quite conservative so consider all of this
2461     // available for transfer to old. Note that transfer of humongous regions does not impact available.
2462     ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
2463     size_t allocation_runway = gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions);
2464     gen_heap->compute_old_generation_balance(allocation_runway, old_cset_regions);
2465 
2466     // Total old_available may have been expanded to hold anticipated promotions.  We trigger if the fragmented available
2467     // memory represents more than 16 regions worth of data.  Note that fragmentation may increase when we promote regular
2468     // regions in place when many of these regular regions have an abundant amount of available memory within them.  Fragmentation
2469     // will decrease as promote-by-copy consumes the available memory within these partially consumed regions.
2470     //
2471     // We consider old-gen to have excessive fragmentation if more than 12.5% of old-gen is free memory that resides
2472     // within partially consumed regions of memory.
2473   }
2474   // Rebuild free set based on adjusted generation sizes.
2475   _free_set->finish_rebuild(young_cset_regions, old_cset_regions, old_region_count);
2476 
2477   if (mode()->is_generational()) {
2478     ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
2479     ShenandoahOldGeneration* old_gen = gen_heap->old_generation();
2480     old_gen->heuristics()->evaluate_triggers(first_old_region, last_old_region, old_region_count, num_regions());
2481   }
2482 }
2483 
2484 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2485   print_on(st);
2486   st->cr();
2487   print_heap_regions_on(st);
2488 }
2489 
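// Marking bitmap memory is committed in slices covering _bitmap_regions_per_slice regions.
// Returns true if any region in r's slice (optionally excluding r itself) is committed,
// which means the slice must remain committed.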
2490 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2491   size_t slice = r->index() / _bitmap_regions_per_slice;
2492 
2493   size_t regions_from = _bitmap_regions_per_slice * slice;
2494   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2495   for (size_t g = regions_from; g < regions_to; g++) {
2496     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2497     if (skip_self && g == r->index()) continue;
2498     if (get_region(g)->is_committed()) {
2499       return true;
2500     }
2501   }
2502   return false;
2503 }
2504 
2505 bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2506   shenandoah_assert_heaplocked();
2507 
2508   // Bitmaps in special regions do not need commits
2509   if (_bitmap_region_special) {
2510     return true;
2511   }
2512 
2513   if (is_bitmap_slice_committed(r, true)) {
2514     // Some other region from the group is already committed, meaning the bitmap
2515     // slice is already committed; we can exit right away.
2516     return true;
2517   }
2518 
2519   // Commit the bitmap slice:
2520   size_t slice = r->index() / _bitmap_regions_per_slice;
2521   size_t off = _bitmap_bytes_per_slice * slice;
2522   size_t len = _bitmap_bytes_per_slice;
2523   char* start = (char*) _bitmap_region.start() + off;
2524 
2525   if (!os::commit_memory(start, len, false)) {
2526     return false;
2527   }
2528 
2529   if (AlwaysPreTouch) {
2530     os::pretouch_memory(start, start + len, _pretouch_bitmap_page_size);
2531   }
2532 
2533   return true;
2534 }
2535 
2536 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2537   shenandoah_assert_heaplocked();
2538 
2539   // Bitmaps in special regions do not need uncommits
2540   if (_bitmap_region_special) {
2541     return true;
2542   }
2543 
2544   if (is_bitmap_slice_committed(r, true)) {
2545     // Some other region from the group is still committed, meaning the bitmap
2546     // slice should stay committed; exit right away.
2547     return true;
2548   }
2549 
2550   // Uncommit the bitmap slice:
2551   size_t slice = r->index() / _bitmap_regions_per_slice;
2552   size_t off = _bitmap_bytes_per_slice * slice;
2553   size_t len = _bitmap_bytes_per_slice;
2554   if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
2555     return false;
2556   }
2557   return true;
2558 }
2559 
2560 void ShenandoahHeap::safepoint_synchronize_begin() {
2561   StackWatermarkSet::safepoint_synchronize_begin();
2562   SuspendibleThreadSet::synchronize();
2563 }
2564 
2565 void ShenandoahHeap::safepoint_synchronize_end() {
2566   SuspendibleThreadSet::desynchronize();
2567 }
2568 
2569 void ShenandoahHeap::try_inject_alloc_failure() {
2570   if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2571     _inject_alloc_failure.set();
2572     os::naked_short_sleep(1);
2573     if (cancelled_gc()) {
2574       log_info(gc)("Allocation failure was successfully injected");
2575     }
2576   }
2577 }
2578 
2579 bool ShenandoahHeap::should_inject_alloc_failure() {
2580   return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2581 }
2582 
2583 void ShenandoahHeap::initialize_serviceability() {
2584   _memory_pool = new ShenandoahMemoryPool(this);
2585   _cycle_memory_manager.add_pool(_memory_pool);
2586   _stw_memory_manager.add_pool(_memory_pool);
2587 }
2588 
2589 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2590   GrowableArray<GCMemoryManager*> memory_managers(2);
2591   memory_managers.append(&_cycle_memory_manager);
2592   memory_managers.append(&_stw_memory_manager);
2593   return memory_managers;
2594 }
2595 
2596 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2597   GrowableArray<MemoryPool*> memory_pools(1);
2598   memory_pools.append(_memory_pool);
2599   return memory_pools;
2600 }
2601 
2602 MemoryUsage ShenandoahHeap::memory_usage() {
2603   return MemoryUsage(_initial_size, used(), committed(), max_capacity());
2604 }
2605 
2606 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2607   _heap(ShenandoahHeap::heap()),
2608   _index(0) {}
2609 
2610 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2611   _heap(heap),
2612   _index(0) {}
2613 
2614 void ShenandoahRegionIterator::reset() {
2615   _index = 0;
2616 }
2617 
2618 bool ShenandoahRegionIterator::has_next() const {
2619   return _index < _heap->num_regions();
2620 }
2621 
2622 char ShenandoahHeap::gc_state() const {
2623   return _gc_state.raw_value();
2624 }
2625 
2626 ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
2627 #ifdef ASSERT
2628   assert(_liveness_cache != nullptr, "sanity");
2629   assert(worker_id < _max_workers, "sanity");
2630   for (uint i = 0; i < num_regions(); i++) {
2631     assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
2632   }
2633 #endif
2634   return _liveness_cache[worker_id];
2635 }
2636 
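// Fold the per-worker liveness cache into the owning regions' live data counters and
// reset the cache entries to zero.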
2637 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
2638   assert(worker_id < _max_workers, "sanity");
2639   assert(_liveness_cache != nullptr, "sanity");
2640   ShenandoahLiveData* ld = _liveness_cache[worker_id];
2641   for (uint i = 0; i < num_regions(); i++) {
2642     ShenandoahLiveData live = ld[i];
2643     if (live > 0) {
2644       ShenandoahHeapRegion* r = get_region(i);
2645       r->increase_live_data_gc_words(live);
2646       ld[i] = 0;
2647     }
2648   }
2649 }
2650 
2651 bool ShenandoahHeap::requires_barriers(stackChunkOop obj) const {
2652   if (is_idle()) return false;
2653 
2654   // Objects allocated after marking start are implicitly alive and don't need any barriers during
2655   // the marking phase.
2656   if (is_concurrent_mark_in_progress() &&
2657      !marking_context()->allocated_after_mark_start(obj)) {
2658     return true;
2659   }
2660 
2661   // Cannot guarantee that obj is deeply good (its reachable references may still point to forwarded objects).
2662   if (has_forwarded_objects()) {
2663     return true;
2664   }
2665 
2666   return false;
2667 }
2668 
2669 HeapWord* ShenandoahHeap::allocate_loaded_archive_space(size_t size) {
2670 #if INCLUDE_CDS_JAVA_HEAP
2671   // CDS wants a contiguous memory range to load archived objects into.
2672   // This effectively bypasses the normal allocation paths, and requires
2673   // a bit of massaging afterwards to restore the GC invariants.
2674 
2675   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
2676 
2677   // Easy case: a single regular region, no further adjustments needed.
2678   if (!ShenandoahHeapRegion::requires_humongous(size)) {
2679     return allocate_memory(req);
2680   }
2681 
2682   // Hard case: the requested size would cause a humongous allocation.
2683   // We need to make sure it looks like regular allocation to the rest of GC.
2684 
2685   // CDS code guarantees that no object straddles multiple regions, as long as
2686   // regions are at least MIN_GC_REGION_ALIGNMENT in size. It is impractical at this
2687   // point to deal with the case when Shenandoah runs with smaller regions.
2688   // TODO: This check can be dropped once MIN_GC_REGION_ALIGNMENT agrees better with Shenandoah region sizes.
2689   if (ShenandoahHeapRegion::region_size_bytes() < ArchiveHeapWriter::MIN_GC_REGION_ALIGNMENT) {
2690     return nullptr;
2691   }
2692 
2693   HeapWord* mem = allocate_memory(req);
2694   size_t start_idx = heap_region_index_containing(mem);
2695   size_t num_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
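       // Illustrative example (hypothetical numbers): with 4 MB regions, a 10 MB archive range
       // requires required_regions() == 3 regions starting at start_idx; the loop below flips
       // them from humongous to regular so the rest of the GC treats them like ordinary allocations.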
2696 
2697   // Flip humongous -> regular.
2698   {
2699     ShenandoahHeapLocker locker(lock(), false);
2700     for (size_t c = start_idx; c < start_idx + num_regions; c++) {
2701       get_region(c)->make_regular_bypass();
2702     }
2703   }
2704 
2705   return mem;
2706 #else
2707   assert(false, "Archive heap loader should not be available; should not reach here");
2708   return nullptr;
2709 #endif // INCLUDE_CDS_JAVA_HEAP
2710 }
2711 
2712 void ShenandoahHeap::complete_loaded_archive_space(MemRegion archive_space) {
2713   // Nothing to do here, except checking that the heap looks fine.
2714 #ifdef ASSERT
2715   HeapWord* start = archive_space.start();
2716   HeapWord* end = archive_space.end();
2717 
2718   // No unclaimed space between the objects.
2719   // Objects are properly allocated in correct regions.
2720   HeapWord* cur = start;
2721   while (cur < end) {
2722     oop obj = cast_to_oop(cur);
2723     shenandoah_assert_in_correct_region(nullptr, obj);
2724     cur += obj->size();
2725   }
2726 
2727   // No unclaimed tail at the end of archive space.
2728   assert(cur == end,
2729          "Archive space should be fully used: " PTR_FORMAT " " PTR_FORMAT,
2730          p2i(cur), p2i(end));
2731 
2732   // Region bounds are good.
2733   ShenandoahHeapRegion* begin_reg = heap_region_containing(start);
2734   ShenandoahHeapRegion* end_reg = heap_region_containing(end);
2735   assert(begin_reg->is_regular(), "Must be");
2736   assert(end_reg->is_regular(), "Must be");
2737   assert(begin_reg->bottom() == start,
2738          "Must agree: archive-space-start: " PTR_FORMAT ", begin-region-bottom: " PTR_FORMAT,
2739          p2i(start), p2i(begin_reg->bottom()));
2740   assert(end_reg->top() == end,
2741          "Must agree: archive-space-end: " PTR_FORMAT ", end-region-top: " PTR_FORMAT,
2742          p2i(end), p2i(end_reg->top()));
2743 #endif
2744 }
2745 
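     // Map a region affiliation to the generation that owns it: non-generational mode has only
     // the global generation, generational mode distinguishes young from old; any other
     // affiliation is a bug.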
2746 ShenandoahGeneration* ShenandoahHeap::generation_for(ShenandoahAffiliation affiliation) const {
2747   if (!mode()->is_generational()) {
2748     return global_generation();
2749   } else if (affiliation == YOUNG_GENERATION) {
2750     return young_generation();
2751   } else if (affiliation == OLD_GENERATION) {
2752     return old_generation();
2753   }
2754 
2755   ShouldNotReachHere();
2756   return nullptr;
2757 }
2758 
2759 void ShenandoahHeap::log_heap_status(const char* msg) const {
2760   if (mode()->is_generational()) {
2761     young_generation()->log_status(msg);
2762     old_generation()->log_status(msg);
2763   } else {
2764     global_generation()->log_status(msg);
2765   }
2766 }
2767