1 /*
   2  * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.
   4  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
   5  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   6  *
   7  * This code is free software; you can redistribute it and/or modify it
   8  * under the terms of the GNU General Public License version 2 only, as
   9  * published by the Free Software Foundation.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 #include "precompiled.hpp"
  28 #include "memory/allocation.hpp"
  29 #include "memory/universe.hpp"
  30 
  31 #include "gc/shared/classUnloadingContext.hpp"
  32 #include "gc/shared/gcArguments.hpp"
  33 #include "gc/shared/gcTimer.hpp"
  34 #include "gc/shared/gcTraceTime.inline.hpp"
  35 #include "gc/shared/locationPrinter.inline.hpp"
  36 #include "gc/shared/memAllocator.hpp"
  37 #include "gc/shared/plab.hpp"
  38 #include "gc/shared/tlab_globals.hpp"
  39 
  40 #include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
  41 #include "gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp"
  42 #include "gc/shenandoah/shenandoahAllocRequest.hpp"
  43 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  44 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  45 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  46 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  47 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
  48 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  49 #include "gc/shenandoah/shenandoahControlThread.hpp"
  50 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  51 #include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
  52 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
  53 #include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
  54 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  55 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  56 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  57 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  58 #include "gc/shenandoah/shenandoahInitLogger.hpp"
  59 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  60 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
  61 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  62 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  63 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  64 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
  65 #include "gc/shenandoah/shenandoahPadding.hpp"
  66 #include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
  67 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  68 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  69 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
  70 #include "gc/shenandoah/shenandoahSTWMark.hpp"
  71 #include "gc/shenandoah/shenandoahUtils.hpp"
  72 #include "gc/shenandoah/shenandoahVerifier.hpp"
  73 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  74 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  75 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  76 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  77 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  78 #include "gc/shenandoah/mode/shenandoahGenerationalMode.hpp"
  79 #include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
  80 #include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
  81 #include "utilities/globalDefinitions.hpp"
  82 
  83 #if INCLUDE_JFR
  84 #include "gc/shenandoah/shenandoahJfrSupport.hpp"
  85 #endif
  86 
  87 #include "cds/archiveHeapWriter.hpp"
  88 #include "classfile/systemDictionary.hpp"
  89 #include "code/codeCache.hpp"
  90 #include "memory/classLoaderMetaspace.hpp"
  91 #include "memory/metaspaceUtils.hpp"
  92 #include "nmt/mallocTracker.hpp"
  93 #include "nmt/memTracker.hpp"
  94 #include "oops/compressedOops.inline.hpp"
  95 #include "prims/jvmtiTagMap.hpp"
  96 #include "runtime/atomic.hpp"
  97 #include "runtime/globals.hpp"
  98 #include "runtime/interfaceSupport.inline.hpp"
  99 #include "runtime/java.hpp"
 100 #include "runtime/orderAccess.hpp"
 101 #include "runtime/safepointMechanism.hpp"
 102 #include "runtime/stackWatermarkSet.hpp"
 103 #include "runtime/vmThread.hpp"
 104 #include "utilities/events.hpp"
 105 #include "utilities/powerOfTwo.hpp"
 106 
 107 class ShenandoahPretouchHeapTask : public WorkerTask {
 108 private:
 109   ShenandoahRegionIterator _regions;
 110   const size_t _page_size;
 111 public:
 112   ShenandoahPretouchHeapTask(size_t page_size) :
 113     WorkerTask("Shenandoah Pretouch Heap"),
 114     _page_size(page_size) {}
 115 
 116   virtual void work(uint worker_id) {
 117     ShenandoahHeapRegion* r = _regions.next();
 118     while (r != nullptr) {
 119       if (r->is_committed()) {
 120         os::pretouch_memory(r->bottom(), r->end(), _page_size);
 121       }
 122       r = _regions.next();
 123     }
 124   }
 125 };
 126 
 127 class ShenandoahPretouchBitmapTask : public WorkerTask {
 128 private:
 129   ShenandoahRegionIterator _regions;
 130   char* _bitmap_base;
 131   const size_t _bitmap_size;
 132   const size_t _page_size;
 133 public:
 134   ShenandoahPretouchBitmapTask(char* bitmap_base, size_t bitmap_size, size_t page_size) :
 135     WorkerTask("Shenandoah Pretouch Bitmap"),
 136     _bitmap_base(bitmap_base),
 137     _bitmap_size(bitmap_size),
 138     _page_size(page_size) {}
 139 
 140   virtual void work(uint worker_id) {
 141     ShenandoahHeapRegion* r = _regions.next();
 142     while (r != nullptr) {
 143       size_t start = r->index()       * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 144       size_t end   = (r->index() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 145       assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " < " SIZE_FORMAT, end, _bitmap_size);
 146 
 147       if (r->is_committed()) {
 148         os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);
 149       }
 150 
 151       r = _regions.next();
 152     }
 153   }
 154 };
 155 
 156 jint ShenandoahHeap::initialize() {
 157   //
 158   // Figure out heap sizing
 159   //
 160 
 161   size_t init_byte_size = InitialHeapSize;
 162   size_t min_byte_size  = MinHeapSize;
 163   size_t max_byte_size  = MaxHeapSize;
 164   size_t heap_alignment = HeapAlignment;
 165 
 166   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 167 
 168   Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
 169   Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
 170 
 171   _num_regions = ShenandoahHeapRegion::region_count();
 172   assert(_num_regions == (max_byte_size / reg_size_bytes),
 173          "Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT,
 174          _num_regions, max_byte_size, reg_size_bytes);
 175 
 176   size_t num_committed_regions = init_byte_size / reg_size_bytes;
 177   num_committed_regions = MIN2(num_committed_regions, _num_regions);
 178   assert(num_committed_regions <= _num_regions, "sanity");
 179   _initial_size = num_committed_regions * reg_size_bytes;
 180 
 181   size_t num_min_regions = min_byte_size / reg_size_bytes;
 182   num_min_regions = MIN2(num_min_regions, _num_regions);
 183   assert(num_min_regions <= _num_regions, "sanity");
 184   _minimum_size = num_min_regions * reg_size_bytes;
 185 
 186   // Default to max heap size.
 187   _soft_max_size = _num_regions * reg_size_bytes;
 188 
 189   _committed = _initial_size;
 190 
 191   size_t heap_page_size   = UseLargePages ? os::large_page_size() : os::vm_page_size();
 192   size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 193   size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 194 
 195   //
 196   // Reserve and commit memory for heap
 197   //
 198 
 199   ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
 200   initialize_reserved_region(heap_rs);
 201   _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
 202   _heap_region_special = heap_rs.special();
 203 
 204   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
 205          "Misaligned heap: " PTR_FORMAT, p2i(base()));
 206   os::trace_page_sizes_for_requested_size("Heap",
 207                                           max_byte_size, heap_alignment,
 208                                           heap_rs.base(),
 209                                           heap_rs.size(), heap_rs.page_size());
 210 
 211 #if SHENANDOAH_OPTIMIZED_MARKTASK
 212   // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
 213   // Fail if we ever attempt to address more than we can.
 214   if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
 215     FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
 216                           "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
 217                           "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
 218                 p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
 219     vm_exit_during_initialization("Fatal Error", buf);
 220   }
 221 #endif
 222 
 223   ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
 224   if (!_heap_region_special) {
 225     os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
 226                               "Cannot commit heap memory");
 227   }
 228 
 229   BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this, _heap_region));
 230 
 231   // Now we know the number of regions and heap sizes, initialize the heuristics.
 232   initialize_heuristics();
 233 
 234   assert(_heap_region.byte_size() == heap_rs.size(), "Need to know reserved size for card table");
 235 
 236   //
 237   // Worker threads must be initialized after the barrier is configured
 238   //
 239   _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
 240   if (_workers == nullptr) {
 241     vm_exit_during_initialization("Failed necessary allocation.");
 242   } else {
 243     _workers->initialize_workers();
 244   }
 245 
 246   if (ParallelGCThreads > 1) {
 247     _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread", ParallelGCThreads);
 248     _safepoint_workers->initialize_workers();
 249   }
 250 
 251   //
 252   // Reserve and commit memory for bitmap(s)
 253   //
 254 
 255   size_t bitmap_size_orig = ShenandoahMarkBitMap::compute_size(heap_rs.size());
 256   _bitmap_size = align_up(bitmap_size_orig, bitmap_page_size);
 257 
 258   size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();
 259 
 260   guarantee(bitmap_bytes_per_region != 0,
 261             "Bitmap bytes per region should not be zero");
 262   guarantee(is_power_of_2(bitmap_bytes_per_region),
 263             "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
 264 
 265   if (bitmap_page_size > bitmap_bytes_per_region) {
 266     _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
 267     _bitmap_bytes_per_slice = bitmap_page_size;
 268   } else {
 269     _bitmap_regions_per_slice = 1;
 270     _bitmap_bytes_per_slice = bitmap_bytes_per_region;
 271   }
 272 
 273   guarantee(_bitmap_regions_per_slice >= 1,
 274             "Should have at least one region per slice: " SIZE_FORMAT,
 275             _bitmap_regions_per_slice);
 276 
 277   guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
 278             "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
 279             _bitmap_bytes_per_slice, bitmap_page_size);
 280 
 281   ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
 282   os::trace_page_sizes_for_requested_size("Mark Bitmap",
 283                                           bitmap_size_orig, bitmap_page_size,
 284                                           bitmap.base(),
 285                                           bitmap.size(), bitmap.page_size());
 286   MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
 287   _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
 288   _bitmap_region_special = bitmap.special();
 289 
 290   size_t bitmap_init_commit = _bitmap_bytes_per_slice *
 291     align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
 292   bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
 293   if (!_bitmap_region_special) {
 294     os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
 295                               "Cannot commit bitmap memory");
 296   }
 297 
 298   _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);
 299 
 300   if (ShenandoahVerify) {
 301     ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
 302     os::trace_page_sizes_for_requested_size("Verify Bitmap",
 303                                             bitmap_size_orig, bitmap_page_size,
 304                                             verify_bitmap.base(),
 305                                             verify_bitmap.size(), verify_bitmap.page_size());
 306     if (!verify_bitmap.special()) {
 307       os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
 308                                 "Cannot commit verification bitmap memory");
 309     }
 310     MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
 311     MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
 312     _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
 313     _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
 314   }
 315 
 316   // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
 317   size_t aux_bitmap_page_size = bitmap_page_size;
 318 
 319   ReservedSpace aux_bitmap(_bitmap_size, aux_bitmap_page_size);
 320   os::trace_page_sizes_for_requested_size("Aux Bitmap",
 321                                           bitmap_size_orig, aux_bitmap_page_size,
 322                                           aux_bitmap.base(),
 323                                           aux_bitmap.size(), aux_bitmap.page_size());
 324   MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
 325   _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
 326   _aux_bitmap_region_special = aux_bitmap.special();
 327   _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
 328 
 329   //
 330   // Create regions and region sets
 331   //
 332   size_t region_align = align_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE);
 333   size_t region_storage_size_orig = region_align * _num_regions;
 334   size_t region_storage_size = align_up(region_storage_size_orig,
 335                                         MAX2(region_page_size, os::vm_allocation_granularity()));
 336 
 337   ReservedSpace region_storage(region_storage_size, region_page_size);
 338   os::trace_page_sizes_for_requested_size("Region Storage",
 339                                           region_storage_size_orig, region_page_size,
 340                                           region_storage.base(),
 341                                           region_storage.size(), region_storage.page_size());
 342   MemTracker::record_virtual_memory_type(region_storage.base(), mtGC);
 343   if (!region_storage.special()) {
 344     os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false,
 345                               "Cannot commit region memory");
 346   }
 347 
 348   // Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks.
 349   // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there.
 350   // If not successful, bite a bullet and allocate at whatever address.
 351   {
 352     const size_t cset_align = MAX2<size_t>(os::vm_page_size(), os::vm_allocation_granularity());
 353     const size_t cset_size = align_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align);
 354     const size_t cset_page_size = os::vm_page_size();
 355 
 356     uintptr_t min = round_up_power_of_2(cset_align);
 357     uintptr_t max = (1u << 30u);
 358     ReservedSpace cset_rs;
 359 
 360     for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
 361       char* req_addr = (char*)addr;
 362       assert(is_aligned(req_addr, cset_align), "Should be aligned");
 363       cset_rs = ReservedSpace(cset_size, cset_align, cset_page_size, req_addr);
 364       if (cset_rs.is_reserved()) {
 365         assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
 366         _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 367         break;
 368       }
 369     }
 370 
 371     if (_collection_set == nullptr) {
 372       cset_rs = ReservedSpace(cset_size, cset_align, os::vm_page_size());
 373       _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 374     }
 375     os::trace_page_sizes_for_requested_size("Collection Set",
 376                                             cset_size, cset_page_size,
 377                                             cset_rs.base(),
 378                                             cset_rs.size(), cset_rs.page_size());
 379   }
 380 
 381   _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
 382   _affiliations = NEW_C_HEAP_ARRAY(uint8_t, _num_regions, mtGC);
 383   _free_set = new ShenandoahFreeSet(this, _num_regions);
 384 
 385   {
 386     ShenandoahHeapLocker locker(lock());
 387 
 388     for (size_t i = 0; i < _num_regions; i++) {
 389       HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
 390       bool is_committed = i < num_committed_regions;
 391       void* loc = region_storage.base() + i * region_align;
 392 
 393       ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
 394       assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");
 395 
 396       _marking_context->initialize_top_at_mark_start(r);
 397       _regions[i] = r;
 398       assert(!collection_set()->is_in(i), "New region should not be in collection set");
 399 
 400       _affiliations[i] = ShenandoahAffiliation::FREE;
 401     }
 402 
 403     // Initialize to complete
 404     _marking_context->mark_complete();
 405     size_t young_cset_regions, old_cset_regions;
 406 
 407     // We are initializing free set.  We ignore cset region tallies.
 408     size_t first_old, last_old, num_old;
 409     _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
 410     _free_set->finish_rebuild(young_cset_regions, old_cset_regions, num_old);
 411   }
 412 
 413   if (AlwaysPreTouch) {
 414     // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
 415     // before initialize() below zeroes it with initializing thread. For any given region,
 416     // we touch the region and the corresponding bitmaps from the same thread.
 417     ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
 418 
 419     _pretouch_heap_page_size = heap_page_size;
 420     _pretouch_bitmap_page_size = bitmap_page_size;
 421 
 422     // OS memory managers may want to coalesce back-to-back pages. Make their jobs
 423     // simpler by pre-touching continuous spaces (heap and bitmap) separately.
 424 
 425     ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, _pretouch_bitmap_page_size);
 426     _workers->run_task(&bcl);
 427 
 428     ShenandoahPretouchHeapTask hcl(_pretouch_heap_page_size);
 429     _workers->run_task(&hcl);
 430   }
 431 
 432   //
 433   // Initialize the rest of GC subsystems
 434   //
 435 
 436   _liveness_cache = NEW_C_HEAP_ARRAY(ShenandoahLiveData*, _max_workers, mtGC);
 437   for (uint worker = 0; worker < _max_workers; worker++) {
 438     _liveness_cache[worker] = NEW_C_HEAP_ARRAY(ShenandoahLiveData, _num_regions, mtGC);
 439     Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
 440   }
 441 
 442   // There should probably be Shenandoah-specific options for these,
 443   // just as there are G1-specific options.
 444   {
 445     ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
 446     satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
 447     satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
 448   }
 449 
 450   _monitoring_support = new ShenandoahMonitoringSupport(this);
 451   _phase_timings = new ShenandoahPhaseTimings(max_workers());
 452   ShenandoahCodeRoots::initialize();
 453 
 454   if (ShenandoahPacing) {
 455     _pacer = new ShenandoahPacer(this);
 456     _pacer->setup_for_idle();
 457   }
 458 
 459   initialize_controller();
 460 
 461   print_init_logger();
 462 
 463   return JNI_OK;
 464 }
 465 
 466 void ShenandoahHeap::initialize_controller() {
 467   _control_thread = new ShenandoahControlThread();
 468 }
 469 
 470 void ShenandoahHeap::print_init_logger() const {
 471   ShenandoahInitLogger::print();
 472 }
 473 
 474 void ShenandoahHeap::initialize_mode() {
 475   if (ShenandoahGCMode != nullptr) {
 476     if (strcmp(ShenandoahGCMode, "satb") == 0) {
 477       _gc_mode = new ShenandoahSATBMode();
 478     } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
 479       _gc_mode = new ShenandoahPassiveMode();
 480     } else if (strcmp(ShenandoahGCMode, "generational") == 0) {
 481       _gc_mode = new ShenandoahGenerationalMode();
 482     } else {
 483       vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
 484     }
 485   } else {
 486     vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
 487   }
 488   _gc_mode->initialize_flags();
 489   if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 490     vm_exit_during_initialization(
 491             err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 492                     _gc_mode->name()));
 493   }
 494   if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
 495     vm_exit_during_initialization(
 496             err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 497                     _gc_mode->name()));
 498   }
 499 }
 500 
 501 void ShenandoahHeap::initialize_heuristics() {
 502   _global_generation = new ShenandoahGlobalGeneration(mode()->is_generational(), max_workers(), max_capacity(), max_capacity());
 503   _global_generation->initialize_heuristics(mode());
 504   _evac_tracker = new ShenandoahEvacuationTracker(mode()->is_generational());
 505 }
 506 
 507 #ifdef _MSC_VER
 508 #pragma warning( push )
 509 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 510 #endif
 511 
 512 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 513   CollectedHeap(),
 514   _gc_generation(nullptr),
 515   _active_generation(nullptr),
 516   _initial_size(0),
 517   _committed(0),
 518   _max_workers(MAX3(ConcGCThreads, ParallelGCThreads, 1U)),
 519   _workers(nullptr),
 520   _safepoint_workers(nullptr),
 521   _heap_region_special(false),
 522   _num_regions(0),
 523   _regions(nullptr),
 524   _affiliations(nullptr),
 525   _gc_state_changed(false),
 526   _gc_no_progress_count(0),
 527   _cancel_requested_time(0),
 528   _update_refs_iterator(this),
 529   _global_generation(nullptr),
 530   _control_thread(nullptr),
 531   _young_generation(nullptr),
 532   _old_generation(nullptr),
 533   _shenandoah_policy(policy),
 534   _gc_mode(nullptr),
 535   _free_set(nullptr),
 536   _pacer(nullptr),
 537   _verifier(nullptr),
 538   _phase_timings(nullptr),
 539   _evac_tracker(nullptr),
 540   _mmu_tracker(),
 541   _monitoring_support(nullptr),
 542   _memory_pool(nullptr),
 543   _stw_memory_manager("Shenandoah Pauses"),
 544   _cycle_memory_manager("Shenandoah Cycles"),
 545   _gc_timer(new ConcurrentGCTimer()),
 546   _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
 547   _marking_context(nullptr),
 548   _bitmap_size(0),
 549   _bitmap_regions_per_slice(0),
 550   _bitmap_bytes_per_slice(0),
 551   _bitmap_region_special(false),
 552   _aux_bitmap_region_special(false),
 553   _liveness_cache(nullptr),
 554   _collection_set(nullptr)
 555 {
 556   // Initialize GC mode early, many subsequent initialization procedures depend on it
 557   initialize_mode();
 558 }
 559 
 560 #ifdef _MSC_VER
 561 #pragma warning( pop )
 562 #endif
 563 
 564 void ShenandoahHeap::print_on(outputStream* st) const {
 565   st->print_cr("Shenandoah Heap");
 566   st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
 567                byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
 568                byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
 569                byte_size_in_proper_unit(committed()),    proper_unit_for_byte_size(committed()),
 570                byte_size_in_proper_unit(used()),         proper_unit_for_byte_size(used()));
 571   st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions",
 572                num_regions(),
 573                byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
 574                proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));
 575 
 576   st->print("Status: ");
 577   if (has_forwarded_objects())                 st->print("has forwarded objects, ");
 578   if (is_concurrent_old_mark_in_progress())    st->print("old marking, ");
 579   if (is_concurrent_young_mark_in_progress())  st->print("young marking, ");
 580   if (is_evacuation_in_progress())             st->print("evacuating, ");
 581   if (is_update_refs_in_progress())            st->print("updating refs, ");
 582   if (is_degenerated_gc_in_progress())         st->print("degenerated gc, ");
 583   if (is_full_gc_in_progress())                st->print("full gc, ");
 584   if (is_full_gc_move_in_progress())           st->print("full gc move, ");
 585   if (is_concurrent_weak_root_in_progress())   st->print("concurrent weak roots, ");
 586   if (is_concurrent_strong_root_in_progress() &&
 587       !is_concurrent_weak_root_in_progress())  st->print("concurrent strong roots, ");
 588 
 589   if (cancelled_gc()) {
 590     st->print("cancelled");
 591   } else {
 592     st->print("not cancelled");
 593   }
 594   st->cr();
 595 
 596   st->print_cr("Reserved region:");
 597   st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
 598                p2i(reserved_region().start()),
 599                p2i(reserved_region().end()));
 600 
 601   ShenandoahCollectionSet* cset = collection_set();
 602   st->print_cr("Collection set:");
 603   if (cset != nullptr) {
 604     st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address()));
 605     st->print_cr(" - map (biased):  " PTR_FORMAT, p2i(cset->biased_map_address()));
 606   } else {
 607     st->print_cr(" (null)");
 608   }
 609 
 610   st->cr();
 611   MetaspaceUtils::print_on(st);
 612 
 613   if (Verbose) {
 614     st->cr();
 615     print_heap_regions_on(st);
 616   }
 617 }
 618 
 619 class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
 620 public:
 621   void do_thread(Thread* thread) {
 622     assert(thread != nullptr, "Sanity");
 623     assert(thread->is_Worker_thread(), "Only worker thread expected");
 624     ShenandoahThreadLocalData::initialize_gclab(thread);
 625   }
 626 };
 627 
 628 void ShenandoahHeap::post_initialize() {
 629   CollectedHeap::post_initialize();
 630   _mmu_tracker.initialize();
 631 
 632   MutexLocker ml(Threads_lock);
 633 
 634   ShenandoahInitWorkerGCLABClosure init_gclabs;
 635   _workers->threads_do(&init_gclabs);
 636 
 637   // gclab can not be initialized early during VM startup, as it can not determinate its max_size.
 638   // Now, we will let WorkerThreads to initialize gclab when new worker is created.
 639   _workers->set_initialize_gclab();
 640   if (_safepoint_workers != nullptr) {
 641     _safepoint_workers->threads_do(&init_gclabs);
 642     _safepoint_workers->set_initialize_gclab();
 643   }
 644 
 645   JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers();)
 646 }
 647 
 648 ShenandoahHeuristics* ShenandoahHeap::heuristics() {
 649   return _global_generation->heuristics();
 650 }
 651 
 652 size_t ShenandoahHeap::used() const {
 653   return global_generation()->used();
 654 }
 655 
 656 size_t ShenandoahHeap::committed() const {
 657   return Atomic::load(&_committed);
 658 }
 659 
 660 void ShenandoahHeap::increase_committed(size_t bytes) {
 661   shenandoah_assert_heaplocked_or_safepoint();
 662   _committed += bytes;
 663 }
 664 
 665 void ShenandoahHeap::decrease_committed(size_t bytes) {
 666   shenandoah_assert_heaplocked_or_safepoint();
 667   _committed -= bytes;
 668 }
 669 
 670 // For tracking usage based on allocations, it should be the case that:
 671 // * The sum of regions::used == heap::used
 672 // * The sum of a generation's regions::used == generation::used
 673 // * The sum of a generation's humongous regions::free == generation::humongous_waste
 674 // These invariants are checked by the verifier on GC safepoints.
 675 //
 676 // Additional notes:
 677 // * When a mutator's allocation request causes a region to be retired, the
 678 //   free memory left in that region is considered waste. It does not contribute
 679 //   to the usage, but it _does_ contribute to allocation rate.
 680 // * The bottom of a PLAB must be aligned on card size. In some cases this will
 681 //   require padding in front of the PLAB (a filler object). Because this padding
 682 //   is included in the region's used memory we include the padding in the usage
 683 //   accounting as waste.
 684 // * Mutator allocations are used to compute an allocation rate. They are also
 685 //   sent to the Pacer for those purposes.
 686 // * There are three sources of waste:
 687 //  1. The padding used to align a PLAB on card size
 688 //  2. Region's free is less than minimum TLAB size and is retired
 689 //  3. The unused portion of memory in the last region of a humongous object
 690 void ShenandoahHeap::increase_used(const ShenandoahAllocRequest& req) {
 691   size_t actual_bytes = req.actual_size() * HeapWordSize;
 692   size_t wasted_bytes = req.waste() * HeapWordSize;
 693   ShenandoahGeneration* generation = generation_for(req.affiliation());
 694 
 695   if (req.is_gc_alloc()) {
 696     assert(wasted_bytes == 0 || req.type() == ShenandoahAllocRequest::_alloc_plab, "Only PLABs have waste");
 697     increase_used(generation, actual_bytes + wasted_bytes);
 698   } else {
 699     assert(req.is_mutator_alloc(), "Expected mutator alloc here");
 700     // padding and actual size both count towards allocation counter
 701     generation->increase_allocated(actual_bytes + wasted_bytes);
 702 
 703     // only actual size counts toward usage for mutator allocations
 704     increase_used(generation, actual_bytes);
 705 
 706     // notify pacer of both actual size and waste
 707     notify_mutator_alloc_words(req.actual_size(), req.waste());
 708 
 709     if (wasted_bytes > 0 && ShenandoahHeapRegion::requires_humongous(req.actual_size())) {
 710       increase_humongous_waste(generation,wasted_bytes);
 711     }
 712   }
 713 }
 714 
 715 void ShenandoahHeap::increase_humongous_waste(ShenandoahGeneration* generation, size_t bytes) {
 716   generation->increase_humongous_waste(bytes);
 717   if (!generation->is_global()) {
 718     global_generation()->increase_humongous_waste(bytes);
 719   }
 720 }
 721 
 722 void ShenandoahHeap::decrease_humongous_waste(ShenandoahGeneration* generation, size_t bytes) {
 723   generation->decrease_humongous_waste(bytes);
 724   if (!generation->is_global()) {
 725     global_generation()->decrease_humongous_waste(bytes);
 726   }
 727 }
 728 
 729 void ShenandoahHeap::increase_used(ShenandoahGeneration* generation, size_t bytes) {
 730   generation->increase_used(bytes);
 731   if (!generation->is_global()) {
 732     global_generation()->increase_used(bytes);
 733   }
 734 }
 735 
 736 void ShenandoahHeap::decrease_used(ShenandoahGeneration* generation, size_t bytes) {
 737   generation->decrease_used(bytes);
 738   if (!generation->is_global()) {
 739     global_generation()->decrease_used(bytes);
 740   }
 741 }
 742 
 743 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, size_t waste) {
 744   if (ShenandoahPacing) {
 745     control_thread()->pacing_notify_alloc(words);
 746     if (waste > 0) {
 747       pacer()->claim_for_alloc(waste, true);
 748     }
 749   }
 750 }
 751 
 752 size_t ShenandoahHeap::capacity() const {
 753   return committed();
 754 }
 755 
 756 size_t ShenandoahHeap::max_capacity() const {
 757   return _num_regions * ShenandoahHeapRegion::region_size_bytes();
 758 }
 759 
 760 size_t ShenandoahHeap::soft_max_capacity() const {
 761   size_t v = Atomic::load(&_soft_max_size);
 762   assert(min_capacity() <= v && v <= max_capacity(),
 763          "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
 764          min_capacity(), v, max_capacity());
 765   return v;
 766 }
 767 
 768 void ShenandoahHeap::set_soft_max_capacity(size_t v) {
 769   assert(min_capacity() <= v && v <= max_capacity(),
 770          "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
 771          min_capacity(), v, max_capacity());
 772   Atomic::store(&_soft_max_size, v);
 773 }
 774 
 775 size_t ShenandoahHeap::min_capacity() const {
 776   return _minimum_size;
 777 }
 778 
 779 size_t ShenandoahHeap::initial_capacity() const {
 780   return _initial_size;
 781 }
 782 
 783 bool ShenandoahHeap::is_in(const void* p) const {
 784   if (is_in_reserved(p)) {
 785     if (is_full_gc_move_in_progress()) {
 786       // Full GC move is running, we do not have a consistent region
 787       // information yet. But we know the pointer is in heap.
 788       return true;
 789     }
 790     // Now check if we point to a live section in active region.
 791     ShenandoahHeapRegion* r = heap_region_containing(p);
 792     return (r->is_active() && p < r->top());
 793   } else {
 794     return false;
 795   }
 796 }
 797 
 798 void ShenandoahHeap::maybe_uncommit(double shrink_before, size_t shrink_until) {
 799   assert (ShenandoahUncommit, "should be enabled");
 800 
 801   // Determine if there is work to do. This avoids taking heap lock if there is
 802   // no work available, avoids spamming logs with superfluous logging messages,
 803   // and minimises the amount of work while locks are taken.
 804 
 805   if (committed() <= shrink_until) return;
 806 
 807   bool has_work = false;
 808   for (size_t i = 0; i < num_regions(); i++) {
 809     ShenandoahHeapRegion* r = get_region(i);
 810     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
 811       has_work = true;
 812       break;
 813     }
 814   }
 815 
 816   if (has_work) {
 817     static const char* msg = "Concurrent uncommit";
 818     ShenandoahConcurrentPhase gcPhase(msg, ShenandoahPhaseTimings::conc_uncommit, true /* log_heap_usage */);
 819     EventMark em("%s", msg);
 820 
 821     op_uncommit(shrink_before, shrink_until);
 822   }
 823 }
 824 
 825 void ShenandoahHeap::op_uncommit(double shrink_before, size_t shrink_until) {
 826   assert (ShenandoahUncommit, "should be enabled");
 827 
 828   // Application allocates from the beginning of the heap, and GC allocates at
 829   // the end of it. It is more efficient to uncommit from the end, so that applications
 830   // could enjoy the near committed regions. GC allocations are much less frequent,
 831   // and therefore can accept the committing costs.
 832 
 833   size_t count = 0;
 834   for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow
 835     ShenandoahHeapRegion* r = get_region(i - 1);
 836     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
 837       ShenandoahHeapLocker locker(lock());
 838       if (r->is_empty_committed()) {
 839         if (committed() < shrink_until + ShenandoahHeapRegion::region_size_bytes()) {
 840           break;
 841         }
 842 
 843         r->make_uncommitted();
 844         count++;
 845       }
 846     }
 847     SpinPause(); // allow allocators to take the lock
 848   }
 849 
 850   if (count > 0) {
 851     notify_heap_changed();
 852   }
 853 }
 854 
 855 bool ShenandoahHeap::check_soft_max_changed() {
 856   size_t new_soft_max = Atomic::load(&SoftMaxHeapSize);
 857   size_t old_soft_max = soft_max_capacity();
 858   if (new_soft_max != old_soft_max) {
 859     new_soft_max = MAX2(min_capacity(), new_soft_max);
 860     new_soft_max = MIN2(max_capacity(), new_soft_max);
 861     if (new_soft_max != old_soft_max) {
 862       log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
 863                    byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
 864                    byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
 865       );
 866       set_soft_max_capacity(new_soft_max);
 867       return true;
 868     }
 869   }
 870   return false;
 871 }
 872 
 873 void ShenandoahHeap::notify_heap_changed() {
 874   // Update monitoring counters when we took a new region. This amortizes the
 875   // update costs on slow path.
 876   monitoring_support()->notify_heap_changed();
 877   _heap_changed.set();
 878 }
 879 
 880 void ShenandoahHeap::set_forced_counters_update(bool value) {
 881   monitoring_support()->set_forced_counters_update(value);
 882 }
 883 
 884 void ShenandoahHeap::handle_force_counters_update() {
 885   monitoring_support()->handle_force_counters_update();
 886 }
 887 
 888 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
 889   // New object should fit the GCLAB size
 890   size_t min_size = MAX2(size, PLAB::min_size());
 891 
 892   // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
 893   size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
 894 
 895   new_size = MIN2(new_size, PLAB::max_size());
 896   new_size = MAX2(new_size, PLAB::min_size());
 897 
 898   // Record new heuristic value even if we take any shortcut. This captures
 899   // the case when moderately-sized objects always take a shortcut. At some point,
 900   // heuristics should catch up with them.
 901   log_debug(gc, free)("Set new GCLAB size: " SIZE_FORMAT, new_size);
 902   ShenandoahThreadLocalData::set_gclab_size(thread, new_size);
 903 
 904   if (new_size < size) {
 905     // New size still does not fit the object. Fall back to shared allocation.
 906     // This avoids retiring perfectly good GCLABs, when we encounter a large object.
 907     log_debug(gc, free)("New gclab size (" SIZE_FORMAT ") is too small for " SIZE_FORMAT, new_size, size);
 908     return nullptr;
 909   }
 910 
 911   // Retire current GCLAB, and allocate a new one.
 912   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
 913   gclab->retire();
 914 
 915   size_t actual_size = 0;
 916   HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
 917   if (gclab_buf == nullptr) {
 918     return nullptr;
 919   }
 920 
 921   assert (size <= actual_size, "allocation should fit");
 922 
 923   // ...and clear or zap just allocated TLAB, if needed.
 924   if (ZeroTLAB) {
 925     Copy::zero_to_words(gclab_buf, actual_size);
 926   } else if (ZapTLAB) {
 927     // Skip mangling the space corresponding to the object header to
 928     // ensure that the returned space is not considered parsable by
 929     // any concurrent GC thread.
 930     size_t hdr_size = oopDesc::header_size();
 931     Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
 932   }
 933   gclab->set_buf(gclab_buf, actual_size);
 934   return gclab->allocate(size);
 935 }
 936 
 937 // Called from stubs in JIT code or interpreter
 938 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
 939                                             size_t requested_size,
 940                                             size_t* actual_size) {
 941   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
 942   HeapWord* res = allocate_memory(req);
 943   if (res != nullptr) {
 944     *actual_size = req.actual_size();
 945   } else {
 946     *actual_size = 0;
 947   }
 948   return res;
 949 }
 950 
 951 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
 952                                              size_t word_size,
 953                                              size_t* actual_size) {
 954   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
 955   HeapWord* res = allocate_memory(req);
 956   if (res != nullptr) {
 957     *actual_size = req.actual_size();
 958   } else {
 959     *actual_size = 0;
 960   }
 961   return res;
 962 }
 963 
 964 HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
 965   intptr_t pacer_epoch = 0;
 966   bool in_new_region = false;
 967   HeapWord* result = nullptr;
 968 
 969   if (req.is_mutator_alloc()) {
 970     if (ShenandoahPacing) {
 971       pacer()->pace_for_alloc(req.size());
 972       pacer_epoch = pacer()->epoch();
 973     }
 974 
 975     if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
 976       result = allocate_memory_under_lock(req, in_new_region);
 977     }
 978 
 979     // Check that gc overhead is not exceeded.
 980     //
 981     // Shenandoah will grind along for quite a while allocating one
 982     // object at a time using shared (non-tlab) allocations. This check
 983     // is testing that the GC overhead limit has not been exceeded.
 984     // This will notify the collector to start a cycle, but will raise
 985     // an OOME to the mutator if the last Full GCs have not made progress.
 986     // gc_no_progress_count is incremented following each degen or full GC that fails to achieve is_good_progress().
 987     if (result == nullptr && !req.is_lab_alloc() && get_gc_no_progress_count() > ShenandoahNoProgressThreshold) {
 988       control_thread()->handle_alloc_failure(req, false);
 989       req.set_actual_size(0);
 990       return nullptr;
 991     }
 992 
 993     if (result == nullptr) {
 994       // Block until control thread reacted, then retry allocation.
 995       //
 996       // It might happen that one of the threads requesting allocation would unblock
 997       // way later after GC happened, only to fail the second allocation, because
 998       // other threads have already depleted the free storage. In this case, a better
 999       // strategy is to try again, until at least one full GC has completed.
1000       //
1001       // Stop retrying and return nullptr to cause OOMError exception if our allocation failed even after:
1002       //   a) We experienced a GC that had good progress, or
1003       //   b) We experienced at least one Full GC (whether or not it had good progress)
1004 
1005       size_t original_count = shenandoah_policy()->full_gc_count();
1006       while ((result == nullptr) && (original_count == shenandoah_policy()->full_gc_count())) {
1007         control_thread()->handle_alloc_failure(req, true);
1008         result = allocate_memory_under_lock(req, in_new_region);
1009       }
1010       if (result != nullptr) {
1011         // If our allocation request has been satisifed after it initially failed, we count this as good gc progress
1012         notify_gc_progress();
1013       }
1014       if (log_develop_is_enabled(Debug, gc, alloc)) {
1015         ResourceMark rm;
1016         log_debug(gc, alloc)("Thread: %s, Result: " PTR_FORMAT ", Request: %s, Size: " SIZE_FORMAT
1017                              ", Original: " SIZE_FORMAT ", Latest: " SIZE_FORMAT,
1018                              Thread::current()->name(), p2i(result), req.type_string(), req.size(),
1019                              original_count, get_gc_no_progress_count());
1020       }
1021     }
1022   } else {
1023     assert(req.is_gc_alloc(), "Can only accept GC allocs here");
1024     result = allocate_memory_under_lock(req, in_new_region);
1025     // Do not call handle_alloc_failure() here, because we cannot block.
1026     // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
1027   }
1028 
1029   if (in_new_region) {
1030     notify_heap_changed();
1031   }
1032 
1033   if (result == nullptr) {
1034     req.set_actual_size(0);
1035   }
1036 
1037   // This is called regardless of the outcome of the allocation to account
1038   // for any waste created by retiring regions with this request.
1039   increase_used(req);
1040 
1041   if (result != nullptr) {
1042     size_t requested = req.size();
1043     size_t actual = req.actual_size();
1044 
1045     assert (req.is_lab_alloc() || (requested == actual),
1046             "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
1047             ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);
1048 
1049     if (req.is_mutator_alloc()) {
1050       // If we requested more than we were granted, give the rest back to pacer.
1051       // This only matters if we are in the same pacing epoch: do not try to unpace
1052       // over the budget for the other phase.
1053       if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
1054         pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
1055       }
1056     }
1057   }
1058 
1059   return result;
1060 }
1061 
1062 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
1063   // If we are dealing with mutator allocation, then we may need to block for safepoint.
1064   // We cannot block for safepoint for GC allocations, because there is a high chance
1065   // we are already running at safepoint or from stack watermark machinery, and we cannot
1066   // block again.
1067   ShenandoahHeapLocker locker(lock(), req.is_mutator_alloc());
1068 
1069   // Make sure the old generation has room for either evacuations or promotions before trying to allocate.
1070   if (req.is_old() && !old_generation()->can_allocate(req)) {
1071     return nullptr;
1072   }
1073 
1074   // If TLAB request size is greater than available, allocate() will attempt to downsize request to fit within available
1075   // memory.
1076   HeapWord* result = _free_set->allocate(req, in_new_region);
1077 
1078   // Record the plab configuration for this result and register the object.
1079   if (result != nullptr && req.is_old()) {
1080     old_generation()->configure_plab_for_current_thread(req);
1081     if (req.type() == ShenandoahAllocRequest::_alloc_shared_gc) {
1082       // Register the newly allocated object while we're holding the global lock since there's no synchronization
1083       // built in to the implementation of register_object().  There are potential races when multiple independent
1084       // threads are allocating objects, some of which might span the same card region.  For example, consider
1085       // a card table's memory region within which three objects are being allocated by three different threads:
1086       //
1087       // objects being "concurrently" allocated:
1088       //    [-----a------][-----b-----][--------------c------------------]
1089       //            [---- card table memory range --------------]
1090       //
1091       // Before any objects are allocated, this card's memory range holds no objects.  Note that allocation of object a
1092       // wants to set the starts-object, first-start, and last-start attributes of the preceding card region.
1093       // Allocation of object b wants to set the starts-object, first-start, and last-start attributes of this card region.
1094       // Allocation of object c also wants to set the starts-object, first-start, and last-start attributes of this
1095       // card region.
1096       //
1097       // The thread allocating b and the thread allocating c can "race" in various ways, resulting in confusion, such as
1098       // last-start representing object b while first-start represents object c.  This is why we need to require all
1099       // register_object() invocations to be "mutually exclusive" with respect to each card's memory range.
1100       old_generation()->card_scan()->register_object(result);
1101     }
1102   }
1103 
1104   return result;
1105 }
1106 
1107 HeapWord* ShenandoahHeap::mem_allocate(size_t size,
1108                                         bool*  gc_overhead_limit_was_exceeded) {
1109   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
1110   return allocate_memory(req);
1111 }
1112 
1113 MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
1114                                                              size_t size,
1115                                                              Metaspace::MetadataType mdtype) {
1116   MetaWord* result;
1117 
1118   // Inform metaspace OOM to GC heuristics if class unloading is possible.
1119   ShenandoahHeuristics* h = global_generation()->heuristics();
1120   if (h->can_unload_classes()) {
1121     h->record_metaspace_oom();
1122   }
1123 
1124   // Expand and retry allocation
1125   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1126   if (result != nullptr) {
1127     return result;
1128   }
1129 
1130   // Start full GC
1131   collect(GCCause::_metadata_GC_clear_soft_refs);
1132 
1133   // Retry allocation
1134   result = loader_data->metaspace_non_null()->allocate(size, mdtype);
1135   if (result != nullptr) {
1136     return result;
1137   }
1138 
1139   // Expand and retry allocation
1140   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1141   if (result != nullptr) {
1142     return result;
1143   }
1144 
1145   // Out of memory
1146   return nullptr;
1147 }
1148 
1149 class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
1150 private:
1151   ShenandoahHeap* const _heap;
1152   Thread* const _thread;
1153 public:
1154   ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
1155     _heap(heap), _thread(Thread::current()) {}
1156 
1157   void do_object(oop p) {
1158     shenandoah_assert_marked(nullptr, p);
1159     if (!p->is_forwarded()) {
1160       _heap->evacuate_object(p, _thread);
1161     }
1162   }
1163 };
1164 
1165 class ShenandoahEvacuationTask : public WorkerTask {
1166 private:
1167   ShenandoahHeap* const _sh;
1168   ShenandoahCollectionSet* const _cs;
1169   bool _concurrent;
1170 public:
1171   ShenandoahEvacuationTask(ShenandoahHeap* sh,
1172                            ShenandoahCollectionSet* cs,
1173                            bool concurrent) :
1174     WorkerTask("Shenandoah Evacuation"),
1175     _sh(sh),
1176     _cs(cs),
1177     _concurrent(concurrent)
1178   {}
1179 
1180   void work(uint worker_id) {
1181     if (_concurrent) {
1182       ShenandoahConcurrentWorkerSession worker_session(worker_id);
1183       ShenandoahSuspendibleThreadSetJoiner stsj;
1184       ShenandoahEvacOOMScope oom_evac_scope;
1185       do_work();
1186     } else {
1187       ShenandoahParallelWorkerSession worker_session(worker_id);
1188       ShenandoahEvacOOMScope oom_evac_scope;
1189       do_work();
1190     }
1191   }
1192 
1193 private:
1194   void do_work() {
1195     ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
1196     ShenandoahHeapRegion* r;
1197     while ((r =_cs->claim_next()) != nullptr) {
1198       assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
1199       _sh->marked_object_iterate(r, &cl);
1200 
1201       if (ShenandoahPacing) {
1202         _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
1203       }
1204 
1205       if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
1206         break;
1207       }
1208     }
1209   }
1210 };
1211 
1212 void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
1213   if (mode()->is_generational()) {
1214     ShenandoahRegionIterator regions;
1215     ShenandoahGenerationalEvacuationTask task(ShenandoahGenerationalHeap::heap(), &regions, concurrent);
1216     workers()->run_task(&task);
1217   } else {
1218     ShenandoahEvacuationTask task(this, _collection_set, concurrent);
1219     workers()->run_task(&task);
1220   }
1221 }
1222 
1223 oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
1224   assert(thread == Thread::current(), "Expected thread parameter to be current thread.");
1225   if (ShenandoahThreadLocalData::is_oom_during_evac(thread)) {
1226     // This thread went through the OOM during evac protocol. It is safe to return
1227     // the forward pointer. It must not attempt to evacuate any other objects.
1228     return ShenandoahBarrierSet::resolve_forwarded(p);
1229   }
1230 
1231   assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
1232 
1233   ShenandoahHeapRegion* r = heap_region_containing(p);
1234   assert(!r->is_humongous(), "never evacuate humongous objects");
1235 
1236   ShenandoahAffiliation target_gen = r->affiliation();
1237   return try_evacuate_object(p, thread, r, target_gen);
1238 }
1239 
1240 oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapRegion* from_region,
1241                                                ShenandoahAffiliation target_gen) {
1242   assert(target_gen == YOUNG_GENERATION, "Only expect evacuations to young in this mode");
1243   assert(from_region->is_young(), "Only expect evacuations from young in this mode");
1244   bool alloc_from_lab = true;
1245   HeapWord* copy = nullptr;
1246   size_t size = p->size();
1247 
1248 #ifdef ASSERT
1249   if (ShenandoahOOMDuringEvacALot &&
1250       (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
1251     copy = nullptr;
1252   } else {
1253 #endif
1254     if (UseTLAB) {
1255       copy = allocate_from_gclab(thread, size);
1256       if ((copy == nullptr) && (size < ShenandoahThreadLocalData::gclab_size(thread))) {
1257         // GCLAB allocation failed because we are bumping up against the limit on young evacuation reserve.  Try resetting
1258         // the desired GCLAB size and retry GCLAB allocation to avoid cascading of shared memory allocations.
1259         ShenandoahThreadLocalData::set_gclab_size(thread, PLAB::min_size());
1260         copy = allocate_from_gclab(thread, size);
1261         // If we still get nullptr, we'll try a shared allocation below.
1262       }
1263     }
1264 
1265     if (copy == nullptr) {
1266       // If we failed to allocate in LAB, we'll try a shared allocation.
1267       ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, target_gen);
1268       copy = allocate_memory(req);
1269       alloc_from_lab = false;
1270     }
1271 #ifdef ASSERT
1272   }
1273 #endif
1274 
1275   if (copy == nullptr) {
1276     control_thread()->handle_alloc_failure_evac(size);
1277 
1278     _oom_evac_handler.handle_out_of_memory_during_evacuation();
1279 
1280     return ShenandoahBarrierSet::resolve_forwarded(p);
1281   }
1282 
1283   // Copy the object:
1284   _evac_tracker->begin_evacuation(thread, size * HeapWordSize);
1285   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
1286 
1287   // Try to install the new forwarding pointer.
1288   oop copy_val = cast_to_oop(copy);
1289   oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
1290   if (result == copy_val) {
1291     // Successfully evacuated. Our copy is now the public one!
1292     ContinuationGCSupport::relativize_stack_chunk(copy_val);
1293     _evac_tracker->end_evacuation(thread, size * HeapWordSize);
1294     shenandoah_assert_correct(nullptr, copy_val);
1295     return copy_val;
1296   }  else {
1297     // Failed to evacuate. We need to deal with the object that is left behind. Since this
1298     // new allocation is certainly after TAMS, it will be considered live in the next cycle.
1299     // But if it happens to contain references to evacuated regions, those references would
1300     // not get updated for this stale copy during this cycle, and we will crash while scanning
1301     // it the next cycle.
1302     if (alloc_from_lab) {
1303       // For LAB allocations, it is enough to rollback the allocation ptr. Either the next
1304       // object will overwrite this stale copy, or the filler object on LAB retirement will
1305       // do this.
1306       ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
1307     } else {
1308       // For non-LAB allocations, we have no way to retract the allocation, and
1309       // have to explicitly overwrite the copy with the filler object. With that overwrite,
1310       // we have to keep the fwdptr initialized and pointing to our (stale) copy.
1311       assert(size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
1312       fill_with_object(copy, size);
1313       shenandoah_assert_correct(nullptr, copy_val);
1314       // For non-LAB allocations, the object has already been registered
1315     }
1316     shenandoah_assert_correct(nullptr, result);
1317     return result;
1318   }
1319 }
1320 
1321 void ShenandoahHeap::trash_cset_regions() {
1322   ShenandoahHeapLocker locker(lock());
1323 
1324   ShenandoahCollectionSet* set = collection_set();
1325   ShenandoahHeapRegion* r;
1326   set->clear_current_index();
1327   while ((r = set->next()) != nullptr) {
1328     r->make_trash();
1329   }
1330   collection_set()->clear();
1331 }
1332 
1333 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
1334   st->print_cr("Heap Regions:");
1335   st->print_cr("Region state: EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HP=pinned humongous start");
1336   st->print_cr("              HC=humongous continuation, CS=collection set, TR=trash, P=pinned, CSP=pinned collection set");
1337   st->print_cr("BTE=bottom/top/end, TAMS=top-at-mark-start");
1338   st->print_cr("UWM=update watermark, U=used");
1339   st->print_cr("T=TLAB allocs, G=GCLAB allocs");
1340   st->print_cr("S=shared allocs, L=live data");
1341   st->print_cr("CP=critical pins");
1342 
1343   for (size_t i = 0; i < num_regions(); i++) {
1344     get_region(i)->print_on(st);
1345   }
1346 }
1347 
1348 size_t ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
1349   assert(start->is_humongous_start(), "reclaim regions starting with the first one");
1350 
1351   oop humongous_obj = cast_to_oop(start->bottom());
1352   size_t size = humongous_obj->size();
1353   size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
1354   size_t index = start->index() + required_regions - 1;
1355 
1356   assert(!start->has_live(), "liveness must be zero");
1357 
1358   for(size_t i = 0; i < required_regions; i++) {
1359     // Reclaim from tail. Otherwise, assertion fails when printing region to trace log,
1360     // as it expects that every region belongs to a humongous region starting with a humongous start region.
1361     ShenandoahHeapRegion* region = get_region(index --);
1362 
1363     assert(region->is_humongous(), "expect correct humongous start or continuation");
1364     assert(!region->is_cset(), "Humongous region should not be in collection set");
1365 
1366     region->make_trash_immediate();
1367   }
1368   return required_regions;
1369 }
1370 
1371 class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
1372 public:
1373   ShenandoahCheckCleanGCLABClosure() {}
1374   void do_thread(Thread* thread) {
1375     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1376     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1377     assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
1378 
1379     if (ShenandoahHeap::heap()->mode()->is_generational()) {
1380       PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1381       assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
1382       assert(plab->words_remaining() == 0, "PLAB should not need retirement");
1383     }
1384   }
1385 };
1386 
1387 class ShenandoahRetireGCLABClosure : public ThreadClosure {
1388 private:
1389   bool const _resize;
1390 public:
1391   ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
1392   void do_thread(Thread* thread) {
1393     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1394     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1395     gclab->retire();
1396     if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1397       ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1398     }
1399 
1400     if (ShenandoahHeap::heap()->mode()->is_generational()) {
1401       PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1402       assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
1403 
1404       // There are two reasons to retire all plabs between old-gen evacuation passes.
1405       //  1. We need to make the plab memory parsable by remembered-set scanning.
1406       //  2. We need to establish a trustworthy UpdateWaterMark value within each old-gen heap region
1407       ShenandoahGenerationalHeap::heap()->retire_plab(plab, thread);
1408       if (_resize && ShenandoahThreadLocalData::plab_size(thread) > 0) {
1409         ShenandoahThreadLocalData::set_plab_size(thread, 0);
1410       }
1411     }
1412   }
1413 };
1414 
1415 void ShenandoahHeap::labs_make_parsable() {
1416   assert(UseTLAB, "Only call with UseTLAB");
1417 
1418   ShenandoahRetireGCLABClosure cl(false);
1419 
1420   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1421     ThreadLocalAllocBuffer& tlab = t->tlab();
1422     tlab.make_parsable();
1423     cl.do_thread(t);
1424   }
1425 
1426   workers()->threads_do(&cl);
1427 }
1428 
1429 void ShenandoahHeap::tlabs_retire(bool resize) {
1430   assert(UseTLAB, "Only call with UseTLAB");
1431   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1432 
1433   ThreadLocalAllocStats stats;
1434 
1435   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1436     ThreadLocalAllocBuffer& tlab = t->tlab();
1437     tlab.retire(&stats);
1438     if (resize) {
1439       tlab.resize();
1440     }
1441   }
1442 
1443   stats.publish();
1444 
1445 #ifdef ASSERT
1446   ShenandoahCheckCleanGCLABClosure cl;
1447   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1448     cl.do_thread(t);
1449   }
1450   workers()->threads_do(&cl);
1451 #endif
1452 }
1453 
1454 void ShenandoahHeap::gclabs_retire(bool resize) {
1455   assert(UseTLAB, "Only call with UseTLAB");
1456   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1457 
1458   ShenandoahRetireGCLABClosure cl(resize);
1459   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1460     cl.do_thread(t);
1461   }
1462   workers()->threads_do(&cl);
1463 
1464   if (safepoint_workers() != nullptr) {
1465     safepoint_workers()->threads_do(&cl);
1466   }
1467 }
1468 
1469 // Returns size in bytes
1470 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1471   // Return the max allowed size, and let the allocation path
1472   // figure out the safe size for current allocation.
1473   return ShenandoahHeapRegion::max_tlab_size_bytes();
1474 }
1475 
1476 size_t ShenandoahHeap::max_tlab_size() const {
1477   // Returns size in words
1478   return ShenandoahHeapRegion::max_tlab_size_words();
1479 }
1480 
1481 void ShenandoahHeap::collect(GCCause::Cause cause) {
1482   control_thread()->request_gc(cause);
1483 }
1484 
1485 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
1486   //assert(false, "Shouldn't need to do full collections");
1487 }
1488 
1489 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1490   ShenandoahHeapRegion* r = heap_region_containing(addr);
1491   if (r != nullptr) {
1492     return r->block_start(addr);
1493   }
1494   return nullptr;
1495 }
1496 
1497 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1498   ShenandoahHeapRegion* r = heap_region_containing(addr);
1499   return r->block_is_obj(addr);
1500 }
1501 
1502 bool ShenandoahHeap::print_location(outputStream* st, void* addr) const {
1503   return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr);
1504 }
1505 
1506 void ShenandoahHeap::prepare_for_verify() {
1507   if (SafepointSynchronize::is_at_safepoint() && UseTLAB) {
1508     labs_make_parsable();
1509   }
1510 }
1511 
1512 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1513   if (_shenandoah_policy->is_at_shutdown()) {
1514     return;
1515   }
1516 
1517   if (_control_thread != nullptr) {
1518     tcl->do_thread(_control_thread);
1519   }
1520 
1521   workers()->threads_do(tcl);
1522   if (_safepoint_workers != nullptr) {
1523     _safepoint_workers->threads_do(tcl);
1524   }
1525 }
1526 
1527 void ShenandoahHeap::print_tracing_info() const {
1528   LogTarget(Info, gc, stats) lt;
1529   if (lt.is_enabled()) {
1530     ResourceMark rm;
1531     LogStream ls(lt);
1532 
1533     phase_timings()->print_global_on(&ls);
1534 
1535     ls.cr();
1536     ls.cr();
1537 
1538     shenandoah_policy()->print_gc_stats(&ls);
1539 
1540     ls.cr();
1541 
1542     evac_tracker()->print_global_on(&ls);
1543 
1544     ls.cr();
1545     ls.cr();
1546   }
1547 }
1548 
1549 void ShenandoahHeap::set_gc_generation(ShenandoahGeneration* generation) {
1550   shenandoah_assert_control_or_vm_thread_at_safepoint();
1551   _gc_generation = generation;
1552 }
1553 
1554 // Active generation may only be set by the VM thread at a safepoint.
1555 void ShenandoahHeap::set_active_generation() {
1556   assert(Thread::current()->is_VM_thread(), "Only the VM Thread");
1557   assert(SafepointSynchronize::is_at_safepoint(), "Only at a safepoint!");
1558   assert(_gc_generation != nullptr, "Will set _active_generation to nullptr");
1559   _active_generation = _gc_generation;
1560 }
1561 
1562 void ShenandoahHeap::on_cycle_start(GCCause::Cause cause, ShenandoahGeneration* generation) {
1563   shenandoah_policy()->record_collection_cause(cause);
1564 
1565   assert(gc_cause()  == GCCause::_no_gc, "Over-writing cause");
1566   assert(_gc_generation == nullptr, "Over-writing _gc_generation");
1567 
1568   set_gc_cause(cause);
1569   set_gc_generation(generation);
1570 
1571   generation->heuristics()->record_cycle_start();
1572 }
1573 
1574 void ShenandoahHeap::on_cycle_end(ShenandoahGeneration* generation) {
1575   assert(gc_cause() != GCCause::_no_gc, "cause wasn't set");
1576   assert(_gc_generation != nullptr, "_gc_generation wasn't set");
1577 
1578   generation->heuristics()->record_cycle_end();
1579   if (mode()->is_generational() && generation->is_global()) {
1580     // If we just completed a GLOBAL GC, claim credit for completion of young-gen and old-gen GC as well
1581     young_generation()->heuristics()->record_cycle_end();
1582     old_generation()->heuristics()->record_cycle_end();
1583   }
1584 
1585   set_gc_generation(nullptr);
1586   set_gc_cause(GCCause::_no_gc);
1587 }
1588 
1589 void ShenandoahHeap::verify(VerifyOption vo) {
1590   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1591     if (ShenandoahVerify) {
1592       verifier()->verify_generic(vo);
1593     } else {
1594       // TODO: Consider allocating verification bitmaps on demand,
1595       // and turn this on unconditionally.
1596     }
1597   }
1598 }
1599 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1600   return _free_set->capacity();
1601 }
1602 
1603 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1604 private:
1605   MarkBitMap* _bitmap;
1606   ShenandoahScanObjectStack* _oop_stack;
1607   ShenandoahHeap* const _heap;
1608   ShenandoahMarkingContext* const _marking_context;
1609 
1610   template <class T>
1611   void do_oop_work(T* p) {
1612     T o = RawAccess<>::oop_load(p);
1613     if (!CompressedOops::is_null(o)) {
1614       oop obj = CompressedOops::decode_not_null(o);
1615       if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
1616         // There may be dead oops in weak roots in concurrent root phase, do not touch them.
1617         return;
1618       }
1619       obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
1620 
1621       assert(oopDesc::is_oop(obj), "must be a valid oop");
1622       if (!_bitmap->is_marked(obj)) {
1623         _bitmap->mark(obj);
1624         _oop_stack->push(obj);
1625       }
1626     }
1627   }
1628 public:
1629   ObjectIterateScanRootClosure(MarkBitMap* bitmap, ShenandoahScanObjectStack* oop_stack) :
1630     _bitmap(bitmap), _oop_stack(oop_stack), _heap(ShenandoahHeap::heap()),
1631     _marking_context(_heap->marking_context()) {}
1632   void do_oop(oop* p)       { do_oop_work(p); }
1633   void do_oop(narrowOop* p) { do_oop_work(p); }
1634 };
1635 
1636 /*
1637  * This is public API, used in preparation of object_iterate().
1638  * Since we don't do linear scan of heap in object_iterate() (see comment below), we don't
1639  * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can
1640  * control, we call SH::tlabs_retire, SH::gclabs_retire.
1641  */
1642 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
1643   // No-op.
1644 }
1645 
1646 /*
1647  * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
1648  *
1649  * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
1650  * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
1651  * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
1652  * scanning therefore depends on having a valid marking bitmap to support it. However, we only
1653  * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
1654  * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
1655  * wiped the bitmap in preparation for next marking).
1656  *
1657  * For all those reasons, we implement object iteration as a single marking traversal, reporting
1658  * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1659  * is allowed to report dead objects, but is not required to do so.
1660  */
1661 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1662   // Reset bitmap
1663   if (!prepare_aux_bitmap_for_iteration())
1664     return;
1665 
1666   ShenandoahScanObjectStack oop_stack;
1667   ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1668   // Seed the stack with root scan
1669   scan_roots_for_iteration(&oop_stack, &oops);
1670 
1671   // Work through the oop stack to traverse heap
1672   while (! oop_stack.is_empty()) {
1673     oop obj = oop_stack.pop();
1674     assert(oopDesc::is_oop(obj), "must be a valid oop");
1675     cl->do_object(obj);
1676     obj->oop_iterate(&oops);
1677   }
1678 
1679   assert(oop_stack.is_empty(), "should be empty");
1680   // Reclaim bitmap
1681   reclaim_aux_bitmap_for_iteration();
1682 }
1683 
1684 bool ShenandoahHeap::prepare_aux_bitmap_for_iteration() {
1685   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1686 
1687   if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1688     log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1689     return false;
1690   }
1691   // Reset bitmap
1692   _aux_bit_map.clear();
1693   return true;
1694 }
1695 
1696 void ShenandoahHeap::scan_roots_for_iteration(ShenandoahScanObjectStack* oop_stack, ObjectIterateScanRootClosure* oops) {
1697   // Process GC roots according to current GC cycle
1698   // This populates the work stack with initial objects
1699   // It is important to relinquish the associated locks before diving
1700   // into heap dumper
1701   uint n_workers = safepoint_workers() != nullptr ? safepoint_workers()->active_workers() : 1;
1702   ShenandoahHeapIterationRootScanner rp(n_workers);
1703   rp.roots_do(oops);
1704 }
1705 
1706 void ShenandoahHeap::reclaim_aux_bitmap_for_iteration() {
1707   if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
1708     log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
1709   }
1710 }
1711 
1712 // Closure for parallelly iterate objects
1713 class ShenandoahObjectIterateParScanClosure : public BasicOopIterateClosure {
1714 private:
1715   MarkBitMap* _bitmap;
1716   ShenandoahObjToScanQueue* _queue;
1717   ShenandoahHeap* const _heap;
1718   ShenandoahMarkingContext* const _marking_context;
1719 
1720   template <class T>
1721   void do_oop_work(T* p) {
1722     T o = RawAccess<>::oop_load(p);
1723     if (!CompressedOops::is_null(o)) {
1724       oop obj = CompressedOops::decode_not_null(o);
1725       if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
1726         // There may be dead oops in weak roots in concurrent root phase, do not touch them.
1727         return;
1728       }
1729       obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
1730 
1731       assert(oopDesc::is_oop(obj), "Must be a valid oop");
1732       if (_bitmap->par_mark(obj)) {
1733         _queue->push(ShenandoahMarkTask(obj));
1734       }
1735     }
1736   }
1737 public:
1738   ShenandoahObjectIterateParScanClosure(MarkBitMap* bitmap, ShenandoahObjToScanQueue* q) :
1739     _bitmap(bitmap), _queue(q), _heap(ShenandoahHeap::heap()),
1740     _marking_context(_heap->marking_context()) {}
1741   void do_oop(oop* p)       { do_oop_work(p); }
1742   void do_oop(narrowOop* p) { do_oop_work(p); }
1743 };
1744 
1745 // Object iterator for parallel heap iteraion.
1746 // The root scanning phase happenes in construction as a preparation of
1747 // parallel marking queues.
1748 // Every worker processes it's own marking queue. work-stealing is used
1749 // to balance workload.
1750 class ShenandoahParallelObjectIterator : public ParallelObjectIteratorImpl {
1751 private:
1752   uint                         _num_workers;
1753   bool                         _init_ready;
1754   MarkBitMap*                  _aux_bit_map;
1755   ShenandoahHeap*              _heap;
1756   ShenandoahScanObjectStack    _roots_stack; // global roots stack
1757   ShenandoahObjToScanQueueSet* _task_queues;
1758 public:
1759   ShenandoahParallelObjectIterator(uint num_workers, MarkBitMap* bitmap) :
1760         _num_workers(num_workers),
1761         _init_ready(false),
1762         _aux_bit_map(bitmap),
1763         _heap(ShenandoahHeap::heap()) {
1764     // Initialize bitmap
1765     _init_ready = _heap->prepare_aux_bitmap_for_iteration();
1766     if (!_init_ready) {
1767       return;
1768     }
1769 
1770     ObjectIterateScanRootClosure oops(_aux_bit_map, &_roots_stack);
1771     _heap->scan_roots_for_iteration(&_roots_stack, &oops);
1772 
1773     _init_ready = prepare_worker_queues();
1774   }
1775 
1776   ~ShenandoahParallelObjectIterator() {
1777     // Reclaim bitmap
1778     _heap->reclaim_aux_bitmap_for_iteration();
1779     // Reclaim queue for workers
1780     if (_task_queues!= nullptr) {
1781       for (uint i = 0; i < _num_workers; ++i) {
1782         ShenandoahObjToScanQueue* q = _task_queues->queue(i);
1783         if (q != nullptr) {
1784           delete q;
1785           _task_queues->register_queue(i, nullptr);
1786         }
1787       }
1788       delete _task_queues;
1789       _task_queues = nullptr;
1790     }
1791   }
1792 
1793   virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
1794     if (_init_ready) {
1795       object_iterate_parallel(cl, worker_id, _task_queues);
1796     }
1797   }
1798 
1799 private:
1800   // Divide global root_stack into worker queues
1801   bool prepare_worker_queues() {
1802     _task_queues = new ShenandoahObjToScanQueueSet((int) _num_workers);
1803     // Initialize queues for every workers
1804     for (uint i = 0; i < _num_workers; ++i) {
1805       ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
1806       _task_queues->register_queue(i, task_queue);
1807     }
1808     // Divide roots among the workers. Assume that object referencing distribution
1809     // is related with root kind, use round-robin to make every worker have same chance
1810     // to process every kind of roots
1811     size_t roots_num = _roots_stack.size();
1812     if (roots_num == 0) {
1813       // No work to do
1814       return false;
1815     }
1816 
1817     for (uint j = 0; j < roots_num; j++) {
1818       uint stack_id = j % _num_workers;
1819       oop obj = _roots_stack.pop();
1820       _task_queues->queue(stack_id)->push(ShenandoahMarkTask(obj));
1821     }
1822     return true;
1823   }
1824 
1825   void object_iterate_parallel(ObjectClosure* cl,
1826                                uint worker_id,
1827                                ShenandoahObjToScanQueueSet* queue_set) {
1828     assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1829     assert(queue_set != nullptr, "task queue must not be null");
1830 
1831     ShenandoahObjToScanQueue* q = queue_set->queue(worker_id);
1832     assert(q != nullptr, "object iterate queue must not be null");
1833 
1834     ShenandoahMarkTask t;
1835     ShenandoahObjectIterateParScanClosure oops(_aux_bit_map, q);
1836 
1837     // Work through the queue to traverse heap.
1838     // Steal when there is no task in queue.
1839     while (q->pop(t) || queue_set->steal(worker_id, t)) {
1840       oop obj = t.obj();
1841       assert(oopDesc::is_oop(obj), "must be a valid oop");
1842       cl->do_object(obj);
1843       obj->oop_iterate(&oops);
1844     }
1845     assert(q->is_empty(), "should be empty");
1846   }
1847 };
1848 
1849 ParallelObjectIteratorImpl* ShenandoahHeap::parallel_object_iterator(uint workers) {
1850   return new ShenandoahParallelObjectIterator(workers, &_aux_bit_map);
1851 }
1852 
1853 // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
1854 void ShenandoahHeap::keep_alive(oop obj) {
1855   if (is_concurrent_mark_in_progress() && (obj != nullptr)) {
1856     ShenandoahBarrierSet::barrier_set()->enqueue(obj);
1857   }
1858 }
1859 
1860 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1861   for (size_t i = 0; i < num_regions(); i++) {
1862     ShenandoahHeapRegion* current = get_region(i);
1863     blk->heap_region_do(current);
1864   }
1865 }
1866 
1867 class ShenandoahParallelHeapRegionTask : public WorkerTask {
1868 private:
1869   ShenandoahHeap* const _heap;
1870   ShenandoahHeapRegionClosure* const _blk;
1871   size_t const _stride;
1872 
1873   shenandoah_padding(0);
1874   volatile size_t _index;
1875   shenandoah_padding(1);
1876 
1877 public:
1878   ShenandoahParallelHeapRegionTask(ShenandoahHeapRegionClosure* blk, size_t stride) :
1879           WorkerTask("Shenandoah Parallel Region Operation"),
1880           _heap(ShenandoahHeap::heap()), _blk(blk), _stride(stride), _index(0) {}
1881 
1882   void work(uint worker_id) {
1883     ShenandoahParallelWorkerSession worker_session(worker_id);
1884     size_t stride = _stride;
1885 
1886     size_t max = _heap->num_regions();
1887     while (Atomic::load(&_index) < max) {
1888       size_t cur = Atomic::fetch_then_add(&_index, stride, memory_order_relaxed);
1889       size_t start = cur;
1890       size_t end = MIN2(cur + stride, max);
1891       if (start >= max) break;
1892 
1893       for (size_t i = cur; i < end; i++) {
1894         ShenandoahHeapRegion* current = _heap->get_region(i);
1895         _blk->heap_region_do(current);
1896       }
1897     }
1898   }
1899 };
1900 
1901 void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1902   assert(blk->is_thread_safe(), "Only thread-safe closures here");
1903   const uint active_workers = workers()->active_workers();
1904   const size_t n_regions = num_regions();
1905   size_t stride = ShenandoahParallelRegionStride;
1906   if (stride == 0 && active_workers > 1) {
1907     // Automatically derive the stride to balance the work between threads
1908     // evenly. Do not try to split work if below the reasonable threshold.
1909     constexpr size_t threshold = 4096;
1910     stride = n_regions <= threshold ?
1911             threshold :
1912             (n_regions + active_workers - 1) / active_workers;
1913   }
1914 
1915   if (n_regions > stride && active_workers > 1) {
1916     ShenandoahParallelHeapRegionTask task(blk, stride);
1917     workers()->run_task(&task);
1918   } else {
1919     heap_region_iterate(blk);
1920   }
1921 }
1922 
1923 class ShenandoahRendezvousClosure : public HandshakeClosure {
1924 public:
1925   inline ShenandoahRendezvousClosure(const char* name) : HandshakeClosure(name) {}
1926   inline void do_thread(Thread* thread) {}
1927 };
1928 
1929 void ShenandoahHeap::rendezvous_threads(const char* name) {
1930   ShenandoahRendezvousClosure cl(name);
1931   Handshake::execute(&cl);
1932 }
1933 
1934 void ShenandoahHeap::recycle_trash() {
1935   free_set()->recycle_trash();
1936 }
1937 
1938 void ShenandoahHeap::do_class_unloading() {
1939   _unloader.unload();
1940   if (mode()->is_generational()) {
1941     old_generation()->set_parsable(false);
1942   }
1943 }
1944 
1945 void ShenandoahHeap::stw_weak_refs(bool full_gc) {
1946   // Weak refs processing
1947   ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
1948                                                 : ShenandoahPhaseTimings::degen_gc_weakrefs;
1949   ShenandoahTimingsTracker t(phase);
1950   ShenandoahGCWorkerPhase worker_phase(phase);
1951   shenandoah_assert_generations_reconciled();
1952   gc_generation()->ref_processor()->process_references(phase, workers(), false /* concurrent */);
1953 }
1954 
1955 void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
1956   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
1957 
1958   // Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to
1959   // make them parsable for update code to work correctly. Plus, we can compute new sizes
1960   // for future GCLABs here.
1961   if (UseTLAB) {
1962     ShenandoahGCPhase phase(concurrent ?
1963                             ShenandoahPhaseTimings::init_update_refs_manage_gclabs :
1964                             ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
1965     gclabs_retire(ResizeTLAB);
1966   }
1967 
1968   _update_refs_iterator.reset();
1969 }
1970 
1971 void ShenandoahHeap::propagate_gc_state_to_java_threads() {
1972   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
1973   if (_gc_state_changed) {
1974     _gc_state_changed = false;
1975     char state = gc_state();
1976     for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1977       ShenandoahThreadLocalData::set_gc_state(t, state);
1978     }
1979   }
1980 }
1981 
1982 void ShenandoahHeap::set_gc_state(uint mask, bool value) {
1983   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
1984   _gc_state.set_cond(mask, value);
1985   _gc_state_changed = true;
1986   // Check that if concurrent weak root is set then active_gen isn't null
1987   assert(!is_concurrent_weak_root_in_progress() || active_generation() != nullptr, "Error");
1988   shenandoah_assert_generations_reconciled();
1989 }
1990 
1991 void ShenandoahHeap::set_concurrent_young_mark_in_progress(bool in_progress) {
1992   uint mask;
1993   assert(!has_forwarded_objects(), "Young marking is not concurrent with evacuation");
1994   if (!in_progress && is_concurrent_old_mark_in_progress()) {
1995     assert(mode()->is_generational(), "Only generational GC has old marking");
1996     assert(_gc_state.is_set(MARKING), "concurrent_old_marking_in_progress implies MARKING");
1997     // If old-marking is in progress when we turn off YOUNG_MARKING, leave MARKING (and OLD_MARKING) on
1998     mask = YOUNG_MARKING;
1999   } else {
2000     mask = MARKING | YOUNG_MARKING;
2001   }
2002   set_gc_state(mask, in_progress);
2003   manage_satb_barrier(in_progress);
2004 }
2005 
2006 void ShenandoahHeap::set_concurrent_old_mark_in_progress(bool in_progress) {
2007 #ifdef ASSERT
2008   // has_forwarded_objects() iff UPDATEREFS or EVACUATION
2009   bool has_forwarded = has_forwarded_objects();
2010   bool updating_or_evacuating = _gc_state.is_set(UPDATEREFS | EVACUATION);
2011   bool evacuating = _gc_state.is_set(EVACUATION);
2012   assert ((has_forwarded == updating_or_evacuating) || (evacuating && !has_forwarded && collection_set()->is_empty()),
2013           "Updating or evacuating iff has forwarded objects, or if evacuation phase is promoting in place without forwarding");
2014 #endif
2015   if (!in_progress && is_concurrent_young_mark_in_progress()) {
2016     // If young-marking is in progress when we turn off OLD_MARKING, leave MARKING (and YOUNG_MARKING) on
2017     assert(_gc_state.is_set(MARKING), "concurrent_young_marking_in_progress implies MARKING");
2018     set_gc_state(OLD_MARKING, in_progress);
2019   } else {
2020     set_gc_state(MARKING | OLD_MARKING, in_progress);
2021   }
2022   manage_satb_barrier(in_progress);
2023 }
2024 
2025 bool ShenandoahHeap::is_prepare_for_old_mark_in_progress() const {
2026   return old_generation()->is_preparing_for_mark();
2027 }
2028 
2029 void ShenandoahHeap::manage_satb_barrier(bool active) {
2030   if (is_concurrent_mark_in_progress()) {
2031     // Ignore request to deactivate barrier while concurrent mark is in progress.
2032     // Do not attempt to re-activate the barrier if it is already active.
2033     if (active && !ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
2034       ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
2035     }
2036   } else {
2037     // No concurrent marking is in progress so honor request to deactivate,
2038     // but only if the barrier is already active.
2039     if (!active && ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
2040       ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
2041     }
2042   }
2043 }
2044 
2045 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
2046   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
2047   set_gc_state(EVACUATION, in_progress);
2048 }
2049 
2050 void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
2051   if (in_progress) {
2052     _concurrent_strong_root_in_progress.set();
2053   } else {
2054     _concurrent_strong_root_in_progress.unset();
2055   }
2056 }
2057 
2058 void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
2059   set_gc_state(WEAK_ROOTS, cond);
2060 }
2061 
2062 GCTracer* ShenandoahHeap::tracer() {
2063   return shenandoah_policy()->tracer();
2064 }
2065 
2066 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
2067   return _free_set->used();
2068 }
2069 
2070 bool ShenandoahHeap::try_cancel_gc() {
2071   jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
2072   return prev == CANCELLABLE;
2073 }
2074 
2075 void ShenandoahHeap::cancel_concurrent_mark() {
2076   if (mode()->is_generational()) {
2077     young_generation()->cancel_marking();
2078     old_generation()->cancel_marking();
2079   }
2080 
2081   global_generation()->cancel_marking();
2082 
2083   ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
2084 }
2085 
2086 void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
2087   if (try_cancel_gc()) {
2088     FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
2089     log_info(gc)("%s", msg.buffer());
2090     Events::log(Thread::current(), "%s", msg.buffer());
2091     _cancel_requested_time = os::elapsedTime();
2092   }
2093 }
2094 
2095 uint ShenandoahHeap::max_workers() {
2096   return _max_workers;
2097 }
2098 
2099 void ShenandoahHeap::stop() {
2100   // The shutdown sequence should be able to terminate when GC is running.
2101 
2102   // Step 0. Notify policy to disable event recording and prevent visiting gc threads during shutdown
2103   _shenandoah_policy->record_shutdown();
2104 
2105   // Step 1. Notify control thread that we are in shutdown.
2106   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
2107   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
2108   control_thread()->prepare_for_graceful_shutdown();
2109 
2110   // Step 2. Notify GC workers that we are cancelling GC.
2111   cancel_gc(GCCause::_shenandoah_stop_vm);
2112 
2113   // Step 3. Wait until GC worker exits normally.
2114   control_thread()->stop();
2115 }
2116 
2117 void ShenandoahHeap::stw_unload_classes(bool full_gc) {
2118   if (!unload_classes()) return;
2119   ClassUnloadingContext ctx(_workers->active_workers(),
2120                             true /* unregister_nmethods_during_purge */,
2121                             false /* lock_nmethod_free_separately */);
2122 
2123   // Unload classes and purge SystemDictionary.
2124   {
2125     ShenandoahPhaseTimings::Phase phase = full_gc ?
2126                                           ShenandoahPhaseTimings::full_gc_purge_class_unload :
2127                                           ShenandoahPhaseTimings::degen_gc_purge_class_unload;
2128     ShenandoahIsAliveSelector is_alive;
2129     {
2130       CodeCache::UnlinkingScope scope(is_alive.is_alive_closure());
2131       ShenandoahGCPhase gc_phase(phase);
2132       ShenandoahGCWorkerPhase worker_phase(phase);
2133       bool unloading_occurred = SystemDictionary::do_unloading(gc_timer());
2134 
2135       uint num_workers = _workers->active_workers();
2136       ShenandoahClassUnloadingTask unlink_task(phase, num_workers, unloading_occurred);
2137       _workers->run_task(&unlink_task);
2138     }
2139     // Release unloaded nmethods's memory.
2140     ClassUnloadingContext::context()->purge_and_free_nmethods();
2141   }
2142 
2143   {
2144     ShenandoahGCPhase phase(full_gc ?
2145                             ShenandoahPhaseTimings::full_gc_purge_cldg :
2146                             ShenandoahPhaseTimings::degen_gc_purge_cldg);
2147     ClassLoaderDataGraph::purge(true /* at_safepoint */);
2148   }
2149   // Resize and verify metaspace
2150   MetaspaceGC::compute_new_size();
2151   DEBUG_ONLY(MetaspaceUtils::verify();)
2152 }
2153 
2154 // Weak roots are either pre-evacuated (final mark) or updated (final updaterefs),
2155 // so they should not have forwarded oops.
2156 // However, we do need to "null" dead oops in the roots, if can not be done
2157 // in concurrent cycles.
2158 void ShenandoahHeap::stw_process_weak_roots(bool full_gc) {
2159   uint num_workers = _workers->active_workers();
2160   ShenandoahPhaseTimings::Phase timing_phase = full_gc ?
2161                                                ShenandoahPhaseTimings::full_gc_purge_weak_par :
2162                                                ShenandoahPhaseTimings::degen_gc_purge_weak_par;
2163   ShenandoahGCPhase phase(timing_phase);
2164   ShenandoahGCWorkerPhase worker_phase(timing_phase);
2165   // Cleanup weak roots
2166   if (has_forwarded_objects()) {
2167     ShenandoahForwardedIsAliveClosure is_alive;
2168     ShenandoahUpdateRefsClosure keep_alive;
2169     ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveClosure, ShenandoahUpdateRefsClosure>
2170       cleaning_task(timing_phase, &is_alive, &keep_alive, num_workers);
2171     _workers->run_task(&cleaning_task);
2172   } else {
2173     ShenandoahIsAliveClosure is_alive;
2174 #ifdef ASSERT
2175     ShenandoahAssertNotForwardedClosure verify_cl;
2176     ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, ShenandoahAssertNotForwardedClosure>
2177       cleaning_task(timing_phase, &is_alive, &verify_cl, num_workers);
2178 #else
2179     ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, DoNothingClosure>
2180       cleaning_task(timing_phase, &is_alive, &do_nothing_cl, num_workers);
2181 #endif
2182     _workers->run_task(&cleaning_task);
2183   }
2184 }
2185 
2186 void ShenandoahHeap::parallel_cleaning(bool full_gc) {
2187   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2188   assert(is_stw_gc_in_progress(), "Only for Degenerated and Full GC");
2189   ShenandoahGCPhase phase(full_gc ?
2190                           ShenandoahPhaseTimings::full_gc_purge :
2191                           ShenandoahPhaseTimings::degen_gc_purge);
2192   stw_weak_refs(full_gc);
2193   stw_process_weak_roots(full_gc);
2194   stw_unload_classes(full_gc);
2195 }
2196 
2197 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
2198   set_gc_state(HAS_FORWARDED, cond);
2199 }
2200 
2201 void ShenandoahHeap::set_unload_classes(bool uc) {
2202   _unload_classes.set_cond(uc);
2203 }
2204 
2205 bool ShenandoahHeap::unload_classes() const {
2206   return _unload_classes.is_set();
2207 }
2208 
2209 address ShenandoahHeap::in_cset_fast_test_addr() {
2210   ShenandoahHeap* heap = ShenandoahHeap::heap();
2211   assert(heap->collection_set() != nullptr, "Sanity");
2212   return (address) heap->collection_set()->biased_map_address();
2213 }
2214 
2215 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
2216   if (mode()->is_generational()) {
2217     young_generation()->reset_bytes_allocated_since_gc_start();
2218     old_generation()->reset_bytes_allocated_since_gc_start();
2219   }
2220 
2221   global_generation()->reset_bytes_allocated_since_gc_start();
2222 }
2223 
2224 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
2225   _degenerated_gc_in_progress.set_cond(in_progress);
2226 }
2227 
2228 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2229   _full_gc_in_progress.set_cond(in_progress);
2230 }
2231 
2232 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2233   assert (is_full_gc_in_progress(), "should be");
2234   _full_gc_move_in_progress.set_cond(in_progress);
2235 }
2236 
2237 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2238   set_gc_state(UPDATEREFS, in_progress);
2239 }
2240 
2241 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2242   ShenandoahCodeRoots::register_nmethod(nm);
2243 }
2244 
2245 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
2246   ShenandoahCodeRoots::unregister_nmethod(nm);
2247 }
2248 
2249 void ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
2250   heap_region_containing(o)->record_pin();
2251 }
2252 
2253 void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
2254   ShenandoahHeapRegion* r = heap_region_containing(o);
2255   assert(r != nullptr, "Sanity");
2256   assert(r->pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", r->index());
2257   r->record_unpin();
2258 }
2259 
2260 void ShenandoahHeap::sync_pinned_region_status() {
2261   ShenandoahHeapLocker locker(lock());
2262 
2263   for (size_t i = 0; i < num_regions(); i++) {
2264     ShenandoahHeapRegion *r = get_region(i);
2265     if (r->is_active()) {
2266       if (r->is_pinned()) {
2267         if (r->pin_count() == 0) {
2268           r->make_unpinned();
2269         }
2270       } else {
2271         if (r->pin_count() > 0) {
2272           r->make_pinned();
2273         }
2274       }
2275     }
2276   }
2277 
2278   assert_pinned_region_status();
2279 }
2280 
2281 #ifdef ASSERT
2282 void ShenandoahHeap::assert_pinned_region_status() {
2283   for (size_t i = 0; i < num_regions(); i++) {
2284     ShenandoahHeapRegion* r = get_region(i);
2285     shenandoah_assert_generations_reconciled();
2286     if (gc_generation()->contains(r)) {
2287       assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
2288              "Region " SIZE_FORMAT " pinning status is inconsistent", i);
2289     }
2290   }
2291 }
2292 #endif
2293 
2294 ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
2295   return _gc_timer;
2296 }
2297 
2298 void ShenandoahHeap::prepare_concurrent_roots() {
2299   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2300   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2301   set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
2302   set_concurrent_weak_root_in_progress(true);
2303   if (unload_classes()) {
2304     _unloader.prepare();
2305   }
2306 }
2307 
2308 void ShenandoahHeap::finish_concurrent_roots() {
2309   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2310   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2311   if (unload_classes()) {
2312     _unloader.finish();
2313   }
2314 }
2315 
2316 #ifdef ASSERT
2317 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
2318   assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
2319 
2320   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
2321     // Use ParallelGCThreads inside safepoints
2322     assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads (%u) within safepoint, not %u",
2323            ParallelGCThreads, nworkers);
2324   } else {
2325     // Use ConcGCThreads outside safepoints
2326     assert(nworkers == ConcGCThreads, "Use ConcGCThreads (%u) outside safepoints, %u",
2327            ConcGCThreads, nworkers);
2328   }
2329 }
2330 #endif
2331 
2332 ShenandoahVerifier* ShenandoahHeap::verifier() {
2333   guarantee(ShenandoahVerify, "Should be enabled");
2334   assert (_verifier != nullptr, "sanity");
2335   return _verifier;
2336 }
2337 
2338 template<bool CONCURRENT>
2339 class ShenandoahUpdateHeapRefsTask : public WorkerTask {
2340 private:
2341   ShenandoahHeap* _heap;
2342   ShenandoahRegionIterator* _regions;
2343 public:
2344   explicit ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions) :
2345     WorkerTask("Shenandoah Update References"),
2346     _heap(ShenandoahHeap::heap()),
2347     _regions(regions) {
2348   }
2349 
2350   void work(uint worker_id) {
2351     if (CONCURRENT) {
2352       ShenandoahConcurrentWorkerSession worker_session(worker_id);
2353       ShenandoahSuspendibleThreadSetJoiner stsj;
2354       do_work<ShenandoahConcUpdateRefsClosure>(worker_id);
2355     } else {
2356       ShenandoahParallelWorkerSession worker_session(worker_id);
2357       do_work<ShenandoahSTWUpdateRefsClosure>(worker_id);
2358     }
2359   }
2360 
2361 private:
2362   template<class T>
2363   void do_work(uint worker_id) {
2364     if (CONCURRENT && (worker_id == 0)) {
2365       // We ask the first worker to replenish the Mutator free set by moving regions previously reserved to hold the
2366       // results of evacuation.  These reserves are no longer necessary because evacuation has completed.
2367       size_t cset_regions = _heap->collection_set()->count();
2368 
2369       // Now that evacuation is done, we can reassign any regions that had been reserved to hold the results of evacuation
2370       // to the mutator free set.  At the end of GC, we will have cset_regions newly evacuated fully empty regions from
2371       // which we will be able to replenish the Collector free set and the OldCollector free set in preparation for the
2372       // next GC cycle.
2373       _heap->free_set()->move_regions_from_collector_to_mutator(cset_regions);
2374     }
2375     // If !CONCURRENT, there's no value in expanding Mutator free set
2376     T cl;
2377     ShenandoahHeapRegion* r = _regions->next();
2378     while (r != nullptr) {
2379       HeapWord* update_watermark = r->get_update_watermark();
2380       assert (update_watermark >= r->bottom(), "sanity");
2381       if (r->is_active() && !r->is_cset()) {
2382         _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2383         if (ShenandoahPacing) {
2384           _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
2385         }
2386       }
2387       if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
2388         return;
2389       }
2390       r = _regions->next();
2391     }
2392   }
2393 };
2394 
2395 void ShenandoahHeap::update_heap_references(bool concurrent) {
2396   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2397 
2398   if (concurrent) {
2399     ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator);
2400     workers()->run_task(&task);
2401   } else {
2402     ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator);
2403     workers()->run_task(&task);
2404   }
2405 }
2406 
2407 ShenandoahSynchronizePinnedRegionStates::ShenandoahSynchronizePinnedRegionStates() : _lock(ShenandoahHeap::heap()->lock()) { }
2408 
2409 void ShenandoahSynchronizePinnedRegionStates::heap_region_do(ShenandoahHeapRegion* r) {
2410   // Drop "pinned" state from regions that no longer have a pinned count. Put
2411   // regions with a pinned count into the "pinned" state.
2412   if (r->is_active()) {
2413     if (r->is_pinned()) {
2414       if (r->pin_count() == 0) {
2415         ShenandoahHeapLocker locker(_lock);
2416         r->make_unpinned();
2417       }
2418     } else {
2419       if (r->pin_count() > 0) {
2420         ShenandoahHeapLocker locker(_lock);
2421         r->make_pinned();
2422       }
2423     }
2424   }
2425 }
2426 
2427 void ShenandoahHeap::update_heap_region_states(bool concurrent) {
2428   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2429   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2430 
2431   {
2432     ShenandoahGCPhase phase(concurrent ?
2433                             ShenandoahPhaseTimings::final_update_refs_update_region_states :
2434                             ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
2435 
2436     final_update_refs_update_region_states();
2437 
2438     assert_pinned_region_status();
2439   }
2440 
2441   {
2442     ShenandoahGCPhase phase(concurrent ?
2443                             ShenandoahPhaseTimings::final_update_refs_trash_cset :
2444                             ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
2445     trash_cset_regions();
2446   }
2447 }
2448 
2449 void ShenandoahHeap::final_update_refs_update_region_states() {
2450   ShenandoahSynchronizePinnedRegionStates cl;
2451   parallel_heap_region_iterate(&cl);
2452 }
2453 
2454 void ShenandoahHeap::rebuild_free_set(bool concurrent) {
2455   ShenandoahGCPhase phase(concurrent ?
2456                           ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
2457                           ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
2458   ShenandoahHeapLocker locker(lock());
2459   size_t young_cset_regions, old_cset_regions;
2460   size_t first_old_region, last_old_region, old_region_count;
2461   _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count);
2462   // If there are no old regions, first_old_region will be greater than last_old_region
2463   assert((first_old_region > last_old_region) ||
2464          ((last_old_region + 1 - first_old_region >= old_region_count) &&
2465           get_region(first_old_region)->is_old() && get_region(last_old_region)->is_old()),
2466          "sanity: old_region_count: " SIZE_FORMAT ", first_old_region: " SIZE_FORMAT ", last_old_region: " SIZE_FORMAT,
2467          old_region_count, first_old_region, last_old_region);
2468 
2469   if (mode()->is_generational()) {
2470 #ifdef ASSERT
2471     if (ShenandoahVerify) {
2472       verifier()->verify_before_rebuilding_free_set();
2473     }
2474 #endif
2475 
2476     // The computation of bytes_of_allocation_runway_before_gc_trigger is quite conservative so consider all of this
2477     // available for transfer to old. Note that transfer of humongous regions does not impact available.
2478     ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
2479     size_t allocation_runway = gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions);
2480     gen_heap->compute_old_generation_balance(allocation_runway, old_cset_regions);
2481 
2482     // Total old_available may have been expanded to hold anticipated promotions.  We trigger if the fragmented available
2483     // memory represents more than 16 regions worth of data.  Note that fragmentation may increase when we promote regular
2484     // regions in place when many of these regular regions have an abundant amount of available memory within them.  Fragmentation
2485     // will decrease as promote-by-copy consumes the available memory within these partially consumed regions.
2486     //
2487     // We consider old-gen to have excessive fragmentation if more than 12.5% of old-gen is free memory that resides
2488     // within partially consumed regions of memory.
2489   }
2490   // Rebuild free set based on adjusted generation sizes.
2491   _free_set->finish_rebuild(young_cset_regions, old_cset_regions, old_region_count);
2492 
2493   if (mode()->is_generational()) {
2494     ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
2495     ShenandoahOldGeneration* old_gen = gen_heap->old_generation();
2496     old_gen->heuristics()->evaluate_triggers(first_old_region, last_old_region, old_region_count, num_regions());
2497   }
2498 }
2499 
2500 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2501   print_on(st);
2502   st->cr();
2503   print_heap_regions_on(st);
2504 }
2505 
2506 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2507   size_t slice = r->index() / _bitmap_regions_per_slice;
2508 
2509   size_t regions_from = _bitmap_regions_per_slice * slice;
2510   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2511   for (size_t g = regions_from; g < regions_to; g++) {
2512     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2513     if (skip_self && g == r->index()) continue;
2514     if (get_region(g)->is_committed()) {
2515       return true;
2516     }
2517   }
2518   return false;
2519 }
2520 
2521 bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2522   shenandoah_assert_heaplocked();
2523 
2524   // Bitmaps in special regions do not need commits
2525   if (_bitmap_region_special) {
2526     return true;
2527   }
2528 
2529   if (is_bitmap_slice_committed(r, true)) {
2530     // Some other region from the group is already committed, meaning the bitmap
2531     // slice is already committed, we exit right away.
2532     return true;
2533   }
2534 
2535   // Commit the bitmap slice:
2536   size_t slice = r->index() / _bitmap_regions_per_slice;
2537   size_t off = _bitmap_bytes_per_slice * slice;
2538   size_t len = _bitmap_bytes_per_slice;
2539   char* start = (char*) _bitmap_region.start() + off;
2540 
2541   if (!os::commit_memory(start, len, false)) {
2542     return false;
2543   }
2544 
2545   if (AlwaysPreTouch) {
2546     os::pretouch_memory(start, start + len, _pretouch_bitmap_page_size);
2547   }
2548 
2549   return true;
2550 }
2551 
2552 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2553   shenandoah_assert_heaplocked();
2554 
2555   // Bitmaps in special regions do not need uncommits
2556   if (_bitmap_region_special) {
2557     return true;
2558   }
2559 
2560   if (is_bitmap_slice_committed(r, true)) {
2561     // Some other region from the group is still committed, meaning the bitmap
2562     // slice is should stay committed, exit right away.
2563     return true;
2564   }
2565 
2566   // Uncommit the bitmap slice:
2567   size_t slice = r->index() / _bitmap_regions_per_slice;
2568   size_t off = _bitmap_bytes_per_slice * slice;
2569   size_t len = _bitmap_bytes_per_slice;
2570   if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
2571     return false;
2572   }
2573   return true;
2574 }
2575 
2576 void ShenandoahHeap::safepoint_synchronize_begin() {
2577   StackWatermarkSet::safepoint_synchronize_begin();
2578   SuspendibleThreadSet::synchronize();
2579 }
2580 
2581 void ShenandoahHeap::safepoint_synchronize_end() {
2582   SuspendibleThreadSet::desynchronize();
2583 }
2584 
2585 void ShenandoahHeap::try_inject_alloc_failure() {
2586   if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2587     _inject_alloc_failure.set();
2588     os::naked_short_sleep(1);
2589     if (cancelled_gc()) {
2590       log_info(gc)("Allocation failure was successfully injected");
2591     }
2592   }
2593 }
2594 
2595 bool ShenandoahHeap::should_inject_alloc_failure() {
2596   return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2597 }
2598 
2599 void ShenandoahHeap::initialize_serviceability() {
2600   _memory_pool = new ShenandoahMemoryPool(this);
2601   _cycle_memory_manager.add_pool(_memory_pool);
2602   _stw_memory_manager.add_pool(_memory_pool);
2603 }
2604 
2605 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2606   GrowableArray<GCMemoryManager*> memory_managers(2);
2607   memory_managers.append(&_cycle_memory_manager);
2608   memory_managers.append(&_stw_memory_manager);
2609   return memory_managers;
2610 }
2611 
2612 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2613   GrowableArray<MemoryPool*> memory_pools(1);
2614   memory_pools.append(_memory_pool);
2615   return memory_pools;
2616 }
2617 
2618 MemoryUsage ShenandoahHeap::memory_usage() {
2619   return MemoryUsage(_initial_size, used(), committed(), max_capacity());
2620 }
2621 
2622 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2623   _heap(ShenandoahHeap::heap()),
2624   _index(0) {}
2625 
2626 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2627   _heap(heap),
2628   _index(0) {}
2629 
2630 void ShenandoahRegionIterator::reset() {
2631   _index = 0;
2632 }
2633 
2634 bool ShenandoahRegionIterator::has_next() const {
2635   return _index < _heap->num_regions();
2636 }
2637 
2638 char ShenandoahHeap::gc_state() const {
2639   return _gc_state.raw_value();
2640 }
2641 
2642 ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
2643 #ifdef ASSERT
2644   assert(_liveness_cache != nullptr, "sanity");
2645   assert(worker_id < _max_workers, "sanity");
2646   for (uint i = 0; i < num_regions(); i++) {
2647     assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
2648   }
2649 #endif
2650   return _liveness_cache[worker_id];
2651 }
2652 
2653 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
2654   assert(worker_id < _max_workers, "sanity");
2655   assert(_liveness_cache != nullptr, "sanity");
2656   ShenandoahLiveData* ld = _liveness_cache[worker_id];
2657   for (uint i = 0; i < num_regions(); i++) {
2658     ShenandoahLiveData live = ld[i];
2659     if (live > 0) {
2660       ShenandoahHeapRegion* r = get_region(i);
2661       r->increase_live_data_gc_words(live);
2662       ld[i] = 0;
2663     }
2664   }
2665 }
2666 
2667 bool ShenandoahHeap::requires_barriers(stackChunkOop obj) const {
2668   if (is_idle()) return false;
2669 
2670   // Objects allocated after marking start are implicitly alive, don't need any barriers during
2671   // marking phase.
2672   if (is_concurrent_mark_in_progress() &&
2673      !marking_context()->allocated_after_mark_start(obj)) {
2674     return true;
2675   }
2676 
2677   // Can not guarantee obj is deeply good.
2678   if (has_forwarded_objects()) {
2679     return true;
2680   }
2681 
2682   return false;
2683 }
2684 
2685 HeapWord* ShenandoahHeap::allocate_loaded_archive_space(size_t size) {
2686 #if INCLUDE_CDS_JAVA_HEAP
2687   // CDS wants a continuous memory range to load a bunch of objects.
2688   // This effectively bypasses normal allocation paths, and requires
2689   // a bit of massaging to unbreak GC invariants.
2690 
2691   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
2692 
2693   // Easy case: a single regular region, no further adjustments needed.
2694   if (!ShenandoahHeapRegion::requires_humongous(size)) {
2695     return allocate_memory(req);
2696   }
2697 
2698   // Hard case: the requested size would cause a humongous allocation.
2699   // We need to make sure it looks like regular allocation to the rest of GC.
2700 
2701   // CDS code would guarantee no objects straddle multiple regions, as long as
2702   // regions are as large as MIN_GC_REGION_ALIGNMENT. It is impractical at this
2703   // point to deal with case when Shenandoah runs with smaller regions.
2704   // TODO: This check can be dropped once MIN_GC_REGION_ALIGNMENT agrees more with Shenandoah.
2705   if (ShenandoahHeapRegion::region_size_bytes() < ArchiveHeapWriter::MIN_GC_REGION_ALIGNMENT) {
2706     return nullptr;
2707   }
2708 
2709   HeapWord* mem = allocate_memory(req);
2710   size_t start_idx = heap_region_index_containing(mem);
2711   size_t num_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
2712 
2713   // Flip humongous -> regular.
2714   {
2715     ShenandoahHeapLocker locker(lock(), false);
2716     for (size_t c = start_idx; c < start_idx + num_regions; c++) {
2717       get_region(c)->make_regular_bypass();
2718     }
2719   }
2720 
2721   return mem;
2722 #else
2723   assert(false, "Archive heap loader should not be available, should not be here");
2724   return nullptr;
2725 #endif // INCLUDE_CDS_JAVA_HEAP
2726 }
2727 
2728 void ShenandoahHeap::complete_loaded_archive_space(MemRegion archive_space) {
2729   // Nothing to do here, except checking that heap looks fine.
2730 #ifdef ASSERT
2731   HeapWord* start = archive_space.start();
2732   HeapWord* end = archive_space.end();
2733 
2734   // No unclaimed space between the objects.
2735   // Objects are properly allocated in correct regions.
2736   HeapWord* cur = start;
2737   while (cur < end) {
2738     oop oop = cast_to_oop(cur);
2739     shenandoah_assert_in_correct_region(nullptr, oop);
2740     cur += oop->size();
2741   }
2742 
2743   // No unclaimed tail at the end of archive space.
2744   assert(cur == end,
2745          "Archive space should be fully used: " PTR_FORMAT " " PTR_FORMAT,
2746          p2i(cur), p2i(end));
2747 
2748   // Region bounds are good.
2749   ShenandoahHeapRegion* begin_reg = heap_region_containing(start);
2750   ShenandoahHeapRegion* end_reg = heap_region_containing(end);
2751   assert(begin_reg->is_regular(), "Must be");
2752   assert(end_reg->is_regular(), "Must be");
2753   assert(begin_reg->bottom() == start,
2754          "Must agree: archive-space-start: " PTR_FORMAT ", begin-region-bottom: " PTR_FORMAT,
2755          p2i(start), p2i(begin_reg->bottom()));
2756   assert(end_reg->top() == end,
2757          "Must agree: archive-space-end: " PTR_FORMAT ", end-region-top: " PTR_FORMAT,
2758          p2i(end), p2i(end_reg->top()));
2759 #endif
2760 }
2761 
2762 ShenandoahGeneration* ShenandoahHeap::generation_for(ShenandoahAffiliation affiliation) const {
2763   if (!mode()->is_generational()) {
2764     return global_generation();
2765   } else if (affiliation == YOUNG_GENERATION) {
2766     return young_generation();
2767   } else if (affiliation == OLD_GENERATION) {
2768     return old_generation();
2769   }
2770 
2771   ShouldNotReachHere();
2772   return nullptr;
2773 }
2774 
2775 void ShenandoahHeap::log_heap_status(const char* msg) const {
2776   if (mode()->is_generational()) {
2777     young_generation()->log_status(msg);
2778     old_generation()->log_status(msg);
2779   } else {
2780     global_generation()->log_status(msg);
2781   }
2782 }
2783