1 /*
   2  * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.
   4  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
   5  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   6  *
   7  * This code is free software; you can redistribute it and/or modify it
   8  * under the terms of the GNU General Public License version 2 only, as
   9  * published by the Free Software Foundation.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 #include "precompiled.hpp"
  28 #include "memory/allocation.hpp"
  29 #include "memory/universe.hpp"
  30 
  31 #include "gc/shared/gcArguments.hpp"
  32 #include "gc/shared/gcTimer.hpp"
  33 #include "gc/shared/gcTraceTime.inline.hpp"
  34 #include "gc/shared/locationPrinter.inline.hpp"
  35 #include "gc/shared/memAllocator.hpp"
  36 #include "gc/shared/plab.hpp"
  37 #include "gc/shared/tlab_globals.hpp"
  38 
  39 #include "gc/shenandoah/shenandoahAgeCensus.hpp"
  40 #include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
  41 #include "gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp"
  42 #include "gc/shenandoah/shenandoahAllocRequest.hpp"
  43 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  44 #include "gc/shenandoah/shenandoahCardTable.hpp"
  45 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  46 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  47 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  48 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
  49 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  50 #include "gc/shenandoah/shenandoahControlThread.hpp"
  51 #include "gc/shenandoah/shenandoahRegulatorThread.hpp"
  52 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  53 #include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
  54 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  55 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  56 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  57 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  58 #include "gc/shenandoah/shenandoahInitLogger.hpp"
  59 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  60 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
  61 #include "gc/shenandoah/shenandoahMetrics.hpp"
  62 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  63 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  64 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  65 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
  66 #include "gc/shenandoah/shenandoahPadding.hpp"
  67 #include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
  68 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  69 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  70 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
  71 #include "gc/shenandoah/shenandoahSTWMark.hpp"
  72 #include "gc/shenandoah/shenandoahUtils.hpp"
  73 #include "gc/shenandoah/shenandoahVerifier.hpp"
  74 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  75 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  76 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  77 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  78 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  79 #include "gc/shenandoah/mode/shenandoahGenerationalMode.hpp"
  80 #include "gc/shenandoah/mode/shenandoahIUMode.hpp"
  81 #include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
  82 #include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
  83 #include "utilities/globalDefinitions.hpp"
  84 
  85 #if INCLUDE_JFR
  86 #include "gc/shenandoah/shenandoahJfrSupport.hpp"
  87 #endif
  88 
  89 #include "classfile/systemDictionary.hpp"
  90 #include "code/codeCache.hpp"
  91 #include "memory/classLoaderMetaspace.hpp"
  92 #include "memory/metaspaceUtils.hpp"
  93 #include "oops/compressedOops.inline.hpp"
  94 #include "prims/jvmtiTagMap.hpp"
  95 #include "runtime/atomic.hpp"
  96 #include "runtime/globals.hpp"
  97 #include "runtime/interfaceSupport.inline.hpp"
  98 #include "runtime/java.hpp"
  99 #include "runtime/orderAccess.hpp"
 100 #include "runtime/safepointMechanism.hpp"
 101 #include "runtime/vmThread.hpp"
 102 #include "services/mallocTracker.hpp"
 103 #include "services/memTracker.hpp"
 104 #include "utilities/events.hpp"
 105 #include "utilities/powerOfTwo.hpp"
 106 
 107 class ShenandoahPretouchHeapTask : public WorkerTask {
 108 private:
 109   ShenandoahRegionIterator _regions;
 110   const size_t _page_size;
 111 public:
 112   ShenandoahPretouchHeapTask(size_t page_size) :
 113     WorkerTask("Shenandoah Pretouch Heap"),
 114     _page_size(page_size) {}
 115 
 116   virtual void work(uint worker_id) {
 117     ShenandoahHeapRegion* r = _regions.next();
 118     while (r != nullptr) {
 119       if (r->is_committed()) {
 120         os::pretouch_memory(r->bottom(), r->end(), _page_size);
 121       }
 122       r = _regions.next();
 123     }
 124   }
 125 };
 126 
 127 class ShenandoahPretouchBitmapTask : public WorkerTask {
 128 private:
 129   ShenandoahRegionIterator _regions;
 130   char* _bitmap_base;
 131   const size_t _bitmap_size;
 132   const size_t _page_size;
 133 public:
 134   ShenandoahPretouchBitmapTask(char* bitmap_base, size_t bitmap_size, size_t page_size) :
 135     WorkerTask("Shenandoah Pretouch Bitmap"),
 136     _bitmap_base(bitmap_base),
 137     _bitmap_size(bitmap_size),
 138     _page_size(page_size) {}
 139 
 140   virtual void work(uint worker_id) {
 141     ShenandoahHeapRegion* r = _regions.next();
 142     while (r != nullptr) {
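      // Each region maps to a contiguous slice of the mark bitmap: heap_map_factor() is the
      // number of heap bytes covered by one bitmap byte, so dividing the region's start and
      // end byte offsets by it yields the bitmap byte range that backs this region.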
 143       size_t start = r->index()       * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 144       size_t end   = (r->index() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " <= " SIZE_FORMAT, end, _bitmap_size);
 146 
 147       if (r->is_committed()) {
 148         os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);
 149       }
 150 
 151       r = _regions.next();
 152     }
 153   }
 154 };
 155 
 156 jint ShenandoahHeap::initialize() {
 157   //
 158   // Figure out heap sizing
 159   //
 160 
 161   size_t init_byte_size = InitialHeapSize;
 162   size_t min_byte_size  = MinHeapSize;
 163   size_t max_byte_size  = MaxHeapSize;
 164   size_t heap_alignment = HeapAlignment;
 165 
 166   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 167 
 168   Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
 169   Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
 170 
 171   _num_regions = ShenandoahHeapRegion::region_count();
 172   assert(_num_regions == (max_byte_size / reg_size_bytes),
 173          "Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT,
 174          _num_regions, max_byte_size, reg_size_bytes);
 175 
 176   size_t num_committed_regions = init_byte_size / reg_size_bytes;
 177   num_committed_regions = MIN2(num_committed_regions, _num_regions);
 178   assert(num_committed_regions <= _num_regions, "sanity");
 179   _initial_size = num_committed_regions * reg_size_bytes;
 180 
 181   size_t num_min_regions = min_byte_size / reg_size_bytes;
 182   num_min_regions = MIN2(num_min_regions, _num_regions);
 183   assert(num_min_regions <= _num_regions, "sanity");
 184   _minimum_size = num_min_regions * reg_size_bytes;
 185 
 186   // Default to max heap size.
 187   _soft_max_size = _num_regions * reg_size_bytes;
 188 
 189   _committed = _initial_size;
 190 
 191   // Now we know the number of regions and heap sizes, initialize the heuristics.
 192   initialize_heuristics_generations();
 193 
 194   size_t heap_page_size   = UseLargePages ? os::large_page_size() : os::vm_page_size();
 195   size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 196   size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 197 
 198   //
 199   // Reserve and commit memory for heap
 200   //
 201 
 202   ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
 203   initialize_reserved_region(heap_rs);
 204   _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
 205   _heap_region_special = heap_rs.special();
 206 
 207   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
 208          "Misaligned heap: " PTR_FORMAT, p2i(base()));
 209   os::trace_page_sizes_for_requested_size("Heap",
 210                                           max_byte_size, heap_alignment,
 211                                           heap_rs.base(),
 212                                           heap_rs.size(), heap_rs.page_size());
 213 
 214 #if SHENANDOAH_OPTIMIZED_MARKTASK
 215   // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
 216   // Fail if we ever attempt to address more than we can.
 217   if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
 218     FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
 219                           "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
 220                           "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
 221                 p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
 222     vm_exit_during_initialization("Fatal Error", buf);
 223   }
 224 #endif
 225 
 226   ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
 227   if (!_heap_region_special) {
 228     os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
 229                               "Cannot commit heap memory");
 230   }
 231 
 232   BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this, _heap_region));
 233 
 234   //
 235   // After reserving the Java heap, create the card table, barriers, and workers, in dependency order
 236   //
 237   if (mode()->is_generational()) {
 238     ShenandoahDirectCardMarkRememberedSet *rs;
 239     ShenandoahCardTable* card_table = ShenandoahBarrierSet::barrier_set()->card_table();
 240     size_t card_count = card_table->cards_required(heap_rs.size() / HeapWordSize);
 241     rs = new ShenandoahDirectCardMarkRememberedSet(ShenandoahBarrierSet::barrier_set()->card_table(), card_count);
 242     _card_scan = new ShenandoahScanRemembered<ShenandoahDirectCardMarkRememberedSet>(rs);
 243 
 244     // Age census structure
 245     _age_census = new ShenandoahAgeCensus();
 246   }
 247 
 248   _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
 249   if (_workers == nullptr) {
 250     vm_exit_during_initialization("Failed necessary allocation.");
 251   } else {
 252     _workers->initialize_workers();
 253   }
 254 
 255   if (ParallelGCThreads > 1) {
 256     _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread", ParallelGCThreads);
 257     _safepoint_workers->initialize_workers();
 258   }
 259 
 260   //
 261   // Reserve and commit memory for bitmap(s)
 262   //
 263 
 264   size_t bitmap_size_orig = ShenandoahMarkBitMap::compute_size(heap_rs.size());
 265   _bitmap_size = align_up(bitmap_size_orig, bitmap_page_size);
 266 
 267   size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();
 268 
 269   guarantee(bitmap_bytes_per_region != 0,
 270             "Bitmap bytes per region should not be zero");
 271   guarantee(is_power_of_2(bitmap_bytes_per_region),
 272             "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
 273 
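  // A bitmap "slice" is the granularity at which bitmap memory is committed and uncommitted:
  // the larger of one page and one region's worth of bitmap, so slices stay page-granular.
  // For example (illustrative numbers only): with 2M large pages and 64K of bitmap per region,
  // one slice covers 2M / 64K = 32 regions; with 4K base pages, each region gets its own slice.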
 274   if (bitmap_page_size > bitmap_bytes_per_region) {
 275     _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
 276     _bitmap_bytes_per_slice = bitmap_page_size;
 277   } else {
 278     _bitmap_regions_per_slice = 1;
 279     _bitmap_bytes_per_slice = bitmap_bytes_per_region;
 280   }
 281 
 282   guarantee(_bitmap_regions_per_slice >= 1,
 283             "Should have at least one region per slice: " SIZE_FORMAT,
 284             _bitmap_regions_per_slice);
 285 
 286   guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
 287             "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
 288             _bitmap_bytes_per_slice, bitmap_page_size);
 289 
 290   ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
 291   os::trace_page_sizes_for_requested_size("Mark Bitmap",
 292                                           bitmap_size_orig, bitmap_page_size,
 293                                           bitmap.base(),
 294                                           bitmap.size(), bitmap.page_size());
 295   MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
 296   _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
 297   _bitmap_region_special = bitmap.special();
 298 
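  // Commit only the bitmap slices that cover the initially committed regions, rounding the
  // region count up to slice granularity and capping at the full bitmap size.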
 299   size_t bitmap_init_commit = _bitmap_bytes_per_slice *
 300                               align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
 301   bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
 302   if (!_bitmap_region_special) {
 303     os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
 304                               "Cannot commit bitmap memory");
 305   }
 306 
 307   _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);
 308 
 309   if (ShenandoahVerify) {
 310     ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
 311     os::trace_page_sizes_for_requested_size("Verify Bitmap",
 312                                             bitmap_size_orig, bitmap_page_size,
 313                                             verify_bitmap.base(),
 314                                             verify_bitmap.size(), verify_bitmap.page_size());
 315     if (!verify_bitmap.special()) {
 316       os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
 317                                 "Cannot commit verification bitmap memory");
 318     }
 319     MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
 320     MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
 321     _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
 322     _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
 323   }
 324 
 325   // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
 326   size_t aux_bitmap_page_size = bitmap_page_size;
 327 #ifdef LINUX
 328   // In THP "advise" mode, we refrain from advising the system to use large pages
 329   // since we know these commits will be short lived, and there is no reason to trash
 330   // the THP area with this bitmap.
 331   if (UseTransparentHugePages) {
 332     aux_bitmap_page_size = os::vm_page_size();
 333   }
 334 #endif
 335   ReservedSpace aux_bitmap(_bitmap_size, aux_bitmap_page_size);
 336   os::trace_page_sizes_for_requested_size("Aux Bitmap",
 337                                           bitmap_size_orig, aux_bitmap_page_size,
 338                                           aux_bitmap.base(),
 339                                           aux_bitmap.size(), aux_bitmap.page_size());
 340   MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
 341   _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
 342   _aux_bitmap_region_special = aux_bitmap.special();
 343   _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
 344 
 345   //
 346   // Create regions and region sets
 347   //
 348   size_t region_align = align_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE);
 349   size_t region_storage_size_orig = region_align * _num_regions;
 350   size_t region_storage_size = align_up(region_storage_size_orig,
 351                                         MAX2(region_page_size, os::vm_allocation_granularity()));
 352 
 353   ReservedSpace region_storage(region_storage_size, region_page_size);
 354   os::trace_page_sizes_for_requested_size("Region Storage",
 355                                           region_storage_size_orig, region_page_size,
 356                                           region_storage.base(),
 357                                           region_storage.size(), region_storage.page_size());
 358   MemTracker::record_virtual_memory_type(region_storage.base(), mtGC);
 359   if (!region_storage.special()) {
 360     os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false,
 361                               "Cannot commit region memory");
 362   }
 363 
 364   // Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks.
 365   // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there.
  // If not successful, bite the bullet and allocate at whatever address.
 367   {
 368     const size_t cset_align = MAX2<size_t>(os::vm_page_size(), os::vm_allocation_granularity());
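    // The in-cset test is a single byte load from the biased map, indexed by (address >> region
    // size shift), so the reservation must provide one byte per region index up to the heap's end.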
 369     const size_t cset_size = align_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align);
 370     const size_t cset_page_size = os::vm_page_size();
 371 
 372     uintptr_t min = round_up_power_of_2(cset_align);
 373     uintptr_t max = (1u << 30u);
 374     ReservedSpace cset_rs;
 375 
 376     for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
 377       char* req_addr = (char*)addr;
 378       assert(is_aligned(req_addr, cset_align), "Should be aligned");
 379       cset_rs = ReservedSpace(cset_size, cset_align, cset_page_size, req_addr);
 380       if (cset_rs.is_reserved()) {
 381         assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
 382         _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 383         break;
 384       }
 385     }
 386 
 387     if (_collection_set == nullptr) {
 388       cset_rs = ReservedSpace(cset_size, cset_align, os::vm_page_size());
 389       _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 390     }
 391     os::trace_page_sizes_for_requested_size("Collection Set",
 392                                             cset_size, cset_page_size,
 393                                             cset_rs.base(),
 394                                             cset_rs.size(), cset_rs.page_size());
 395   }
 396 
 397   _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
 398   _affiliations = NEW_C_HEAP_ARRAY(uint8_t, _num_regions, mtGC);
 399   _free_set = new ShenandoahFreeSet(this, _num_regions);
 400 
 401   {
 402     ShenandoahHeapLocker locker(lock());
 403 
 404 
 405     for (size_t i = 0; i < _num_regions; i++) {
 406       HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
 407       bool is_committed = i < num_committed_regions;
 408       void* loc = region_storage.base() + i * region_align;
 409 
 410       ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
 411       assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");
 412 
 413       _marking_context->initialize_top_at_mark_start(r);
 414       _regions[i] = r;
 415       assert(!collection_set()->is_in(i), "New region should not be in collection set");
 416 
 417       _affiliations[i] = ShenandoahAffiliation::FREE;
 418     }
 419 
 420     // Initialize to complete
 421     _marking_context->mark_complete();
 422     size_t young_cset_regions, old_cset_regions;
 423 
 424     // We are initializing free set.  We ignore cset region tallies.
 425     _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions);
 426     _free_set->rebuild(young_cset_regions, old_cset_regions);
 427   }
 428 
 429   if (AlwaysPreTouch) {
 430     // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before initialize() below zeroes it with the initializing thread. For any given region,
 432     // we touch the region and the corresponding bitmaps from the same thread.
 433     ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
 434 
 435     _pretouch_heap_page_size = heap_page_size;
 436     _pretouch_bitmap_page_size = bitmap_page_size;
 437 
 438 #ifdef LINUX
 439     // UseTransparentHugePages would madvise that backing memory can be coalesced into huge
 440     // pages. But, the kernel needs to know that every small page is used, in order to coalesce
    // them into a huge one. Therefore, we need to pretouch with smaller pages.
 442     if (UseTransparentHugePages) {
 443       _pretouch_heap_page_size = (size_t)os::vm_page_size();
 444       _pretouch_bitmap_page_size = (size_t)os::vm_page_size();
 445     }
 446 #endif
 447 
 448     // OS memory managers may want to coalesce back-to-back pages. Make their jobs
    // simpler by pre-touching contiguous spaces (heap and bitmap) separately.
 450 
 451     ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, _pretouch_bitmap_page_size);
 452     _workers->run_task(&bcl);
 453 
 454     ShenandoahPretouchHeapTask hcl(_pretouch_heap_page_size);
 455     _workers->run_task(&hcl);
 456   }
 457 
 458   //
 459   // Initialize the rest of GC subsystems
 460   //
 461 
 462   _liveness_cache = NEW_C_HEAP_ARRAY(ShenandoahLiveData*, _max_workers, mtGC);
 463   for (uint worker = 0; worker < _max_workers; worker++) {
 464     _liveness_cache[worker] = NEW_C_HEAP_ARRAY(ShenandoahLiveData, _num_regions, mtGC);
 465     Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
 466   }
 467 
 468   // There should probably be Shenandoah-specific options for these,
 469   // just as there are G1-specific options.
 470   {
 471     ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
 472     satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
 473     satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
 474   }
 475 
 476   _monitoring_support = new ShenandoahMonitoringSupport(this);
 477   _phase_timings = new ShenandoahPhaseTimings(max_workers());
 478   ShenandoahCodeRoots::initialize();
 479 
 480   if (ShenandoahPacing) {
 481     _pacer = new ShenandoahPacer(this);
 482     _pacer->setup_for_idle();
 483   } else {
 484     _pacer = nullptr;
 485   }
 486 
 487   _control_thread = new ShenandoahControlThread();
 488   _regulator_thread = new ShenandoahRegulatorThread(_control_thread);
 489 
 490   print_init_logger();
 491 
 492   return JNI_OK;
 493 }
 494 
 495 void ShenandoahHeap::print_init_logger() const {
 496   ShenandoahInitLogger::print();
 497 }
 498 
 499 size_t ShenandoahHeap::max_size_for(ShenandoahGeneration* generation) const {
 500   switch (generation->type()) {
 501     case YOUNG:
 502       return _generation_sizer.max_young_size();
 503     case OLD:
 504       return max_capacity() - _generation_sizer.min_young_size();
 505     case GLOBAL_GEN:
 506     case GLOBAL_NON_GEN:
 507       return max_capacity();
 508     default:
 509       ShouldNotReachHere();
 510       return 0;
 511   }
 512 }
 513 
 514 size_t ShenandoahHeap::min_size_for(ShenandoahGeneration* generation) const {
 515   switch (generation->type()) {
 516     case YOUNG:
 517       return _generation_sizer.min_young_size();
 518     case OLD:
 519       return max_capacity() - _generation_sizer.max_young_size();
 520     case GLOBAL_GEN:
 521     case GLOBAL_NON_GEN:
 522       return min_capacity();
 523     default:
 524       ShouldNotReachHere();
 525       return 0;
 526   }
 527 }
 528 
 529 void ShenandoahHeap::initialize_heuristics_generations() {
 530   if (ShenandoahGCMode != nullptr) {
 531     if (strcmp(ShenandoahGCMode, "satb") == 0) {
 532       _gc_mode = new ShenandoahSATBMode();
 533     } else if (strcmp(ShenandoahGCMode, "iu") == 0) {
 534       _gc_mode = new ShenandoahIUMode();
 535     } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
 536       _gc_mode = new ShenandoahPassiveMode();
 537     } else if (strcmp(ShenandoahGCMode, "generational") == 0) {
 538       _gc_mode = new ShenandoahGenerationalMode();
 539     } else {
 540       vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
 541     }
 542   } else {
 543     vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
 544   }
 545   _gc_mode->initialize_flags();
 546   if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 547     vm_exit_during_initialization(
 548             err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 549                     _gc_mode->name()));
 550   }
 551   if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
 552     vm_exit_during_initialization(
 553             err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 554                     _gc_mode->name()));
 555   }
 556 
 557   // Max capacity is the maximum _allowed_ capacity. That is, the maximum allowed capacity
 558   // for old would be total heap - minimum capacity of young. This means the sum of the maximum
 559   // allowed for old and young could exceed the total heap size. It remains the case that the
 560   // _actual_ capacity of young + old = total.
 561   _generation_sizer.heap_size_changed(max_capacity());
 562   size_t initial_capacity_young = _generation_sizer.max_young_size();
 563   size_t max_capacity_young = _generation_sizer.max_young_size();
 564   size_t initial_capacity_old = max_capacity() - max_capacity_young;
 565   size_t max_capacity_old = max_capacity() - initial_capacity_young;
 566 
 567   _young_generation = new ShenandoahYoungGeneration(_max_workers, max_capacity_young, initial_capacity_young);
 568   _old_generation = new ShenandoahOldGeneration(_max_workers, max_capacity_old, initial_capacity_old);
 569   _global_generation = new ShenandoahGlobalGeneration(_gc_mode->is_generational(), _max_workers, max_capacity(), max_capacity());
 570   _global_generation->initialize_heuristics(_gc_mode);
 571   if (mode()->is_generational()) {
 572     _young_generation->initialize_heuristics(_gc_mode);
 573     _old_generation->initialize_heuristics(_gc_mode);
 574   }
 575   _evac_tracker = new ShenandoahEvacuationTracker(mode()->is_generational());
 576 }
 577 
 578 #ifdef _MSC_VER
 579 #pragma warning( push )
 580 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 581 #endif
 582 
 583 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 584   CollectedHeap(),
 585   _gc_generation(nullptr),
 586   _prepare_for_old_mark(false),
 587   _initial_size(0),
 588   _promotion_potential(0),
 589   _committed(0),
 590   _max_workers(MAX3(ConcGCThreads, ParallelGCThreads, 1U)),
 591   _workers(nullptr),
 592   _safepoint_workers(nullptr),
 593   _heap_region_special(false),
 594   _num_regions(0),
 595   _regions(nullptr),
 596   _affiliations(nullptr),
 597   _update_refs_iterator(this),
 598   _promoted_reserve(0),
 599   _old_evac_reserve(0),
 600   _old_evac_expended(0),
 601   _young_evac_reserve(0),
 602   _captured_old_usage(0),
 603   _previous_promotion(0),
 604   _upgraded_to_full(false),
 605   _age_census(nullptr),
 606   _has_evacuation_reserve_quantities(false),
 607   _cancel_requested_time(0),
 608   _young_generation(nullptr),
 609   _global_generation(nullptr),
 610   _old_generation(nullptr),
 611   _control_thread(nullptr),
 612   _regulator_thread(nullptr),
 613   _shenandoah_policy(policy),
 614   _free_set(nullptr),
 615   _pacer(nullptr),
 616   _verifier(nullptr),
 617   _phase_timings(nullptr),
 618   _evac_tracker(nullptr),
 619   _mmu_tracker(),
 620   _generation_sizer(&_mmu_tracker),
 621   _monitoring_support(nullptr),
 622   _memory_pool(nullptr),
 623   _young_gen_memory_pool(nullptr),
 624   _old_gen_memory_pool(nullptr),
 625   _stw_memory_manager("Shenandoah Pauses"),
 626   _cycle_memory_manager("Shenandoah Cycles"),
 627   _gc_timer(new ConcurrentGCTimer()),
 628   _soft_ref_policy(),
 629   _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
 630   _old_regions_surplus(0),
 631   _old_regions_deficit(0),
 632   _marking_context(nullptr),
 633   _bitmap_size(0),
 634   _bitmap_regions_per_slice(0),
 635   _bitmap_bytes_per_slice(0),
 636   _bitmap_region_special(false),
 637   _aux_bitmap_region_special(false),
 638   _liveness_cache(nullptr),
 639   _collection_set(nullptr),
 640   _card_scan(nullptr)
 641 {
 642 }
 643 
 644 #ifdef _MSC_VER
 645 #pragma warning( pop )
 646 #endif
 647 
 648 void ShenandoahHeap::print_on(outputStream* st) const {
 649   st->print_cr("Shenandoah Heap");
 650   st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
 651                byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
 652                byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
 653                byte_size_in_proper_unit(committed()),    proper_unit_for_byte_size(committed()),
 654                byte_size_in_proper_unit(used()),         proper_unit_for_byte_size(used()));
 655   st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions",
 656                num_regions(),
 657                byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
 658                proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));
 659 
 660   st->print("Status: ");
 661   if (has_forwarded_objects())                 st->print("has forwarded objects, ");
 662   if (is_concurrent_old_mark_in_progress())    st->print("old marking, ");
 663   if (is_concurrent_young_mark_in_progress())  st->print("young marking, ");
 664   if (is_evacuation_in_progress())             st->print("evacuating, ");
 665   if (is_update_refs_in_progress())            st->print("updating refs, ");
 666   if (is_degenerated_gc_in_progress())         st->print("degenerated gc, ");
 667   if (is_full_gc_in_progress())                st->print("full gc, ");
 668   if (is_full_gc_move_in_progress())           st->print("full gc move, ");
 669   if (is_concurrent_weak_root_in_progress())   st->print("concurrent weak roots, ");
 670   if (is_concurrent_strong_root_in_progress() &&
 671       !is_concurrent_weak_root_in_progress())  st->print("concurrent strong roots, ");
 672 
 673   if (cancelled_gc()) {
 674     st->print("cancelled");
 675   } else {
 676     st->print("not cancelled");
 677   }
 678   st->cr();
 679 
 680   st->print_cr("Reserved region:");
 681   st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
 682                p2i(reserved_region().start()),
 683                p2i(reserved_region().end()));
 684 
 685   ShenandoahCollectionSet* cset = collection_set();
 686   st->print_cr("Collection set:");
 687   if (cset != nullptr) {
 688     st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address()));
 689     st->print_cr(" - map (biased):  " PTR_FORMAT, p2i(cset->biased_map_address()));
 690   } else {
 691     st->print_cr(" (null)");
 692   }
 693 
 694   st->cr();
 695   MetaspaceUtils::print_on(st);
 696 
 697   if (Verbose) {
 698     st->cr();
 699     print_heap_regions_on(st);
 700   }
 701 }
 702 
 703 class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
 704 public:
 705   void do_thread(Thread* thread) {
 706     assert(thread != nullptr, "Sanity");
 707     assert(thread->is_Worker_thread(), "Only worker thread expected");
 708     ShenandoahThreadLocalData::initialize_gclab(thread);
 709   }
 710 };
 711 
 712 void ShenandoahHeap::post_initialize() {
 713   CollectedHeap::post_initialize();
 714   _mmu_tracker.initialize();
 715 
 716   MutexLocker ml(Threads_lock);
 717 
 718   ShenandoahInitWorkerGCLABClosure init_gclabs;
 719   _workers->threads_do(&init_gclabs);
 720 
  // The gclab can not be initialized early during VM startup, as it can not determine its max_size.
  // Instead, we let WorkerThreads initialize the gclab when a new worker is created.
 723   _workers->set_initialize_gclab();
 724   if (_safepoint_workers != nullptr) {
 725     _safepoint_workers->threads_do(&init_gclabs);
 726     _safepoint_workers->set_initialize_gclab();
 727   }
 728 
 729   JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers());
 730 }
 731 
 732 ShenandoahHeuristics* ShenandoahHeap::heuristics() {
 733   return _global_generation->heuristics();
 734 }
 735 
 736 ShenandoahOldHeuristics* ShenandoahHeap::old_heuristics() {
 737   return (ShenandoahOldHeuristics*) _old_generation->heuristics();
 738 }
 739 
 740 ShenandoahYoungHeuristics* ShenandoahHeap::young_heuristics() {
 741   return (ShenandoahYoungHeuristics*) _young_generation->heuristics();
 742 }
 743 
 744 bool ShenandoahHeap::doing_mixed_evacuations() {
 745   return _old_generation->state() == ShenandoahOldGeneration::WAITING_FOR_EVAC;
 746 }
 747 
 748 bool ShenandoahHeap::is_old_bitmap_stable() const {
 749   return _old_generation->is_mark_complete();
 750 }
 751 
 752 bool ShenandoahHeap::is_gc_generation_young() const {
 753   return _gc_generation != nullptr && _gc_generation->is_young();
 754 }
 755 
 756 size_t ShenandoahHeap::used() const {
 757   return global_generation()->used();
 758 }
 759 
 760 size_t ShenandoahHeap::committed() const {
 761   return Atomic::load(&_committed);
 762 }
 763 
 764 void ShenandoahHeap::increase_committed(size_t bytes) {
 765   shenandoah_assert_heaplocked_or_safepoint();
 766   _committed += bytes;
 767 }
 768 
 769 void ShenandoahHeap::decrease_committed(size_t bytes) {
 770   shenandoah_assert_heaplocked_or_safepoint();
 771   _committed -= bytes;
 772 }
 773 
 774 // For tracking usage based on allocations, it should be the case that:
 775 // * The sum of regions::used == heap::used
 776 // * The sum of a generation's regions::used == generation::used
 777 // * The sum of a generation's humongous regions::free == generation::humongous_waste
 778 // These invariants are checked by the verifier on GC safepoints.
 779 //
 780 // Additional notes:
 781 // * When a mutator's allocation request causes a region to be retired, the
 782 //   free memory left in that region is considered waste. It does not contribute
 783 //   to the usage, but it _does_ contribute to allocation rate.
 784 // * The bottom of a PLAB must be aligned on card size. In some cases this will
 785 //   require padding in front of the PLAB (a filler object). Because this padding
 786 //   is included in the region's used memory we include the padding in the usage
 787 //   accounting as waste.
 788 // * Mutator allocations are used to compute an allocation rate. They are also
 789 //   sent to the Pacer for those purposes.
 790 // * There are three sources of waste:
 791 //  1. The padding used to align a PLAB on card size
 792 //  2. Region's free is less than minimum TLAB size and is retired
 793 //  3. The unused portion of memory in the last region of a humongous object
 794 void ShenandoahHeap::increase_used(const ShenandoahAllocRequest& req) {
 795   size_t actual_bytes = req.actual_size() * HeapWordSize;
 796   size_t wasted_bytes = req.waste() * HeapWordSize;
 797   ShenandoahGeneration* generation = generation_for(req.affiliation());
 798 
 799   if (req.is_gc_alloc()) {
 800     assert(wasted_bytes == 0 || req.type() == ShenandoahAllocRequest::_alloc_plab, "Only PLABs have waste");
 801     increase_used(generation, actual_bytes + wasted_bytes);
 802   } else {
 803     assert(req.is_mutator_alloc(), "Expected mutator alloc here");
 804     // padding and actual size both count towards allocation counter
 805     generation->increase_allocated(actual_bytes + wasted_bytes);
 806 
 807     // only actual size counts toward usage for mutator allocations
 808     increase_used(generation, actual_bytes);
 809 
 810     // notify pacer of both actual size and waste
 811     notify_mutator_alloc_words(req.actual_size(), req.waste());
 812 
 813     if (wasted_bytes > 0 && req.actual_size() > ShenandoahHeapRegion::humongous_threshold_words()) {
      increase_humongous_waste(generation, wasted_bytes);
 815     }
 816   }
 817 }
 818 
 819 void ShenandoahHeap::increase_humongous_waste(ShenandoahGeneration* generation, size_t bytes) {
 820   generation->increase_humongous_waste(bytes);
 821   if (!generation->is_global()) {
 822     global_generation()->increase_humongous_waste(bytes);
 823   }
 824 }
 825 
 826 void ShenandoahHeap::decrease_humongous_waste(ShenandoahGeneration* generation, size_t bytes) {
 827   generation->decrease_humongous_waste(bytes);
 828   if (!generation->is_global()) {
 829     global_generation()->decrease_humongous_waste(bytes);
 830   }
 831 }
 832 
 833 void ShenandoahHeap::increase_used(ShenandoahGeneration* generation, size_t bytes) {
 834   generation->increase_used(bytes);
 835   if (!generation->is_global()) {
 836     global_generation()->increase_used(bytes);
 837   }
 838 }
 839 
 840 void ShenandoahHeap::decrease_used(ShenandoahGeneration* generation, size_t bytes) {
 841   generation->decrease_used(bytes);
 842   if (!generation->is_global()) {
 843     global_generation()->decrease_used(bytes);
 844   }
 845 }
 846 
 847 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, size_t waste) {
 848   if (ShenandoahPacing) {
 849     control_thread()->pacing_notify_alloc(words);
 850     if (waste > 0) {
 851       pacer()->claim_for_alloc(waste, true);
 852     }
 853   }
 854 }
 855 
 856 size_t ShenandoahHeap::capacity() const {
 857   return committed();
 858 }
 859 
 860 size_t ShenandoahHeap::max_capacity() const {
 861   return _num_regions * ShenandoahHeapRegion::region_size_bytes();
 862 }
 863 
 864 size_t ShenandoahHeap::soft_max_capacity() const {
 865   size_t v = Atomic::load(&_soft_max_size);
 866   assert(min_capacity() <= v && v <= max_capacity(),
 867          "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
 868          min_capacity(), v, max_capacity());
 869   return v;
 870 }
 871 
 872 void ShenandoahHeap::set_soft_max_capacity(size_t v) {
 873   assert(min_capacity() <= v && v <= max_capacity(),
 874          "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
 875          min_capacity(), v, max_capacity());
 876   Atomic::store(&_soft_max_size, v);
 877 }
 878 
 879 size_t ShenandoahHeap::min_capacity() const {
 880   return _minimum_size;
 881 }
 882 
 883 size_t ShenandoahHeap::initial_capacity() const {
 884   return _initial_size;
 885 }
 886 
 887 void ShenandoahHeap::op_uncommit(double shrink_before, size_t shrink_until) {
 888   assert (ShenandoahUncommit, "should be enabled");
 889 
  // The application allocates from the beginning of the heap, and the GC allocates at
  // the end of it. It is more efficient to uncommit from the end, so that the application
  // can keep enjoying the committed regions near the beginning. GC allocations are much
  // less frequent, and can therefore accept the re-committing costs.
 894 
 895   size_t count = 0;
 896   for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow
 897     ShenandoahHeapRegion* r = get_region(i - 1);
 898     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
 899       ShenandoahHeapLocker locker(lock());
 900       if (r->is_empty_committed()) {
 901         if (committed() < shrink_until + ShenandoahHeapRegion::region_size_bytes()) {
 902           break;
 903         }
 904 
 905         r->make_uncommitted();
 906         count++;
 907       }
 908     }
 909     SpinPause(); // allow allocators to take the lock
 910   }
 911 
 912   if (count > 0) {
 913     control_thread()->notify_heap_changed();
 914     regulator_thread()->notify_heap_changed();
 915   }
 916 }
 917 
 918 void ShenandoahHeap::handle_old_evacuation(HeapWord* obj, size_t words, bool promotion) {
 919   // Only register the copy of the object that won the evacuation race.
 920   card_scan()->register_object_without_lock(obj);
 921 
  // Mark the entire range of the evacuated object as dirty.  At the next remembered set scan,
  // we will clear dirty bits that do not hold interesting pointers.  It's more efficient to
  // do this in batch, in a background GC thread, than to try to carefully dirty only cards
 925   // that hold interesting pointers right now.
 926   card_scan()->mark_range_as_dirty(obj, words);
 927 
 928   if (promotion) {
 929     // This evacuation was a promotion, track this as allocation against old gen
 930     old_generation()->increase_allocated(words * HeapWordSize);
 931   }
 932 }
 933 
 934 void ShenandoahHeap::handle_old_evacuation_failure() {
 935   if (_old_gen_oom_evac.try_set()) {
 936     log_info(gc)("Old gen evac failure.");
 937   }
 938 }
 939 
 940 void ShenandoahHeap::report_promotion_failure(Thread* thread, size_t size) {
 941   // We squelch excessive reports to reduce noise in logs.
 942   const size_t MaxReportsPerEpoch = 4;
 943   static size_t last_report_epoch = 0;
 944   static size_t epoch_report_count = 0;
 945 
 946   size_t promotion_reserve;
 947   size_t promotion_expended;
 948 
 949   size_t gc_id = control_thread()->get_gc_id();
 950 
 951   if ((gc_id != last_report_epoch) || (epoch_report_count++ < MaxReportsPerEpoch)) {
 952     {
 953       // Promotion failures should be very rare.  Invest in providing useful diagnostic info.
 954       ShenandoahHeapLocker locker(lock());
 955       promotion_reserve = get_promoted_reserve();
 956       promotion_expended = get_promoted_expended();
 957     }
 958     PLAB* plab = ShenandoahThreadLocalData::plab(thread);
 959     size_t words_remaining = (plab == nullptr)? 0: plab->words_remaining();
 960     const char* promote_enabled = ShenandoahThreadLocalData::allow_plab_promotions(thread)? "enabled": "disabled";
 961     ShenandoahGeneration* old_gen = old_generation();
 962     size_t old_capacity = old_gen->max_capacity();
 963     size_t old_usage = old_gen->used();
 964     size_t old_free_regions = old_gen->free_unaffiliated_regions();
 965 
 966     log_info(gc, ergo)("Promotion failed, size " SIZE_FORMAT ", has plab? %s, PLAB remaining: " SIZE_FORMAT
 967                        ", plab promotions %s, promotion reserve: " SIZE_FORMAT ", promotion expended: " SIZE_FORMAT
 968                        ", old capacity: " SIZE_FORMAT ", old_used: " SIZE_FORMAT ", old unaffiliated regions: " SIZE_FORMAT,
 969                        size * HeapWordSize, plab == nullptr? "no": "yes",
 970                        words_remaining * HeapWordSize, promote_enabled, promotion_reserve, promotion_expended,
 971                        old_capacity, old_usage, old_free_regions);
 972 
 973     if ((gc_id == last_report_epoch) && (epoch_report_count >= MaxReportsPerEpoch)) {
 974       log_info(gc, ergo)("Squelching additional promotion failure reports for current epoch");
 975     } else if (gc_id != last_report_epoch) {
      last_report_epoch = gc_id;
 977       epoch_report_count = 1;
 978     }
 979   }
 980 }
 981 
 982 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
 983   // New object should fit the GCLAB size
 984   size_t min_size = MAX2(size, PLAB::min_size());
 985 
 986   // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
 987   size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
 988 
 989   // Limit growth of GCLABs to ShenandoahMaxEvacLABRatio * the minimum size.  This enables more equitable distribution of
  // available evacuation budget among the many threads that are coordinating in the evacuation effort.
 991   if (ShenandoahMaxEvacLABRatio > 0) {
 992     log_debug(gc, free)("Allocate new gclab: " SIZE_FORMAT ", " SIZE_FORMAT, new_size, PLAB::min_size() * ShenandoahMaxEvacLABRatio);
 993     new_size = MIN2(new_size, PLAB::min_size() * ShenandoahMaxEvacLABRatio);
 994   }
 995 
 996   new_size = MIN2(new_size, PLAB::max_size());
 997   new_size = MAX2(new_size, PLAB::min_size());
 998 
  // Record new heuristic value even if we take a shortcut below. This captures
1000   // the case when moderately-sized objects always take a shortcut. At some point,
1001   // heuristics should catch up with them.
1002   ShenandoahThreadLocalData::set_gclab_size(thread, new_size);
1003 
1004   if (new_size < size) {
1005     // New size still does not fit the object. Fall back to shared allocation.
1006     // This avoids retiring perfectly good GCLABs, when we encounter a large object.
1007     log_debug(gc, free)("New gclab size (" SIZE_FORMAT ") is too small for " SIZE_FORMAT, new_size, size);
1008     return nullptr;
1009   }
1010 
1011   // Retire current GCLAB, and allocate a new one.
1012   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1013   gclab->retire();
1014 
1015   size_t actual_size = 0;
1016   HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
1017   if (gclab_buf == nullptr) {
1018     return nullptr;
1019   }
1020 
1021   assert (size <= actual_size, "allocation should fit");
1022 
1023   if (ZeroTLAB) {
1024     // ..and clear it.
1025     Copy::zero_to_words(gclab_buf, actual_size);
1026   } else {
1027     // ...and zap just allocated object.
1028 #ifdef ASSERT
1029     // Skip mangling the space corresponding to the object header to
1030     // ensure that the returned space is not considered parsable by
1031     // any concurrent GC thread.
1032     size_t hdr_size = oopDesc::header_size();
1033     Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
1034 #endif // ASSERT
1035   }
1036   gclab->set_buf(gclab_buf, actual_size);
1037   return gclab->allocate(size);
1038 }
1039 
1040 // Establish a new PLAB and allocate size HeapWords within it.
1041 HeapWord* ShenandoahHeap::allocate_from_plab_slow(Thread* thread, size_t size, bool is_promotion) {
1042   // New object should fit the PLAB size
1043   size_t min_size = MAX2(size, PLAB::min_size());
1044 
1045   // Figure out size of new PLAB, looking back at heuristics. Expand aggressively.
1046   size_t cur_size = ShenandoahThreadLocalData::plab_size(thread);
1047   if (cur_size == 0) {
1048     cur_size = PLAB::min_size();
1049   }
1050   size_t future_size = cur_size * 2;
1051   // Limit growth of PLABs to ShenandoahMaxEvacLABRatio * the minimum size.  This enables more equitable distribution of
  // available evacuation budget among the many threads that are coordinating in the evacuation effort.
1053   if (ShenandoahMaxEvacLABRatio > 0) {
1054     future_size = MIN2(future_size, PLAB::min_size() * ShenandoahMaxEvacLABRatio);
1055   }
1056   future_size = MIN2(future_size, PLAB::max_size());
1057   future_size = MAX2(future_size, PLAB::min_size());
1058 
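  // Keep PLAB sizes card-aligned: each PLAB must be a whole-number multiple of the card size so
  // that retired PLABs (and their remnant filler objects) can be registered with the remembered
  // set scanner without taking a lock (see retire_plab). Round up to the next card boundary.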
1059   size_t unalignment = future_size % CardTable::card_size_in_words();
1060   if (unalignment != 0) {
1061     future_size = future_size - unalignment + CardTable::card_size_in_words();
1062   }
1063 
  // Record new heuristic value even if we take a shortcut below. This captures
1065   // the case when moderately-sized objects always take a shortcut. At some point,
1066   // heuristics should catch up with them.  Note that the requested cur_size may
1067   // not be honored, but we remember that this is the preferred size.
1068   ShenandoahThreadLocalData::set_plab_size(thread, future_size);
1069   if (cur_size < size) {
1070     // The PLAB to be allocated is still not large enough to hold the object. Fall back to shared allocation.
1071     // This avoids retiring perfectly good PLABs in order to represent a single large object allocation.
1072     return nullptr;
1073   }
1074 
1075   // Retire current PLAB, and allocate a new one.
1076   PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1077   if (plab->words_remaining() < PLAB::min_size()) {
1078     // Retire current PLAB, and allocate a new one.
1079     // CAUTION: retire_plab may register the remnant filler object with the remembered set scanner without a lock.  This
1080     // is safe iff it is assured that each PLAB is a whole-number multiple of card-mark memory size and each PLAB is
1081     // aligned with the start of a card's memory range.
1082     retire_plab(plab, thread);
1083 
1084     size_t actual_size = 0;
1085     // allocate_new_plab resets plab_evacuated and plab_promoted and disables promotions if old-gen available is
1086     // less than the remaining evacuation need.  It also adjusts plab_preallocated and expend_promoted if appropriate.
1087     HeapWord* plab_buf = allocate_new_plab(min_size, cur_size, &actual_size);
1088     if (plab_buf == nullptr) {
1089       if (min_size == PLAB::min_size()) {
1090         // Disable plab promotions for this thread because we cannot even allocate a plab of minimal size.  This allows us
1091         // to fail faster on subsequent promotion attempts.
1092         ShenandoahThreadLocalData::disable_plab_promotions(thread);
1093       }
      return nullptr;
1095     } else {
1096       ShenandoahThreadLocalData::enable_plab_retries(thread);
1097     }
1098     assert (size <= actual_size, "allocation should fit");
1099     if (ZeroTLAB) {
1100       // ..and clear it.
1101       Copy::zero_to_words(plab_buf, actual_size);
1102     } else {
1103       // ...and zap just allocated object.
1104 #ifdef ASSERT
1105       // Skip mangling the space corresponding to the object header to
1106       // ensure that the returned space is not considered parsable by
1107       // any concurrent GC thread.
1108       size_t hdr_size = oopDesc::header_size();
1109       Copy::fill_to_words(plab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
1110 #endif // ASSERT
1111     }
1112     plab->set_buf(plab_buf, actual_size);
1113     if (is_promotion && !ShenandoahThreadLocalData::allow_plab_promotions(thread)) {
1114       return nullptr;
1115     }
1116     return plab->allocate(size);
1117   } else {
    // If there are still at least min_size() words available within the current plab, don't retire it.  Let's gnaw
    // away on this plab as long as we can.  Meanwhile, return nullptr to force this particular allocation request
    // to be satisfied with a shared allocation.  By packing more promotions into the previously allocated PLAB, we
    // reduce the likelihood of evacuation failures, and we reduce the need for downsizing our PLABs.
1122     return nullptr;
1123   }
1124 }
1125 
1126 // TODO: It is probably most efficient to register all objects (both promotions and evacuations) that were allocated within
1127 // this plab at the time we retire the plab.  A tight registration loop will run within both code and data caches.  This change
1128 // would allow smaller and faster in-line implementation of alloc_from_plab().  Since plabs are aligned on card-table boundaries,
1129 // this object registration loop can be performed without acquiring a lock.
1130 void ShenandoahHeap::retire_plab(PLAB* plab, Thread* thread) {
1131   // We don't enforce limits on plab_evacuated.  We let it consume all available old-gen memory in order to reduce
1132   // probability of an evacuation failure.  We do enforce limits on promotion, to make sure that excessive promotion
1133   // does not result in an old-gen evacuation failure.  Note that a failed promotion is relatively harmless.  Any
1134   // object that fails to promote in the current cycle will be eligible for promotion in a subsequent cycle.
1135 
1136   // When the plab was instantiated, its entirety was treated as if the entire buffer was going to be dedicated to
1137   // promotions.  Now that we are retiring the buffer, we adjust for the reality that the plab is not entirely promotions.
1138   //  1. Some of the plab may have been dedicated to evacuations.
1139   //  2. Some of the plab may have been abandoned due to waste (at the end of the plab).
1140   size_t not_promoted =
1141     ShenandoahThreadLocalData::get_plab_preallocated_promoted(thread) - ShenandoahThreadLocalData::get_plab_promoted(thread);
1142   ShenandoahThreadLocalData::reset_plab_promoted(thread);
1143   ShenandoahThreadLocalData::reset_plab_evacuated(thread);
1144   ShenandoahThreadLocalData::set_plab_preallocated_promoted(thread, 0);
1145   if (not_promoted > 0) {
1146     unexpend_promoted(not_promoted);
1147   }
1148   size_t waste = plab->waste();
1149   HeapWord* top = plab->top();
1150   plab->retire();
1151   if (top != nullptr && plab->waste() > waste && is_in_old(top)) {
1152     // If retiring the plab created a filler object, then we
1153     // need to register it with our card scanner so it can
1154     // safely walk the region backing the plab.
1155     log_debug(gc)("retire_plab() is registering remnant of size " SIZE_FORMAT " at " PTR_FORMAT,
1156                   plab->waste() - waste, p2i(top));
1157     card_scan()->register_object_without_lock(top);
1158   }
1159 }
1160 
1161 void ShenandoahHeap::retire_plab(PLAB* plab) {
1162   Thread* thread = Thread::current();
1163   retire_plab(plab, thread);
1164 }
1165 
1166 void ShenandoahHeap::cancel_old_gc() {
1167   shenandoah_assert_safepoint();
  assert(_old_generation != nullptr, "Should only have mixed collections in generational mode.");
1169   log_info(gc)("Terminating old gc cycle.");
1170 
1171   // Stop marking
1172   old_generation()->cancel_marking();
1173   // Stop coalescing undead objects
1174   set_prepare_for_old_mark_in_progress(false);
1175   // Stop tracking old regions
1176   old_heuristics()->abandon_collection_candidates();
1177   // Remove old generation access to young generation mark queues
1178   young_generation()->set_old_gen_task_queues(nullptr);
1179   // Transition to IDLE now.
1180   _old_generation->transition_to(ShenandoahOldGeneration::IDLE);
1181 }
1182 
1183 bool ShenandoahHeap::is_old_gc_active() {
1184   return _old_generation->state() != ShenandoahOldGeneration::IDLE;
1185 }
1186 
1187 // xfer_limit is the maximum we're able to transfer from young to old
1188 void ShenandoahHeap::adjust_generation_sizes_for_next_cycle(
1189   size_t xfer_limit, size_t young_cset_regions, size_t old_cset_regions) {
1190 
1191   // Make sure old-generation is large enough, but no larger, than is necessary to hold mixed evacuations
1192   // and promotions if we anticipate either.
1193   size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
1194   size_t promo_load = get_promotion_potential();
1195   // The free set will reserve this amount of memory to hold young evacuations
1196   size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100;
1197   size_t old_reserve = 0;
1198   size_t mixed_candidates = old_heuristics()->unprocessed_old_collection_candidates();
1199   bool doing_mixed = (mixed_candidates > 0);
1200   bool doing_promotions = promo_load > 0;
1201 
1202   // round down
1203   size_t max_old_region_xfer = xfer_limit / region_size_bytes;
1204 
1205   // We can limit the reserve to the size of anticipated promotions
1206   size_t max_old_reserve = young_reserve * ShenandoahOldEvacRatioPercent / (100 - ShenandoahOldEvacRatioPercent);
1207   // Here's the algebra:
1208   //  TotalEvacuation = OldEvacuation + YoungEvacuation
1209   //  OldEvacuation = TotalEvacuation*(ShenandoahOldEvacRatioPercent/100)
       //  OldEvacuation*(1 - ShenandoahOldEvacRatioPercent/100) = YoungEvacuation*(ShenandoahOldEvacRatioPercent/100)
1210   //  OldEvacuation = YoungEvacuation * (ShenandoahOldEvacRatioPercent/100)/(1 - ShenandoahOldEvacRatioPercent/100)
1211   //  OldEvacuation = YoungEvacuation * ShenandoahOldEvacRatioPercent/(100 - ShenandoahOldEvacRatioPercent)
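     // Worked example (illustrative values only): with ShenandoahOldEvacRatioPercent = 75,
     // max_old_reserve = young_reserve * 75 / 25 = 3 * young_reserve, i.e. old-gen evacuations and
     // promotions together may reserve up to three times the young evacuation reserve.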
1212 
1213   size_t reserve_for_mixed, reserve_for_promo;
1214   if (doing_mixed) {
1215     assert(old_generation()->available() >= old_generation()->free_unaffiliated_regions() * region_size_bytes,
1216            "Unaffiliated available must not exceed total available");
1217 
1218     // We want this much memory to be unfragmented in order to reliably evacuate old.  This is conservative because we
1219     // may not evacuate the entirety of unprocessed candidates in a single mixed evacuation.
1220     size_t max_evac_need = (size_t)
1221       (old_heuristics()->unprocessed_old_collection_candidates_live_memory() * ShenandoahOldEvacWaste);
1222     size_t old_fragmented_available =
1223       old_generation()->available() - old_generation()->free_unaffiliated_regions() * region_size_bytes;
1224     reserve_for_mixed = max_evac_need + old_fragmented_available;
1225     if (reserve_for_mixed > max_old_reserve) {
1226       reserve_for_mixed = max_old_reserve;
1227     }
1228   } else {
1229     reserve_for_mixed = 0;
1230   }
1231 
1232   size_t available_for_promotions = max_old_reserve - reserve_for_mixed;
1233   if (doing_promotions) {
1234     // Size the promotion reserve from the anticipated promotion load, bounded by what remains of the old reserve.
1235     reserve_for_promo = (size_t) (promo_load * ShenandoahPromoEvacWaste);
1236     if (reserve_for_promo > available_for_promotions) {
1237       reserve_for_promo = available_for_promotions;
1238     }
1239   } else {
1240     reserve_for_promo = 0;
1241   }
1242   old_reserve = reserve_for_mixed + reserve_for_promo;
1243   assert(old_reserve <= max_old_reserve, "cannot reserve more than max for old evacuations");
1244   size_t old_available = old_generation()->available() + old_cset_regions * region_size_bytes;
1245   size_t young_available = young_generation()->available() + young_cset_regions * region_size_bytes;
1246   size_t old_region_deficit = 0;
1247   size_t old_region_surplus = 0;
1248   if (old_available >= old_reserve) {
1249     size_t old_excess = old_available - old_reserve;
1250     size_t excess_regions = old_excess / region_size_bytes;
1251     size_t unaffiliated_old_regions = old_generation()->free_unaffiliated_regions() + old_cset_regions;
1252     size_t unaffiliated_old = unaffiliated_old_regions * region_size_bytes;
1253     if (unaffiliated_old_regions < excess_regions) {
1254       // We'll give only unaffiliated old to young, which is known to be less than the excess.
1255       old_region_surplus = unaffiliated_old_regions;
1256     } else {
1257       // unaffiliated_old_regions >= excess_regions, so we only give away the excess.
1258       old_region_surplus = excess_regions;
1259     }
1260   } else {
1261     // We need to request transfer from YOUNG.  Ignore that this will directly impact young_generation()->max_capacity(),
1262     // indirectly impacting young_reserve and old_reserve.  These computations are conservative.
1263     size_t old_need = old_reserve - old_available;
1264     // Round up the number of regions needed from YOUNG
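     // (Ceiling division: e.g. an old_need of 5 MB with 2 MB regions yields a deficit of 3 regions.)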
1265     old_region_deficit = (old_need + region_size_bytes - 1) / region_size_bytes;
1266   }
1267   if (old_region_deficit > max_old_region_xfer) {
1268     // If we're running short on young-gen memory, limit the xfer.  Old-gen collection activities will be curtailed
1269     // if the budget is smaller than desired.
1270     old_region_deficit = max_old_region_xfer;
1271   }
1272   set_old_region_surplus(old_region_surplus);
1273   set_old_region_deficit(old_region_deficit);
1274 }
1275 
1276 // Called from stubs in JIT code or interpreter
1277 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
1278                                             size_t requested_size,
1279                                             size_t* actual_size) {
1280   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
1281   HeapWord* res = allocate_memory(req, false);
1282   if (res != nullptr) {
1283     *actual_size = req.actual_size();
1284   } else {
1285     *actual_size = 0;
1286   }
1287   return res;
1288 }
1289 
1290 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
1291                                              size_t word_size,
1292                                              size_t* actual_size) {
1293   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
1294   HeapWord* res = allocate_memory(req, false);
1295   if (res != nullptr) {
1296     *actual_size = req.actual_size();
1297   } else {
1298     *actual_size = 0;
1299   }
1300   return res;
1301 }
1302 
1303 HeapWord* ShenandoahHeap::allocate_new_plab(size_t min_size,
1304                                             size_t word_size,
1305                                             size_t* actual_size) {
1306   // Round the requested sizes up to multiples of the card size
1307   size_t words_in_card = CardTable::card_size_in_words();
1308   size_t align_mask = ~(words_in_card - 1);
1309   min_size = (min_size + words_in_card - 1) & align_mask;
1310   word_size = (word_size + words_in_card - 1) & align_mask;
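     // Worked example (assuming 512-byte cards and 8-byte heap words, so words_in_card == 64):
     // a min_size of 100 words rounds up to 128 and a word_size of 130 rounds up to 192.  Note that
     // this rounding relies on words_in_card being a power of two.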
1311   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_plab(min_size, word_size);
1312   // Note that allocate_memory() sets a thread-local flag to prohibit further promotions by this thread
1313   // if we are at risk of infringing on the old-gen evacuation budget.
1314   HeapWord* res = allocate_memory(req, false);
1315   if (res != nullptr) {
1316     *actual_size = req.actual_size();
1317   } else {
1318     *actual_size = 0;
1319   }
1320   return res;
1321 }
1322 
1323 // is_promotion is true iff this allocation is known for sure to hold the result of young-gen evacuation
1324 // to old-gen.  PLAB allocations are not known as such, since they may also hold old-gen evacuations.
1325 HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req, bool is_promotion) {
1326   intptr_t pacer_epoch = 0;
1327   bool in_new_region = false;
1328   HeapWord* result = nullptr;
1329 
1330   if (req.is_mutator_alloc()) {
1331     if (ShenandoahPacing) {
1332       pacer()->pace_for_alloc(req.size());
1333       pacer_epoch = pacer()->epoch();
1334     }
1335 
1336     if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
1337       result = allocate_memory_under_lock(req, in_new_region, is_promotion);
1338     }
1339 
1340     // Allocation failed, block until control thread reacted, then retry allocation.
1341     //
1342     // It might happen that one of the threads requesting allocation would unblock
1343     // way later after GC happened, only to fail the second allocation, because
1344     // other threads have already depleted the free storage. In this case, a better
1345     // strategy is to try again, as long as GC makes progress (or until at least
1346     // one full GC has completed).
1347     size_t original_count = shenandoah_policy()->full_gc_count();
1348     while (result == nullptr
1349         && (_progress_last_gc.is_set() || original_count == shenandoah_policy()->full_gc_count())) {
1350       control_thread()->handle_alloc_failure(req);
1351       result = allocate_memory_under_lock(req, in_new_region, is_promotion);
1352     }
1353 
1354   } else {
1355     assert(req.is_gc_alloc(), "Can only accept GC allocs here");
1356     result = allocate_memory_under_lock(req, in_new_region, is_promotion);
1357     // Do not call handle_alloc_failure() here, because we cannot block.
1358     // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
1359   }
1360 
1361   if (in_new_region) {
1362     control_thread()->notify_heap_changed();
1363     regulator_thread()->notify_heap_changed();
1364   }
1365 
1366   if (result == nullptr) {
1367     req.set_actual_size(0);
1368   }
1369 
1370   // This is called regardless of the outcome of the allocation to account
1371   // for any waste created by retiring regions with this request.
1372   increase_used(req);
1373 
1374   if (result != nullptr) {
1375     size_t requested = req.size();
1376     size_t actual = req.actual_size();
1377 
1378     assert (req.is_lab_alloc() || (requested == actual),
1379             "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
1380             ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);
1381 
1382     if (req.is_mutator_alloc()) {
1383       // If we requested more than we were granted, give the rest back to pacer.
1384       // This only matters if we are in the same pacing epoch: do not try to unpace
1385       // over the budget for the other phase.
1386       if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
1387         pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
1388       }
1389     }
1390   }
1391 
1392   return result;
1393 }
1394 
1395 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region, bool is_promotion) {
1396   bool try_smaller_lab_size = false;
1397   size_t smaller_lab_size;
1398   {
1399     // promotion_eligible pertains only to PLAB allocations, denoting that the PLAB is allowed to allocate for promotions.
1400     bool promotion_eligible = false;
1401     bool allow_allocation = true;
1402     bool plab_alloc = false;
1403     size_t requested_bytes = req.size() * HeapWordSize;
1404     HeapWord* result = nullptr;
1405     ShenandoahHeapLocker locker(lock());
1406     Thread* thread = Thread::current();
1407 
1408     if (mode()->is_generational()) {
1409       if (req.affiliation() == YOUNG_GENERATION) {
1410         if (req.is_mutator_alloc()) {
1411           size_t young_words_available = young_generation()->available() / HeapWordSize;
1412           if (ShenandoahElasticTLAB && req.is_lab_alloc() && (req.min_size() < young_words_available)) {
1413             // Allow ourselves to try a smaller lab size even if the requested size fits within the available young memory.
1414             // We may need a smaller lab size because young memory has become too fragmented.
1415             try_smaller_lab_size = true;
1416             smaller_lab_size = (young_words_available < req.size())? young_words_available: req.size();
1417           } else if (req.size() > young_words_available) {
1418             // Can't allocate because even min_size() is larger than remaining young_available
1419             log_info(gc, ergo)("Unable to shrink %s alloc request of minimum size: " SIZE_FORMAT
1420                                ", young words available: " SIZE_FORMAT, req.type_string(),
1421                                HeapWordSize * (req.is_lab_alloc()? req.min_size(): req.size()), young_words_available);
1422             return nullptr;
1423           }
1424         }
1425       } else {                    // req.affiliation() == OLD_GENERATION
1426         assert(req.type() != ShenandoahAllocRequest::_alloc_gclab, "GCLAB pertains only to young-gen memory");
1427         if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
1428           plab_alloc = true;
1429           size_t promotion_avail = get_promoted_reserve();
1430           size_t promotion_expended = get_promoted_expended();
1431           if (promotion_expended + requested_bytes > promotion_avail) {
1432             promotion_avail = 0;
1433             if (get_old_evac_reserve() == 0) {
1434               // There are no old-gen evacuations in this pass.  There's no value in creating a plab that cannot
1435               // be used for promotions.
1436               allow_allocation = false;
1437             }
1438           } else {
1439             promotion_avail = promotion_avail - (promotion_expended + requested_bytes);
1440             promotion_eligible = true;
1441           }
1442         } else if (is_promotion) {
1443           // This is a shared alloc for promotion
1444           size_t promotion_avail = get_promoted_reserve();
1445           size_t promotion_expended = get_promoted_expended();
1446           if (promotion_expended + requested_bytes > promotion_avail) {
1447             promotion_avail = 0;
1448           } else {
1449             promotion_avail = promotion_avail - (promotion_expended + requested_bytes);
1450           }
1451           if (promotion_avail == 0) {
1452             // We need to reserve the remaining memory for evacuation.  Reject this allocation.  The object will be
1453             // evacuated to young-gen memory and promoted during a future GC pass.
1454             return nullptr;
1455           }
1456           // Else, we'll allow the allocation to proceed.  (Since we hold heap lock, the tested condition remains true.)
1457         } else {
1458           // This is a shared allocation for evacuation.  Memory has already been reserved for this purpose.
1459         }
1460       }
1461     } // This ends the is_generational() block
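     // Summary of the generational admission checks above (a recap, not new policy):
     //  - young mutator lab allocations may be retried with a smaller lab size, or refused if they cannot fit;
     //  - PLAB allocations proceed, but promotion eligibility depends on the remaining promotion budget, and the
     //    allocation is suppressed entirely when neither promotions nor old-gen evacuations could use the PLAB;
     //  - shared allocations for promotion are refused once the promotion budget is exhausted;
     //  - shared allocations for old-gen evacuation always proceed, against memory already reserved for them.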
1462 
1463     // First try the original request.  If TLAB request size is greater than available, allocate() will attempt to downsize
1464     // request to fit within available memory.
1465     result = (allow_allocation)? _free_set->allocate(req, in_new_region): nullptr;
1466     if (result != nullptr) {
1467       if (req.is_old()) {
1468         ShenandoahThreadLocalData::reset_plab_promoted(thread);
1469         if (req.is_gc_alloc()) {
1470           bool disable_plab_promotions = false;
1471           if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
1472             if (promotion_eligible) {
1473               size_t actual_size = req.actual_size() * HeapWordSize;
1474               // The actual size of the allocation may be larger than the requested bytes (due to alignment on card boundaries).
1475               // If this puts us over our promotion budget, we need to disable future PLAB promotions for this thread.
1476               if (get_promoted_expended() + actual_size <= get_promoted_reserve()) {
1477                 // Assume the entirety of this PLAB will be used for promotion.  This prevents promotion from overreach.
1478                 // When we retire this plab, we'll unexpend what we don't really use.
1479                 ShenandoahThreadLocalData::enable_plab_promotions(thread);
1480                 expend_promoted(actual_size);
1481                 assert(get_promoted_expended() <= get_promoted_reserve(), "Do not expend more promotion than budgeted");
1482                 ShenandoahThreadLocalData::set_plab_preallocated_promoted(thread, actual_size);
1483               } else {
1484                 disable_plab_promotions = true;
1485               }
1486             } else {
1487               disable_plab_promotions = true;
1488             }
1489             if (disable_plab_promotions) {
1490               // Disable promotions in this thread because entirety of this PLAB must be available to hold old-gen evacuations.
1491               ShenandoahThreadLocalData::disable_plab_promotions(thread);
1492               ShenandoahThreadLocalData::set_plab_preallocated_promoted(thread, 0);
1493             }
1494           } else if (is_promotion) {
1495             // Shared promotion.  Assume size is requested_bytes.
1496             expend_promoted(requested_bytes);
1497             assert(get_promoted_expended() <= get_promoted_reserve(), "Do not expend more promotion than budgeted");
1498           }
1499         }
1500 
1501         // Register the newly allocated object while we're holding the global lock since there's no synchronization
1502         // built in to the implementation of register_object().  There are potential races when multiple independent
1503         // threads are allocating objects, some of which might span the same card region.  For example, consider
1504         // a card table's memory region within which three objects are being allocated by three different threads:
1505         //
1506         // objects being "concurrently" allocated:
1507         //    [-----a------][-----b-----][--------------c------------------]
1508         //            [---- card table memory range --------------]
1509         //
1510         // Before any objects are allocated, this card's memory range holds no objects.  Note that allocation of object a
1511         //   wants to set the starts-object, first-start, and last-start attributes of the preceding card region.
1512         //   allocation of object b wants to set the starts-object, first-start, and last-start attributes of this card region.
1513         //   allocation of object c also wants to set the starts-object, first-start, and last-start attributes of this
1514         //   card region.
1515         //
1516         // The thread allocating b and the thread allocating c can "race" in various ways, resulting in confusion, such as
1517         // last-start representing object b while first-start represents object c.  This is why we need to require all
1518         // register_object() invocations to be "mutually exclusive" with respect to each card's memory range.
1519         ShenandoahHeap::heap()->card_scan()->register_object(result);
1520       }
1521     } else {
1522       // The allocation failed.  If this was a plab allocation, we've already retired it and no longer have a plab.
1523       if (req.is_old() && req.is_gc_alloc() && (req.type() == ShenandoahAllocRequest::_alloc_plab)) {
1524         // We don't need to disable PLAB promotions because there is no PLAB.  We leave promotions enabled because
1525         // this allows the surrounding infrastructure to retry alloc_plab_slow() with a smaller PLAB size.
1526         ShenandoahThreadLocalData::set_plab_preallocated_promoted(thread, 0);
1527       }
1528     }
1529     if ((result != nullptr) || !try_smaller_lab_size) {
1530       return result;
1531     }
1532     // else, fall through to try_smaller_lab_size
1533   } // This closes the block that holds the heap lock, releasing the lock.
1534 
1535   // We failed to allocate the originally requested lab size.  Let's see if we can allocate a smaller lab size.
1536   if (req.size() == smaller_lab_size) {
1537     // If we were already trying to allocate min size, no value in attempting to repeat the same.  End the recursion.
1538     return nullptr;
1539   }
1540 
1541   // We arrive here if the tlab allocation request can be resized to fit within young_available
1542   assert((req.affiliation() == YOUNG_GENERATION) && req.is_lab_alloc() && req.is_mutator_alloc() &&
1543          (smaller_lab_size < req.size()), "Only shrink allocation request size for TLAB allocations");
1544 
1545   // By convention, ShenandoahAllocRequest is primarily read-only.  The only mutable instance data is represented by
1546   // actual_size(), which is overwritten with the size of the allocation when the allocation request is satisfied.  We use a
1547   // recursive call here rather than introducing new methods to mutate the existing ShenandoahAllocRequest argument.
1548   // Mutation of the existing object might result in astonishing results if calling contexts assume the content of immutable
1549   // fields remain constant.  The original TLAB allocation request was for memory that exceeded the current capacity.  We'll
1550   // attempt to allocate a smaller TLAB.  If this is successful, we'll update actual_size() of our incoming
1551   // ShenandoahAllocRequest.  If the recursive request fails, we'll simply return nullptr.
1552 
1553   // Note that we've relinquished the HeapLock and some other thread may perform additional allocation before our recursive
1554   // call reacquires the lock.  If that happens, we will need another recursive call to further reduce the size of our request
1555   // for each time another thread allocates young memory during the brief intervals that the heap lock is available to
1556   // interfering threads.  We expect this interference to be rare.  The recursion bottoms out when young_available is
1557   // smaller than req.min_size().  The inner-nested call to allocate_memory_under_lock() uses the same min_size() value
1558   // as this call, but it uses a preferred size() that is smaller than our preferred size, and is no larger than what we most
1559   // recently saw as the memory currently available within the young generation.
1560 
1561   // TODO: At the expense of code clarity, we could rewrite this recursive solution to use iteration.  We need at most one
1562   // extra instance of the ShenandoahAllocRequest, which we can re-initialize multiple times inside a loop, with one iteration
1563   // of the loop required for each time the existing solution would recurse.  An iterative solution would be more efficient
1564   // in CPU time and stack memory utilization.  The expectation is that it is very rare that we would recurse more than once
1565   // so making this change is not currently seen as a high priority.
1566 
1567   ShenandoahAllocRequest smaller_req = ShenandoahAllocRequest::for_tlab(req.min_size(), smaller_lab_size);
1568 
1569   // Note that shrinking the preferred size gets us past the gatekeeper that checks whether there's available memory to
1570   // satisfy the allocation request.  The reality is the actual TLAB size is likely to be even smaller, because it will
1571   // depend on how much memory is available within mutator regions that are not yet fully used.
1572   HeapWord* result = allocate_memory_under_lock(smaller_req, in_new_region, is_promotion);
1573   if (result != nullptr) {
1574     req.set_actual_size(smaller_req.actual_size());
1575   }
1576   return result;
1577 }
1578 
1579 HeapWord* ShenandoahHeap::mem_allocate(size_t size,
1580                                         bool*  gc_overhead_limit_was_exceeded) {
1581   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
1582   return allocate_memory(req, false);
1583 }
1584 
1585 MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
1586                                                              size_t size,
1587                                                              Metaspace::MetadataType mdtype) {
1588   MetaWord* result;
1589 
1590   // Inform metaspace OOM to GC heuristics if class unloading is possible.
1591   ShenandoahHeuristics* h = global_generation()->heuristics();
1592   if (h->can_unload_classes()) {
1593     h->record_metaspace_oom();
1594   }
1595 
1596   // Expand and retry allocation
1597   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1598   if (result != nullptr) {
1599     return result;
1600   }
1601 
1602   // Start full GC
1603   collect(GCCause::_metadata_GC_clear_soft_refs);
1604 
1605   // Retry allocation
1606   result = loader_data->metaspace_non_null()->allocate(size, mdtype);
1607   if (result != nullptr) {
1608     return result;
1609   }
1610 
1611   // Expand and retry allocation
1612   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1613   if (result != nullptr) {
1614     return result;
1615   }
1616 
1617   // Out of memory
1618   return nullptr;
1619 }
1620 
1621 class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
1622 private:
1623   ShenandoahHeap* const _heap;
1624   Thread* const _thread;
1625 public:
1626   ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
1627     _heap(heap), _thread(Thread::current()) {}
1628 
1629   void do_object(oop p) {
1630     shenandoah_assert_marked(nullptr, p);
1631     if (!p->is_forwarded()) {
1632       _heap->evacuate_object(p, _thread);
1633     }
1634   }
1635 };
1636 
1637 class ShenandoahEvacuationTask : public WorkerTask {
1638 private:
1639   ShenandoahHeap* const _sh;
1640   ShenandoahCollectionSet* const _cs;
1641   bool _concurrent;
1642 public:
1643   ShenandoahEvacuationTask(ShenandoahHeap* sh,
1644                            ShenandoahCollectionSet* cs,
1645                            bool concurrent) :
1646     WorkerTask("Shenandoah Evacuation"),
1647     _sh(sh),
1648     _cs(cs),
1649     _concurrent(concurrent)
1650   {}
1651 
1652   void work(uint worker_id) {
1653     if (_concurrent) {
1654       ShenandoahConcurrentWorkerSession worker_session(worker_id);
1655       ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
1656       ShenandoahEvacOOMScope oom_evac_scope;
1657       do_work();
1658     } else {
1659       ShenandoahParallelWorkerSession worker_session(worker_id);
1660       ShenandoahEvacOOMScope oom_evac_scope;
1661       do_work();
1662     }
1663   }
1664 
1665 private:
1666   void do_work() {
1667     ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
1668     ShenandoahHeapRegion* r;
1669     while ((r = _cs->claim_next()) != nullptr) {
1670       assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
1671 
1672       _sh->marked_object_iterate(r, &cl);
1673 
1674       if (ShenandoahPacing) {
1675         _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
1676       }
1677       if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
1678         break;
1679       }
1680     }
1681   }
1682 };
1683 
1684 // Unlike ShenandoahEvacuationTask, this iterates over all regions rather than just the collection set.
1685 // This is needed in order to promote humongous start regions if age() >= tenuring threshold.
1686 class ShenandoahGenerationalEvacuationTask : public WorkerTask {
1687 private:
1688   ShenandoahHeap* const _sh;
1689   ShenandoahRegionIterator *_regions;
1690   bool _concurrent;
1691   uint _tenuring_threshold;
1692 
1693 public:
1694   ShenandoahGenerationalEvacuationTask(ShenandoahHeap* sh,
1695                                        ShenandoahRegionIterator* iterator,
1696                                        bool concurrent) :
1697     WorkerTask("Shenandoah Evacuation"),
1698     _sh(sh),
1699     _regions(iterator),
1700     _concurrent(concurrent),
1701     _tenuring_threshold(0)
1702   {
1703     if (_sh->mode()->is_generational()) {
1704       _tenuring_threshold = _sh->age_census()->tenuring_threshold();
1705     }
1706   }
1707 
1708   void work(uint worker_id) {
1709     if (_concurrent) {
1710       ShenandoahConcurrentWorkerSession worker_session(worker_id);
1711       ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
1712       ShenandoahEvacOOMScope oom_evac_scope;
1713       do_work();
1714     } else {
1715       ShenandoahParallelWorkerSession worker_session(worker_id);
1716       ShenandoahEvacOOMScope oom_evac_scope;
1717       do_work();
1718     }
1719   }
1720 
1721 private:
1722   void do_work() {
1723     ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
1724     ShenandoahHeapRegion* r;
1725     ShenandoahMarkingContext* const ctx = ShenandoahHeap::heap()->marking_context();
1726     size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
1727     size_t old_garbage_threshold = (region_size_bytes * ShenandoahOldGarbageThreshold) / 100;
1728     while ((r = _regions->next()) != nullptr) {
1729       log_debug(gc)("GenerationalEvacuationTask do_work(), looking at %s region " SIZE_FORMAT ", (age: %d) [%s, %s, %s]",
1730                     r->is_old()? "old": r->is_young()? "young": "free", r->index(), r->age(),
1731                     r->is_active()? "active": "inactive",
1732                     r->is_humongous()? (r->is_humongous_start()? "humongous_start": "humongous_continuation"): "regular",
1733                     r->is_cset()? "cset": "not-cset");
1734 
1735       if (r->is_cset()) {
1736         assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
1737         _sh->marked_object_iterate(r, &cl);
1738         if (ShenandoahPacing) {
1739           _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
1740         }
1741       } else if (r->is_young() && r->is_active() && (r->age() >= _tenuring_threshold)) {
1742         HeapWord* tams = ctx->top_at_mark_start(r);
1743         if (r->is_humongous_start()) {
1744           // We promote humongous_start regions along with their affiliated continuations during evacuation rather than
1745           // doing this work during a safepoint.  We cannot put humongous regions into the collection set because that
1746           // triggers the load-reference barrier (LRB) to copy on reference fetch.
1747           r->promote_humongous();
1748         } else if (r->is_regular() && (r->get_top_before_promote() != nullptr)) {
1749           assert(r->garbage_before_padded_for_promote() < old_garbage_threshold,
1750                  "Region " SIZE_FORMAT " has too much garbage for promotion", r->index());
1751           assert(r->get_top_before_promote() == tams,
1752                  "Region " SIZE_FORMAT " has been used for allocations before promotion", r->index());
1753           // Likewise, we cannot put promote-in-place regions into the collection set because that would also trigger
1754           // the LRB to copy on reference fetch.
1755           r->promote_in_place();
1756         }
1757         // Aged humongous continuation regions are handled with their start region.  If an aged regular region has
1758         // more garbage than ShenandoahOldGarbageThreshold, we'll promote by evacuation.  If there is room for evacuation
1759         // in this cycle, the region will be in the collection set.  If there is not room, the region will be promoted
1760         // by evacuation in some future GC cycle.
1761 
1762         // If an aged regular region has received allocations during the current cycle, we do not promote because the
1763         // newly allocated objects do not have appropriate age; this region's age will be reset to zero at end of cycle.
1764       }
1765       // else, region is free, or OLD, or not in collection set, or humongous_continuation,
1766       // or is young humongous_start that is too young to be promoted
1767 
1768       if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
1769         break;
1770       }
1771     }
1772   }
1773 };
1774 
1775 void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
1776   if (ShenandoahHeap::heap()->mode()->is_generational()) {
1777     ShenandoahRegionIterator regions;
1778     ShenandoahGenerationalEvacuationTask task(this, &regions, concurrent);
1779     workers()->run_task(&task);
1780   } else {
1781     ShenandoahEvacuationTask task(this, _collection_set, concurrent);
1782     workers()->run_task(&task);
1783   }
1784 }
1785 
1786 void ShenandoahHeap::trash_cset_regions() {
1787   ShenandoahHeapLocker locker(lock());
1788 
1789   ShenandoahCollectionSet* set = collection_set();
1790   ShenandoahHeapRegion* r;
1791   set->clear_current_index();
1792   while ((r = set->next()) != nullptr) {
1793     r->make_trash();
1794   }
1795   collection_set()->clear();
1796 }
1797 
1798 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
1799   st->print_cr("Heap Regions:");
1800   st->print_cr("Region state: EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HP=pinned humongous start");
1801   st->print_cr("              HC=humongous continuation, CS=collection set, TR=trash, P=pinned, CSP=pinned collection set");
1802   st->print_cr("BTE=bottom/top/end, TAMS=top-at-mark-start");
1803   st->print_cr("UWM=update watermark, U=used");
1804   st->print_cr("T=TLAB allocs, G=GCLAB allocs");
1805   st->print_cr("S=shared allocs, L=live data");
1806   st->print_cr("CP=critical pins");
1807 
1808   for (size_t i = 0; i < num_regions(); i++) {
1809     get_region(i)->print_on(st);
1810   }
1811 }
1812 
1813 size_t ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
1814   assert(start->is_humongous_start(), "reclaim regions starting with the first one");
1815 
1816   oop humongous_obj = cast_to_oop(start->bottom());
1817   size_t size = humongous_obj->size();
1818   size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
1819   size_t index = start->index() + required_regions - 1;
1820 
1821   assert(!start->has_live(), "liveness must be zero");
1822 
1823   for (size_t i = 0; i < required_regions; i++) {
1824     // Reclaim from tail. Otherwise, assertion fails when printing region to trace log,
1825     // as it expects that every region belongs to a humongous region starting with a humongous start region.
1826     ShenandoahHeapRegion* region = get_region(index--);
1827 
1828     assert(region->is_humongous(), "expect correct humongous start or continuation");
1829     assert(!region->is_cset(), "Humongous region should not be in collection set");
1830 
1831     region->make_trash_immediate();
1832   }
1833   return required_regions;
1834 }
1835 
1836 class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
1837 public:
1838   ShenandoahCheckCleanGCLABClosure() {}
1839   void do_thread(Thread* thread) {
1840     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1841     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1842     assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
1843 
1844     PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1845     assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
1846     assert(plab->words_remaining() == 0, "PLAB should not need retirement");
1847   }
1848 };
1849 
1850 class ShenandoahRetireGCLABClosure : public ThreadClosure {
1851 private:
1852   bool const _resize;
1853 public:
1854   ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
1855   void do_thread(Thread* thread) {
1856     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1857     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1858     gclab->retire();
1859     if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1860       ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1861     }
1862 
1863     PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1864     assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
1865 
1866     // There are two reasons to retire all plabs between old-gen evacuation passes.
1867     //  1. We need to make the plab memory parseable by remembered-set scanning.
1868     //  2. We need to establish a trustworthy UpdateWaterMark value within each old-gen heap region
1869     ShenandoahHeap::heap()->retire_plab(plab, thread);
1870     if (_resize && ShenandoahThreadLocalData::plab_size(thread) > 0) {
1871       ShenandoahThreadLocalData::set_plab_size(thread, 0);
1872     }
1873   }
1874 };
1875 
1876 void ShenandoahHeap::labs_make_parsable() {
1877   assert(UseTLAB, "Only call with UseTLAB");
1878 
1879   ShenandoahRetireGCLABClosure cl(false);
1880 
1881   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1882     ThreadLocalAllocBuffer& tlab = t->tlab();
1883     tlab.make_parsable();
1884     cl.do_thread(t);
1885   }
1886 
1887   workers()->threads_do(&cl);
1888 }
1889 
1890 void ShenandoahHeap::tlabs_retire(bool resize) {
1891   assert(UseTLAB, "Only call with UseTLAB");
1892   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1893 
1894   ThreadLocalAllocStats stats;
1895 
1896   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1897     ThreadLocalAllocBuffer& tlab = t->tlab();
1898     tlab.retire(&stats);
1899     if (resize) {
1900       tlab.resize();
1901     }
1902   }
1903 
1904   stats.publish();
1905 
1906 #ifdef ASSERT
1907   ShenandoahCheckCleanGCLABClosure cl;
1908   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1909     cl.do_thread(t);
1910   }
1911   workers()->threads_do(&cl);
1912 #endif
1913 }
1914 
1915 void ShenandoahHeap::gclabs_retire(bool resize) {
1916   assert(UseTLAB, "Only call with UseTLAB");
1917   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1918 
1919   ShenandoahRetireGCLABClosure cl(resize);
1920   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1921     cl.do_thread(t);
1922   }
1923   workers()->threads_do(&cl);
1924 
1925   if (safepoint_workers() != nullptr) {
1926     safepoint_workers()->threads_do(&cl);
1927   }
1928 }
1929 
1930 class ShenandoahTagGCLABClosure : public ThreadClosure {
1931 public:
1932   void do_thread(Thread* thread) {
1933     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1934     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1935     if (gclab->words_remaining() > 0) {
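           // allocate(0) is a zero-sized allocation: it returns the current lab top without consuming
           // space, which identifies the region holding the unretired remainder of this GCLAB.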
1936       ShenandoahHeapRegion* r = ShenandoahHeap::heap()->heap_region_containing(gclab->allocate(0));
1937       r->set_young_lab_flag();
1938     }
1939   }
1940 };
1941 
1942 void ShenandoahHeap::set_young_lab_region_flags() {
1943   if (!UseTLAB) {
1944     return;
1945   }
1946   for (size_t i = 0; i < _num_regions; i++) {
1947     _regions[i]->clear_young_lab_flags();
1948   }
1949   ShenandoahTagGCLABClosure cl;
1950   workers()->threads_do(&cl);
1951   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1952     cl.do_thread(t);
1953     ThreadLocalAllocBuffer& tlab = t->tlab();
1954     if (tlab.end() != nullptr) {
1955       ShenandoahHeapRegion* r = heap_region_containing(tlab.start());
1956       r->set_young_lab_flag();
1957     }
1958   }
1959 }
1960 
1961 // Returns size in bytes
1962 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1963   if (ShenandoahElasticTLAB) {
1964     if (mode()->is_generational()) {
1965       return MIN2(ShenandoahHeapRegion::max_tlab_size_bytes(), young_generation()->available());
1966     } else {
1967       // With Elastic TLABs, return the max allowed size, and let the allocation path
1968       // figure out the safe size for current allocation.
1969       return ShenandoahHeapRegion::max_tlab_size_bytes();
1970     }
1971   } else {
1972     return MIN2(_free_set->unsafe_peek_free(), ShenandoahHeapRegion::max_tlab_size_bytes());
1973   }
1974 }
1975 
1976 size_t ShenandoahHeap::max_tlab_size() const {
1977   // Returns size in words
1978   return ShenandoahHeapRegion::max_tlab_size_words();
1979 }
1980 
1981 void ShenandoahHeap::collect(GCCause::Cause cause) {
1982   control_thread()->request_gc(cause);
1983 }
1984 
1985 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
1986   //assert(false, "Shouldn't need to do full collections");
1987 }
1988 
1989 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1990   ShenandoahHeapRegion* r = heap_region_containing(addr);
1991   if (r != nullptr) {
1992     return r->block_start(addr);
1993   }
1994   return nullptr;
1995 }
1996 
1997 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1998   ShenandoahHeapRegion* r = heap_region_containing(addr);
1999   return r->block_is_obj(addr);
2000 }
2001 
2002 bool ShenandoahHeap::print_location(outputStream* st, void* addr) const {
2003   return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr);
2004 }
2005 
2006 void ShenandoahHeap::prepare_for_verify() {
2007   if (SafepointSynchronize::is_at_safepoint() && UseTLAB) {
2008     labs_make_parsable();
2009   }
2010 }
2011 
2012 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
2013   if (_shenandoah_policy->is_at_shutdown()) {
2014     return;
2015   }
2016 
2017   tcl->do_thread(_control_thread);
2018   tcl->do_thread(_regulator_thread);
2019   workers()->threads_do(tcl);
2020   if (_safepoint_workers != nullptr) {
2021     _safepoint_workers->threads_do(tcl);
2022   }
2023 }
2024 
2025 void ShenandoahHeap::print_tracing_info() const {
2026   LogTarget(Info, gc, stats) lt;
2027   if (lt.is_enabled()) {
2028     ResourceMark rm;
2029     LogStream ls(lt);
2030 
2031     phase_timings()->print_global_on(&ls);
2032 
2033     ls.cr();
2034     ls.cr();
2035 
2036     shenandoah_policy()->print_gc_stats(&ls);
2037 
2038     ls.cr();
2039 
2040     evac_tracker()->print_global_on(&ls);
2041 
2042     ls.cr();
2043     ls.cr();
2044   }
2045 }
2046 
2047 void ShenandoahHeap::on_cycle_start(GCCause::Cause cause, ShenandoahGeneration* generation) {
2048   set_gc_cause(cause);
2049   set_gc_generation(generation);
2050 
2051   shenandoah_policy()->record_cycle_start();
2052   generation->heuristics()->record_cycle_start();
2053 }
2054 
2055 void ShenandoahHeap::on_cycle_end(ShenandoahGeneration* generation) {
2056   generation->heuristics()->record_cycle_end();
2057   if (mode()->is_generational() && (generation->is_global() || upgraded_to_full())) {
2058     // If we just completed a GLOBAL GC, claim credit for completion of young-gen and old-gen GC as well
2059     young_generation()->heuristics()->record_cycle_end();
2060     old_generation()->heuristics()->record_cycle_end();
2061   }
2062   set_gc_cause(GCCause::_no_gc);
2063 }
2064 
2065 void ShenandoahHeap::verify(VerifyOption vo) {
2066   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
2067     if (ShenandoahVerify) {
2068       verifier()->verify_generic(vo);
2069     } else {
2070       // TODO: Consider allocating verification bitmaps on demand,
2071       // and turn this on unconditionally.
2072     }
2073   }
2074 }

2075 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
2076   return _free_set->capacity();
2077 }
2078 
2079 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
2080 private:
2081   MarkBitMap* _bitmap;
2082   ShenandoahScanObjectStack* _oop_stack;
2083   ShenandoahHeap* const _heap;
2084   ShenandoahMarkingContext* const _marking_context;
2085 
2086   template <class T>
2087   void do_oop_work(T* p) {
2088     T o = RawAccess<>::oop_load(p);
2089     if (!CompressedOops::is_null(o)) {
2090       oop obj = CompressedOops::decode_not_null(o);
2091       if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
2092         // There may be dead oops in weak roots in concurrent root phase, do not touch them.
2093         return;
2094       }
2095       obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
2096 
2097       assert(oopDesc::is_oop(obj), "must be a valid oop");
2098       if (!_bitmap->is_marked(obj)) {
2099         _bitmap->mark(obj);
2100         _oop_stack->push(obj);
2101       }
2102     }
2103   }
2104 public:
2105   ObjectIterateScanRootClosure(MarkBitMap* bitmap, ShenandoahScanObjectStack* oop_stack) :
2106     _bitmap(bitmap), _oop_stack(oop_stack), _heap(ShenandoahHeap::heap()),
2107     _marking_context(_heap->marking_context()) {}
2108   void do_oop(oop* p)       { do_oop_work(p); }
2109   void do_oop(narrowOop* p) { do_oop_work(p); }
2110 };
2111 
2112 /*
2113  * This is public API, used in preparation for object_iterate().
2114  * Since we don't do a linear scan of the heap in object_iterate() (see comment below), we don't
2115  * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can
2116  * control, we call SH::tlabs_retire and SH::gclabs_retire.
2117  */
2118 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
2119   // No-op.
2120 }
2121 
2122 /*
2123  * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
2124  *
2125  * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
2126  * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
2127  * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
2128  * scanning therefore depends on having a valid marking bitmap to support it. However, we only
2129  * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
2130  * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
2131  * wiped the bitmap in preparation for next marking).
2132  *
2133  * For all those reasons, we implement object iteration as a single marking traversal, reporting
2134  * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
2135  * is allowed to report dead objects, but is not required to do so.
2136  */
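// A minimal usage sketch (hypothetical closure, not part of this file): callers such as heap dumpers
// drive this API with an ObjectClosure, e.g.
//
//   class CountingClosure : public ObjectClosure {
//   public:
//     size_t _count = 0;
//     void do_object(oop obj) { _count++; }
//   };
//
//   CountingClosure cl;
//   ShenandoahHeap::heap()->object_iterate(&cl);   // must be called at a safepoint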
2137 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
2138   // Reset bitmap
2139   if (!prepare_aux_bitmap_for_iteration())
2140     return;
2141 
2142   ShenandoahScanObjectStack oop_stack;
2143   ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
2144   // Seed the stack with root scan
2145   scan_roots_for_iteration(&oop_stack, &oops);
2146 
2147   // Work through the oop stack to traverse heap
2148   while (!oop_stack.is_empty()) {
2149     oop obj = oop_stack.pop();
2150     assert(oopDesc::is_oop(obj), "must be a valid oop");
2151     cl->do_object(obj);
2152     obj->oop_iterate(&oops);
2153   }
2154 
2155   assert(oop_stack.is_empty(), "should be empty");
2156   // Reclaim bitmap
2157   reclaim_aux_bitmap_for_iteration();
2158 }
2159 
2160 bool ShenandoahHeap::prepare_aux_bitmap_for_iteration() {
2161   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
2162 
2163   if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
2164     log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
2165     return false;
2166   }
2167   // Reset bitmap
2168   _aux_bit_map.clear();
2169   return true;
2170 }
2171 
2172 void ShenandoahHeap::scan_roots_for_iteration(ShenandoahScanObjectStack* oop_stack, ObjectIterateScanRootClosure* oops) {
2173   // Process GC roots according to current GC cycle
2174   // This populates the work stack with initial objects
2175   // It is important to relinquish the associated locks before diving
2176   // into heap dumper
2177   uint n_workers = safepoint_workers() != nullptr ? safepoint_workers()->active_workers() : 1;
2178   ShenandoahHeapIterationRootScanner rp(n_workers);
2179   rp.roots_do(oops);
2180 }
2181 
2182 void ShenandoahHeap::reclaim_aux_bitmap_for_iteration() {
2183   if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
2184     log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
2185   }
2186 }
2187 
2188 // Closure for iterating objects in parallel
2189 class ShenandoahObjectIterateParScanClosure : public BasicOopIterateClosure {
2190 private:
2191   MarkBitMap* _bitmap;
2192   ShenandoahObjToScanQueue* _queue;
2193   ShenandoahHeap* const _heap;
2194   ShenandoahMarkingContext* const _marking_context;
2195 
2196   template <class T>
2197   void do_oop_work(T* p) {
2198     T o = RawAccess<>::oop_load(p);
2199     if (!CompressedOops::is_null(o)) {
2200       oop obj = CompressedOops::decode_not_null(o);
2201       if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
2202         // There may be dead oops in weak roots in concurrent root phase, do not touch them.
2203         return;
2204       }
2205       obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
2206 
2207       assert(oopDesc::is_oop(obj), "Must be a valid oop");
2208       if (_bitmap->par_mark(obj)) {
2209         _queue->push(ShenandoahMarkTask(obj));
2210       }
2211     }
2212   }
2213 public:
2214   ShenandoahObjectIterateParScanClosure(MarkBitMap* bitmap, ShenandoahObjToScanQueue* q) :
2215     _bitmap(bitmap), _queue(q), _heap(ShenandoahHeap::heap()),
2216     _marking_context(_heap->marking_context()) {}
2217   void do_oop(oop* p)       { do_oop_work(p); }
2218   void do_oop(narrowOop* p) { do_oop_work(p); }
2219 };
2220 
2221 // Object iterator for parallel heap iteration.
2222 // The root scanning phase happens during construction, as preparation of the
2223 // parallel marking queues.
2224 // Every worker processes its own marking queue. Work-stealing is used
2225 // to balance the workload.
2226 class ShenandoahParallelObjectIterator : public ParallelObjectIteratorImpl {
2227 private:
2228   uint                         _num_workers;
2229   bool                         _init_ready;
2230   MarkBitMap*                  _aux_bit_map;
2231   ShenandoahHeap*              _heap;
2232   ShenandoahScanObjectStack    _roots_stack; // global roots stack
2233   ShenandoahObjToScanQueueSet* _task_queues;
2234 public:
2235   ShenandoahParallelObjectIterator(uint num_workers, MarkBitMap* bitmap) :
2236         _num_workers(num_workers),
2237         _init_ready(false),
2238         _aux_bit_map(bitmap),
2239         _heap(ShenandoahHeap::heap()) {
2240     // Initialize bitmap
2241     _init_ready = _heap->prepare_aux_bitmap_for_iteration();
2242     if (!_init_ready) {
2243       return;
2244     }
2245 
2246     ObjectIterateScanRootClosure oops(_aux_bit_map, &_roots_stack);
2247     _heap->scan_roots_for_iteration(&_roots_stack, &oops);
2248 
2249     _init_ready = prepare_worker_queues();
2250   }
2251 
2252   ~ShenandoahParallelObjectIterator() {
2253     // Reclaim bitmap
2254     _heap->reclaim_aux_bitmap_for_iteration();
2255     // Reclaim queue for workers
2256     if (_task_queues != nullptr) {
2257       for (uint i = 0; i < _num_workers; ++i) {
2258         ShenandoahObjToScanQueue* q = _task_queues->queue(i);
2259         if (q != nullptr) {
2260           delete q;
2261           _task_queues->register_queue(i, nullptr);
2262         }
2263       }
2264       delete _task_queues;
2265       _task_queues = nullptr;
2266     }
2267   }
2268 
2269   virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
2270     if (_init_ready) {
2271       object_iterate_parallel(cl, worker_id, _task_queues);
2272     }
2273   }
2274 
2275 private:
2276   // Divide global root_stack into worker queues
2277   bool prepare_worker_queues() {
2278     _task_queues = new ShenandoahObjToScanQueueSet((int) _num_workers);
2279     // Initialize a queue for every worker
2280     for (uint i = 0; i < _num_workers; ++i) {
2281       ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
2282       _task_queues->register_queue(i, task_queue);
2283     }
2284     // Divide roots among the workers. Assume that the object reference distribution
2285     // is related to root kind; use round-robin so that every worker has the same chance
2286     // to process every kind of root.
2287     size_t roots_num = _roots_stack.size();
2288     if (roots_num == 0) {
2289       // No work to do
2290       return false;
2291     }
2292 
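     // For example, 10 roots distributed round-robin across 4 workers yield queues of 3, 3, 2 and 2 tasks.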
2293     for (uint j = 0; j < roots_num; j++) {
2294       uint stack_id = j % _num_workers;
2295       oop obj = _roots_stack.pop();
2296       _task_queues->queue(stack_id)->push(ShenandoahMarkTask(obj));
2297     }
2298     return true;
2299   }
2300 
2301   void object_iterate_parallel(ObjectClosure* cl,
2302                                uint worker_id,
2303                                ShenandoahObjToScanQueueSet* queue_set) {
2304     assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
2305     assert(queue_set != nullptr, "task queue must not be null");
2306 
2307     ShenandoahObjToScanQueue* q = queue_set->queue(worker_id);
2308     assert(q != nullptr, "object iterate queue must not be null");
2309 
2310     ShenandoahMarkTask t;
2311     ShenandoahObjectIterateParScanClosure oops(_aux_bit_map, q);
2312 
2313     // Work through the queue to traverse heap.
2314     // Steal when there is no task in queue.
2315     while (q->pop(t) || queue_set->steal(worker_id, t)) {
2316       oop obj = t.obj();
2317       assert(oopDesc::is_oop(obj), "must be a valid oop");
2318       cl->do_object(obj);
2319       obj->oop_iterate(&oops);
2320     }
2321     assert(q->is_empty(), "should be empty");
2322   }
2323 };
2324 
2325 ParallelObjectIteratorImpl* ShenandoahHeap::parallel_object_iterator(uint workers) {
2326   return new ShenandoahParallelObjectIterator(workers, &_aux_bit_map);
2327 }
2328 
2329 // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
2330 void ShenandoahHeap::keep_alive(oop obj) {
2331   if (is_concurrent_mark_in_progress() && (obj != nullptr)) {
2332     ShenandoahBarrierSet::barrier_set()->enqueue(obj);
2333   }
2334 }
2335 
2336 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
2337   for (size_t i = 0; i < num_regions(); i++) {
2338     ShenandoahHeapRegion* current = get_region(i);
2339     blk->heap_region_do(current);
2340   }
2341 }
2342 
2343 class ShenandoahParallelHeapRegionTask : public WorkerTask {
2344 private:
2345   ShenandoahHeap* const _heap;
2346   ShenandoahHeapRegionClosure* const _blk;
2347 
2348   shenandoah_padding(0);
2349   volatile size_t _index;
2350   shenandoah_padding(1);
2351 
2352 public:
2353   ShenandoahParallelHeapRegionTask(ShenandoahHeapRegionClosure* blk) :
2354           WorkerTask("Shenandoah Parallel Region Operation"),
2355           _heap(ShenandoahHeap::heap()), _blk(blk), _index(0) {}
2356 
2357   void work(uint worker_id) {
2358     ShenandoahParallelWorkerSession worker_session(worker_id);
2359     size_t stride = ShenandoahParallelRegionStride;
2360 
2361     size_t max = _heap->num_regions();
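     // Workers claim disjoint [cur, cur + stride) windows of the region index space; for example,
     // with a stride of 4 and 10 regions, the claimed windows are [0,4), [4,8) and [8,10).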
2362     while (Atomic::load(&_index) < max) {
2363       size_t cur = Atomic::fetch_then_add(&_index, stride, memory_order_relaxed);
2364       size_t start = cur;
2365       size_t end = MIN2(cur + stride, max);
2366       if (start >= max) break;
2367 
2368       for (size_t i = cur; i < end; i++) {
2369         ShenandoahHeapRegion* current = _heap->get_region(i);
2370         _blk->heap_region_do(current);
2371       }
2372     }
2373   }
2374 };
2375 
2376 void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
2377   assert(blk->is_thread_safe(), "Only thread-safe closures here");
2378   if (num_regions() > ShenandoahParallelRegionStride) {
2379     ShenandoahParallelHeapRegionTask task(blk);
2380     workers()->run_task(&task);
2381   } else {
2382     heap_region_iterate(blk);
2383   }
2384 }
2385 
2386 class ShenandoahRendezvousClosure : public HandshakeClosure {
2387 public:
2388   inline ShenandoahRendezvousClosure() : HandshakeClosure("ShenandoahRendezvous") {}
2389   inline void do_thread(Thread* thread) {}
2390 };
2391 
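// The empty handshake above serves as a rendezvous: once Handshake::execute() returns, every Java
// thread has passed a handshake poll, so any state published before the rendezvous is visible to all
// mutator threads.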
2392 void ShenandoahHeap::rendezvous_threads() {
2393   ShenandoahRendezvousClosure cl;
2394   Handshake::execute(&cl);
2395 }
2396 
2397 void ShenandoahHeap::recycle_trash() {
2398   free_set()->recycle_trash();
2399 }
2400 
2401 void ShenandoahHeap::do_class_unloading() {
2402   _unloader.unload();
2403 }
2404 
2405 void ShenandoahHeap::stw_weak_refs(bool full_gc) {
2406   // Weak refs processing
2407   ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
2408                                                 : ShenandoahPhaseTimings::degen_gc_weakrefs;
2409   ShenandoahTimingsTracker t(phase);
2410   ShenandoahGCWorkerPhase worker_phase(phase);
2411   active_generation()->ref_processor()->process_references(phase, workers(), false /* concurrent */);
2412 }
2413 
2414 void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
2415   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2416 
2417   // Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to
2418   // make them parsable for update code to work correctly. Plus, we can compute new sizes
2419   // for future GCLABs here.
2420   if (UseTLAB) {
2421     ShenandoahGCPhase phase(concurrent ?
2422                             ShenandoahPhaseTimings::init_update_refs_manage_gclabs :
2423                             ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
2424     gclabs_retire(ResizeTLAB);
2425   }
2426 
2427   _update_refs_iterator.reset();
2428 }
2429 
2430 void ShenandoahHeap::set_gc_state_all_threads(char state) {
2431   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
2432     ShenandoahThreadLocalData::set_gc_state(t, state);
2433   }
2434 }
2435 
2436 void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) {
2437   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should really be Shenandoah safepoint");
2438   _gc_state.set_cond(mask, value);
2439   set_gc_state_all_threads(_gc_state.raw_value());
2440 }
2441 
2442 void ShenandoahHeap::set_evacuation_reserve_quantities(bool is_valid) {
2443   _has_evacuation_reserve_quantities = is_valid;
2444 }
2445 
2446 void ShenandoahHeap::set_concurrent_young_mark_in_progress(bool in_progress) {
2447   uint mask;
2448   assert(!has_forwarded_objects(), "Young marking is not concurrent with evacuation");
2449   if (!in_progress && is_concurrent_old_mark_in_progress()) {
2450     assert(mode()->is_generational(), "Only generational GC has old marking");
2451     assert(_gc_state.is_set(MARKING), "concurrent_old_marking_in_progress implies MARKING");
2452     // If old-marking is in progress when we turn off YOUNG_MARKING, leave MARKING (and OLD_MARKING) on
2453     mask = YOUNG_MARKING;
2454   } else {
2455     mask = MARKING | YOUNG_MARKING;
2456   }
2457   set_gc_state_mask(mask, in_progress);
2458   manage_satb_barrier(in_progress);
2459 }
2460 
2461 void ShenandoahHeap::set_concurrent_old_mark_in_progress(bool in_progress) {
2462 #ifdef ASSERT
2463   // has_forwarded_objects() iff UPDATEREFS or EVACUATION
2464   bool has_forwarded = has_forwarded_objects();
2465   bool updating_or_evacuating = _gc_state.is_set(UPDATEREFS | EVACUATION);
2466   assert (has_forwarded == updating_or_evacuating, "Has forwarded objects iff updating or evacuating");
2467 #endif
2468   if (!in_progress && is_concurrent_young_mark_in_progress()) {
2469     // If young-marking is in progress when we turn off OLD_MARKING, leave MARKING (and YOUNG_MARKING) on
2470     assert(_gc_state.is_set(MARKING), "concurrent_young_marking_in_progress implies MARKING");
2471     set_gc_state_mask(OLD_MARKING, in_progress);
2472   } else {
2473     set_gc_state_mask(MARKING | OLD_MARKING, in_progress);
2474   }
2475   manage_satb_barrier(in_progress);
2476 }
2477 
2478 void ShenandoahHeap::set_prepare_for_old_mark_in_progress(bool in_progress) {
2479   // Unlike other set-gc-state functions, this may happen outside a safepoint.
2480   // It is only set and queried by the control thread, so there are no coherence issues.
2481   _prepare_for_old_mark = in_progress;
2482 }
2483 
2484 void ShenandoahHeap::set_aging_cycle(bool in_progress) {
2485   _is_aging_cycle.set_cond(in_progress);
2486 }
2487 
2488 void ShenandoahHeap::manage_satb_barrier(bool active) {
2489   if (is_concurrent_mark_in_progress()) {
2490     // Ignore request to deactivate barrier while concurrent mark is in progress.
2491     // Do not attempt to re-activate the barrier if it is already active.
2492     if (active && !ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
2493       ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
2494     }
2495   } else {
2496     // No concurrent marking is in progress, so honor the request to deactivate,
2497     // but only if the barrier is currently active.
2498     if (!active && ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
2499       ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
2500     }
2501   }
2502 }
2503 
2504 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
2505   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
2506   set_gc_state_mask(EVACUATION, in_progress);
2507 }
2508 
2509 void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
2510   if (in_progress) {
2511     _concurrent_strong_root_in_progress.set();
2512   } else {
2513     _concurrent_strong_root_in_progress.unset();
2514   }
2515 }
2516 
2517 void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
2518   set_gc_state_mask(WEAK_ROOTS, cond);
2519 }
2520 
2521 GCTracer* ShenandoahHeap::tracer() {
2522   return shenandoah_policy()->tracer();
2523 }
2524 
2525 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
2526   return _free_set->used();
2527 }
2528 
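     // Atomically transition the cancellation flag from CANCELLABLE to CANCELLED.  Only the
     // caller that observes CANCELLABLE as the previous value wins the race and returns true.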
2529 bool ShenandoahHeap::try_cancel_gc() {
2530   jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
2531   return prev == CANCELLABLE;
2532 }
2533 
2534 void ShenandoahHeap::cancel_concurrent_mark() {
2535   _young_generation->cancel_marking();
2536   _old_generation->cancel_marking();
2537   _global_generation->cancel_marking();
2538 
2539   ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
2540 }
2541 
2542 void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
2543   if (try_cancel_gc()) {
2544     FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
2545     log_info(gc)("%s", msg.buffer());
2546     Events::log(Thread::current(), "%s", msg.buffer());
2547     _cancel_requested_time = os::elapsedTime();
2548     if (cause == GCCause::_shenandoah_upgrade_to_full_gc) {
2549       _upgraded_to_full = true;
2550     }
2551   }
2552 }
2553 
2554 uint ShenandoahHeap::max_workers() {
2555   return _max_workers;
2556 }
2557 
2558 void ShenandoahHeap::stop() {
2559   // The shutdown sequence should be able to terminate when GC is running.
2560 
2561   // Step 1. Notify policy to disable event recording and prevent visiting gc threads during shutdown
2562   _shenandoah_policy->record_shutdown();
2563 
2564   // Step 2. Stop requesting collections.
2565   regulator_thread()->stop();
2566 
2567   // Step 3. Notify control thread that we are in shutdown.
2568   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
2569   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
2570   control_thread()->prepare_for_graceful_shutdown();
2571 
2572   // Step 4. Notify GC workers that we are cancelling GC.
2573   cancel_gc(GCCause::_shenandoah_stop_vm);
2574 
2575   // Step 5. Wait until GC worker exits normally.
2576   control_thread()->stop();
2577 }
2578 
2579 void ShenandoahHeap::stw_unload_classes(bool full_gc) {
2580   if (!unload_classes()) return;
2581   // Unload classes and purge SystemDictionary.
2582   {
2583     ShenandoahPhaseTimings::Phase phase = full_gc ?
2584                                           ShenandoahPhaseTimings::full_gc_purge_class_unload :
2585                                           ShenandoahPhaseTimings::degen_gc_purge_class_unload;
2586     ShenandoahIsAliveSelector is_alive;
2587     CodeCache::UnloadingScope scope(is_alive.is_alive_closure());
2588     ShenandoahGCPhase gc_phase(phase);
2589     ShenandoahGCWorkerPhase worker_phase(phase);
2590     bool purged_class = SystemDictionary::do_unloading(gc_timer());
2591 
2592     uint num_workers = _workers->active_workers();
2593     ShenandoahClassUnloadingTask unlink_task(phase, num_workers, purged_class);
2594     _workers->run_task(&unlink_task);
2595   }
2596 
2597   {
2598     ShenandoahGCPhase phase(full_gc ?
2599                             ShenandoahPhaseTimings::full_gc_purge_cldg :
2600                             ShenandoahPhaseTimings::degen_gc_purge_cldg);
2601     ClassLoaderDataGraph::purge(/*at_safepoint*/true);
2602   }
2603   // Resize and verify metaspace
2604   MetaspaceGC::compute_new_size();
2605   DEBUG_ONLY(MetaspaceUtils::verify();)
2606 }
2607 
2608 // Weak roots are either pre-evacuated (final mark) or updated (final updaterefs),
2609 // so they should not have forwarded oops.
2610 // However, we do need to "null" dead oops in the roots, if that cannot be done
2611 // in concurrent cycles.
2612 void ShenandoahHeap::stw_process_weak_roots(bool full_gc) {
2613   uint num_workers = _workers->active_workers();
2614   ShenandoahPhaseTimings::Phase timing_phase = full_gc ?
2615                                                ShenandoahPhaseTimings::full_gc_purge_weak_par :
2616                                                ShenandoahPhaseTimings::degen_gc_purge_weak_par;
2617   ShenandoahGCPhase phase(timing_phase);
2618   ShenandoahGCWorkerPhase worker_phase(timing_phase);
2619   // Cleanup weak roots
2620   if (has_forwarded_objects()) {
2621     ShenandoahForwardedIsAliveClosure is_alive;
2622     ShenandoahUpdateRefsClosure keep_alive;
2623     ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveClosure, ShenandoahUpdateRefsClosure>
2624       cleaning_task(timing_phase, &is_alive, &keep_alive, num_workers);
2625     _workers->run_task(&cleaning_task);
2626   } else {
2627     ShenandoahIsAliveClosure is_alive;
2628 #ifdef ASSERT
2629     ShenandoahAssertNotForwardedClosure verify_cl;
2630     ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, ShenandoahAssertNotForwardedClosure>
2631       cleaning_task(timing_phase, &is_alive, &verify_cl, num_workers);
2632 #else
2633     ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, DoNothingClosure>
2634       cleaning_task(timing_phase, &is_alive, &do_nothing_cl, num_workers);
2635 #endif
2636     _workers->run_task(&cleaning_task);
2637   }
2638 }
2639 
2640 void ShenandoahHeap::parallel_cleaning(bool full_gc) {
2641   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2642   assert(is_stw_gc_in_progress(), "Only for Degenerated and Full GC");
2643   ShenandoahGCPhase phase(full_gc ?
2644                           ShenandoahPhaseTimings::full_gc_purge :
2645                           ShenandoahPhaseTimings::degen_gc_purge);
2646   stw_weak_refs(full_gc);
2647   stw_process_weak_roots(full_gc);
2648   stw_unload_classes(full_gc);
2649 }
2650 
2651 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
2652   set_gc_state_mask(HAS_FORWARDED, cond);
2653 }
2654 
2655 void ShenandoahHeap::set_unload_classes(bool uc) {
2656   _unload_classes.set_cond(uc);
2657 }
2658 
2659 bool ShenandoahHeap::unload_classes() const {
2660   return _unload_classes.is_set();
2661 }
2662 
2663 address ShenandoahHeap::in_cset_fast_test_addr() {
2664   ShenandoahHeap* heap = ShenandoahHeap::heap();
2665   assert(heap->collection_set() != nullptr, "Sanity");
2666   return (address) heap->collection_set()->biased_map_address();
2667 }
2668 
2669 address ShenandoahHeap::gc_state_addr() {
2670   return (address) ShenandoahHeap::heap()->_gc_state.addr_of();
2671 }
2672 
2673 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
2674   if (mode()->is_generational()) {
2675     young_generation()->reset_bytes_allocated_since_gc_start();
2676     old_generation()->reset_bytes_allocated_since_gc_start();
2677   }
2678 
2679   global_generation()->reset_bytes_allocated_since_gc_start();
2680 }
2681 
2682 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
2683   _degenerated_gc_in_progress.set_cond(in_progress);
2684 }
2685 
2686 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2687   _full_gc_in_progress.set_cond(in_progress);
2688 }
2689 
2690 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2691   assert (is_full_gc_in_progress(), "should be");
2692   _full_gc_move_in_progress.set_cond(in_progress);
2693 }
2694 
2695 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2696   set_gc_state_mask(UPDATEREFS, in_progress);
2697 }
2698 
2699 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2700   ShenandoahCodeRoots::register_nmethod(nm);
2701 }
2702 
2703 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
2704   ShenandoahCodeRoots::unregister_nmethod(nm);
2705 }
2706 
2707 void ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
2708   heap_region_containing(o)->record_pin();
2709 }
2710 
2711 void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
2712   ShenandoahHeapRegion* r = heap_region_containing(o);
2713   assert(r != nullptr, "Sanity");
2714   assert(r->pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", r->index());
2715   r->record_unpin();
2716 }
2717 
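     // Under the heap lock, reconcile each active region's pinned state with its current pin
     // count (pins are recorded by pin_object()/unpin_object() above).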
2718 void ShenandoahHeap::sync_pinned_region_status() {
2719   ShenandoahHeapLocker locker(lock());
2720 
2721   for (size_t i = 0; i < num_regions(); i++) {
2722     ShenandoahHeapRegion *r = get_region(i);
2723     if (r->is_active()) {
2724       if (r->is_pinned()) {
2725         if (r->pin_count() == 0) {
2726           r->make_unpinned();
2727         }
2728       } else {
2729         if (r->pin_count() > 0) {
2730           r->make_pinned();
2731         }
2732       }
2733     }
2734   }
2735 
2736   assert_pinned_region_status();
2737 }
2738 
2739 #ifdef ASSERT
2740 void ShenandoahHeap::assert_pinned_region_status() {
2741   for (size_t i = 0; i < num_regions(); i++) {
2742     ShenandoahHeapRegion* r = get_region(i);
2743     if (active_generation()->contains(r)) {
2744       assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
2745              "Region " SIZE_FORMAT " pinning status is inconsistent", i);
2746     }
2747   }
2748 }
2749 #endif
2750 
2751 ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
2752   return _gc_timer;
2753 }
2754 
2755 void ShenandoahHeap::prepare_concurrent_roots() {
2756   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2757   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2758   set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
2759   set_concurrent_weak_root_in_progress(true);
2760   if (unload_classes()) {
2761     _unloader.prepare();
2762   }
2763 }
2764 
2765 void ShenandoahHeap::finish_concurrent_roots() {
2766   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2767   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2768   if (unload_classes()) {
2769     _unloader.finish();
2770   }
2771 }
2772 
2773 #ifdef ASSERT
2774 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
2775   assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
2776 
2777   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
2778     if (UseDynamicNumberOfGCThreads) {
2779       assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");
2780     } else {
2781       // Use ParallelGCThreads inside safepoints
2782       assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints");
2783     }
2784   } else {
2785     if (UseDynamicNumberOfGCThreads) {
2786       assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
2787     } else {
2788       // Use ConcGCThreads outside safepoints
2789       assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
2790     }
2791   }
2792 }
2793 #endif
2794 
2795 ShenandoahVerifier* ShenandoahHeap::verifier() {
2796   guarantee(ShenandoahVerify, "Should be enabled");
2797   assert (_verifier != nullptr, "sanity");
2798   return _verifier;
2799 }
2800 
2801 template<bool CONCURRENT>
2802 class ShenandoahUpdateHeapRefsTask : public WorkerTask {
2803 private:
2804   ShenandoahHeap* _heap;
2805   ShenandoahRegionIterator* _regions;
2806   ShenandoahRegionChunkIterator* _work_chunks;
2807 
2808 public:
2809   explicit ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions,
2810                                         ShenandoahRegionChunkIterator* work_chunks) :
2811     WorkerTask("Shenandoah Update References"),
2812     _heap(ShenandoahHeap::heap()),
2813     _regions(regions),
2814     _work_chunks(work_chunks)
2815   {
2816   }
2817 
2818   void work(uint worker_id) {
2819     if (CONCURRENT) {
2820       ShenandoahConcurrentWorkerSession worker_session(worker_id);
2821       ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
2822       do_work<ShenandoahConcUpdateRefsClosure>(worker_id);
2823     } else {
2824       ShenandoahParallelWorkerSession worker_session(worker_id);
2825       do_work<ShenandoahSTWUpdateRefsClosure>(worker_id);
2826     }
2827   }
2828 
2829 private:
2830   template<class T>
2831   void do_work(uint worker_id) {
2832     T cl;
2833     if (CONCURRENT && (worker_id == 0)) {
2834       // We ask the first worker to replenish the Mutator free set by moving regions previously reserved to hold the
2835       // results of evacuation.  These reserves are no longer necessary because evacuation has completed.
2836       size_t cset_regions = _heap->collection_set()->count();
2837       // We cannot transfer any more regions than will be reclaimed when the existing collection set is recycled, because
2838       // we need the reclaimed collection set regions to replenish the collector reserves
2839       _heap->free_set()->move_collector_sets_to_mutator(cset_regions);
2840     }
2841     // If !CONCURRENT, there's no value in expanding Mutator free set
2842 
2843     ShenandoahHeapRegion* r = _regions->next();
2844     // We update references for global, old, and young collections.
2845     assert(_heap->active_generation()->is_mark_complete(), "Expected complete marking");
2846     ShenandoahMarkingContext* const ctx = _heap->marking_context();
2847     bool is_mixed = _heap->collection_set()->has_old_regions();
2848     while (r != nullptr) {
2849       HeapWord* update_watermark = r->get_update_watermark();
2850       assert (update_watermark >= r->bottom(), "sanity");
2851 
2852       log_debug(gc)("ShenandoahUpdateHeapRefsTask::do_work(%u) looking at region " SIZE_FORMAT, worker_id, r->index());
2853       bool region_progress = false;
2854       if (r->is_active() && !r->is_cset()) {
2855         if (!_heap->mode()->is_generational() || r->is_young()) {
2856           _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2857           region_progress = true;
2858         } else if (r->is_old()) {
2859           if (_heap->active_generation()->is_global()) {
2860             // Note that GLOBAL collection is not as effectively balanced as young and mixed cycles.  This is because
2861             // concurrent GC threads are handed entire heap regions of work at a time, and there
2862             // is no "catchup phase" of remembered set scanning, during which parcels of work are smaller
2863             // and can be distributed more fairly across threads.
2864 
2865             // TODO: Consider an improvement to load balance GLOBAL GC.
2866             _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2867             region_progress = true;
2868           }
2869           // Otherwise, this is an old region in a young or mixed cycle.  Process it during a second phase, below.
2870           // Don't bother to report pacing progress in this case.
2871         } else {
2872           // Because updating of references runs concurrently, it is possible that a FREE inactive region transitions
2873           // to a non-free active region while this loop is executing.  Whenever this happens, the changing of a region's
2874           // active status may propagate at a different speed than the changing of the region's affiliation.
2875 
2876           // When we reach this control point, it is because a race has allowed a region's is_active() status to be seen
2877           // by this thread before the region's affiliation() is seen by this thread.
2878 
2879           // It's ok for this race to occur because the newly transformed region does not have any references to be
2880           // updated.
2881 
2882           assert(r->get_update_watermark() == r->bottom(),
2883                  "%s Region " SIZE_FORMAT " is_active but not recognized as YOUNG or OLD so must be newly transitioned from FREE",
2884                  r->affiliation_name(), r->index());
2885         }
2886       }
2887       if (region_progress && ShenandoahPacing) {
2888         _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
2889       }
2890       if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
2891         return;
2892       }
2893       r = _regions->next();
2894     }
2895 
2896     if (_heap->mode()->is_generational() && !_heap->active_generation()->is_global()) {
2897       // Since this is generational and not GLOBAL, we have to process the remembered set.  There is no remembered
2898       // set processing when not in generational mode, or when running a GLOBAL cycle.
2899 
2900       // After this thread has exhausted its traditional update-refs work, it continues with updating refs within remembered set.
2901       // The remembered set workload is better balanced between threads, so threads that are "behind" can catch up with other
2902       // threads during this phase, allowing all threads to work more effectively in parallel.
2903       struct ShenandoahRegionChunk assignment;
2904       RememberedScanner* scanner = _heap->card_scan();
2905 
2906       while (!_heap->check_cancelled_gc_and_yield(CONCURRENT) && _work_chunks->next(&assignment)) {
2907         // Keep grabbing next work chunk to process until finished, or asked to yield
2908         ShenandoahHeapRegion* r = assignment._r;
2909         if (r->is_active() && !r->is_cset() && r->is_old()) {
2910           HeapWord* start_of_range = r->bottom() + assignment._chunk_offset;
2911           HeapWord* end_of_range = r->get_update_watermark();
2912           if (end_of_range > start_of_range + assignment._chunk_size) {
2913             end_of_range = start_of_range + assignment._chunk_size;
2914           }
2915 
2916           // Old region in a young cycle or mixed cycle.
2917           if (is_mixed) {
2918             // TODO: For mixed evac, consider building an old-gen remembered set that allows restricted updating
2919             // within old-gen HeapRegions.  This remembered set can be constructed by old-gen concurrent marking
2920             // and augmented by card marking.  For example, old-gen concurrent marking can remember for each old-gen
2921             // card which other old-gen regions it refers to: none, one-other specifically, multiple-other non-specific.
2922             // Update-references when _mixed_evac processes each old-gen memory range that has a traditional DIRTY
2923             // card or if the "old-gen remembered set" indicates that this card holds pointers specifically to an
2924             // old-gen region in the most recent collection set, or if this card holds pointers to other non-specific
2925             // old-gen heap regions.
2926 
2927             if (r->is_humongous()) {
2928               if (start_of_range < end_of_range) {
2929                 // Need to examine both dirty and clean cards during mixed evac.
2930                 r->oop_iterate_humongous_slice(&cl, false, start_of_range, assignment._chunk_size, true);
2931               }
2932             } else {
2933               // Since this is mixed evacuation, old regions that are candidates for collection have not been coalesced
2934               // and filled.  Use mark bits to find objects that need to be updated.
2935               //
2936               // Future TODO: establish a second remembered set to identify which old-gen regions point to other old-gen
2937               // regions which are in the collection set for a particular mixed evacuation.
2938               if (start_of_range < end_of_range) {
2939                 HeapWord* p = nullptr;
2940                 size_t card_index = scanner->card_index_for_addr(start_of_range);
2941                 // In case last object in my range spans boundary of my chunk, I may need to scan all the way to top()
2942                 ShenandoahObjectToOopBoundedClosure<T> objs(&cl, start_of_range, r->top());
2943 
2944                 // Any object that begins in a previous range is part of a different scanning assignment.  Any object that
2945                 // starts after end_of_range is also not my responsibility.  (Either allocated during evacuation, so does
2946                 // not hold pointers to from-space, or is beyond the range of my assigned work chunk.)
2947 
2948                 // Find the first object that begins in my range, if there is one.
2949                 p = start_of_range;
2950                 oop obj = cast_to_oop(p);
2951                 HeapWord* tams = ctx->top_at_mark_start(r);
2952                 if (p >= tams) {
2953                   // We cannot use ctx->is_marked(obj) to test whether an object begins at this address.  Instead,
2954                   // we need to use the remembered set crossing map to advance p to the first object that starts
2955                   // within the enclosing card.
2956 
2957                   while (true) {
2958                     HeapWord* first_object = scanner->first_object_in_card(card_index);
2959                     if (first_object != nullptr) {
2960                       p = first_object;
2961                       break;
2962                     } else if (scanner->addr_for_card_index(card_index + 1) < end_of_range) {
2963                       card_index++;
2964                     } else {
2965                       // Force the loop that follows to immediately terminate.
2966                       p = end_of_range;
2967                       break;
2968                     }
2969                   }
2970                   obj = cast_to_oop(p);
2971                   // Note: p may be >= end_of_range
2972                 } else if (!ctx->is_marked(obj)) {
2973                   p = ctx->get_next_marked_addr(p, tams);
2974                   obj = cast_to_oop(p);
2975                   // If there are no more marked objects before tams, this returns tams.
2976                   // Note that tams is either >= end_of_range, or tams is the start of an object that is marked.
2977                 }
2978                 while (p < end_of_range) {
2979                   // p is known to point to the beginning of marked object obj
2980                   objs.do_object(obj);
2981                   HeapWord* prev_p = p;
2982                   p += obj->size();
2983                   if (p < tams) {
2984                     p = ctx->get_next_marked_addr(p, tams);
2985                     // If there are no more marked objects before tams, this returns tams.  Note that tams is
2986                     // either >= end_of_range, or tams is the start of an object that is marked.
2987                   }
2988                   assert(p != prev_p, "Lack of forward progress");
2989                   obj = cast_to_oop(p);
2990                 }
2991               }
2992             }
2993           } else {
2994             // This is a young evacuation.
2995             if (start_of_range < end_of_range) {
2996               size_t cluster_size =
2997                 CardTable::card_size_in_words() * ShenandoahCardCluster<ShenandoahDirectCardMarkRememberedSet>::CardsPerCluster;
2998               size_t clusters = assignment._chunk_size / cluster_size;
2999               assert(clusters * cluster_size == assignment._chunk_size, "Chunk assignment must align on cluster boundaries");
3000               scanner->process_region_slice(r, assignment._chunk_offset, clusters, end_of_range, &cl, true, worker_id);
3001             }
3002           }
3003           if (ShenandoahPacing && (start_of_range < end_of_range)) {
3004             _heap->pacer()->report_updaterefs(pointer_delta(end_of_range, start_of_range));
3005           }
3006         }
3007       }
3008     }
3009   }
3010 };
3011 
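     // The boolean template argument selects concurrent vs. STW closures and worker sessions
     // at compile time, so the per-region work does not branch on the collection mode.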
3012 void ShenandoahHeap::update_heap_references(bool concurrent) {
3013   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
3014   uint nworkers = workers()->active_workers();
3015   ShenandoahRegionChunkIterator work_list(nworkers);
3016 
3017   if (concurrent) {
3018     ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator, &work_list);
3019     workers()->run_task(&task);
3020   } else {
3021     ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator, &work_list);
3022     workers()->run_task(&task);
3023   }
3024   if (ShenandoahEnableCardStats && card_scan() != nullptr) { // generational check proxy
3025     card_scan()->log_card_stats(nworkers, CARD_STAT_UPDATE_REFS);
3026   }
3027 }
3028 
3029 class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
3030 private:
3031   ShenandoahMarkingContext* _ctx;
3032   ShenandoahHeapLock* const _lock;
3033   bool _is_generational;
3034 
3035 public:
3036   ShenandoahFinalUpdateRefsUpdateRegionStateClosure(
3037     ShenandoahMarkingContext* ctx) : _ctx(ctx), _lock(ShenandoahHeap::heap()->lock()),
3038                                      _is_generational(ShenandoahHeap::heap()->mode()->is_generational()) { }
3039 
3040   void heap_region_do(ShenandoahHeapRegion* r) {
3041 
3042     // Maintenance of region age must follow evacuation in order to account for evacuation allocations within survivor
3043     // regions.  We consult region age during the subsequent evacuation to determine whether certain objects need to
3044     // be promoted.
3045     if (_is_generational && r->is_young() && r->is_active()) {
3046       HeapWord *tams = _ctx->top_at_mark_start(r);
3047       HeapWord *top = r->top();
3048 
3049       // Allocations move the watermark when top moves.  However, compacting
3050       // objects will sometimes lower top beneath the watermark, after which
3051       // attempts to read the watermark will assert out (the watermark should not be
3052       // higher than top).
3053       if (top > tams) {
3054         // There have been allocations in this region since the start of the cycle.
3055         // Any objects new to this region must not assimilate elevated age.
3056         r->reset_age();
3057       } else if (ShenandoahHeap::heap()->is_aging_cycle()) {
3058         r->increment_age();
3059       }
3060     }
3061 
3062     // Drop the unnecessary "pinned" state from regions that do not have CP marks
3063     // anymore, as this would allow trashing them.
3064     if (r->is_active()) {
3065       if (r->is_pinned()) {
3066         if (r->pin_count() == 0) {
3067           ShenandoahHeapLocker locker(_lock);
3068           r->make_unpinned();
3069         }
3070       } else {
3071         if (r->pin_count() > 0) {
3072           ShenandoahHeapLocker locker(_lock);
3073           r->make_pinned();
3074         }
3075       }
3076     }
3077   }
3078 
3079   bool is_thread_safe() { return true; }
3080 };
3081 
3082 void ShenandoahHeap::update_heap_region_states(bool concurrent) {
3083   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
3084   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
3085 
3086   {
3087     ShenandoahGCPhase phase(concurrent ?
3088                             ShenandoahPhaseTimings::final_update_refs_update_region_states :
3089                             ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
3090     ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl (active_generation()->complete_marking_context());
3091     parallel_heap_region_iterate(&cl);
3092 
3093     assert_pinned_region_status();
3094   }
3095 
3096   {
3097     ShenandoahGCPhase phase(concurrent ?
3098                             ShenandoahPhaseTimings::final_update_refs_trash_cset :
3099                             ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
3100     trash_cset_regions();
3101   }
3102 }
3103 
3104 void ShenandoahHeap::rebuild_free_set(bool concurrent) {
3105   ShenandoahGCPhase phase(concurrent ?
3106                           ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
3107                           ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
3108   size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
3109   ShenandoahHeapLocker locker(lock());
3110   size_t young_cset_regions, old_cset_regions;
3111   _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions);
3112 
3113   if (mode()->is_generational()) {
3114     assert(verify_generation_usage(true, old_generation()->used_regions(),
3115                                    old_generation()->used(), old_generation()->get_humongous_waste(),
3116                                    true, young_generation()->used_regions(),
3117                                    young_generation()->used(), young_generation()->get_humongous_waste()),
3118            "Generation accounts are inaccurate");
3119 
3120     // The computation of bytes_of_allocation_runway_before_gc_trigger is quite conservative, so consider all of this
3121     // available for transfer to old. Note that transfer of humongous regions does not impact available.
3122     size_t allocation_runway = young_heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions);
3123     adjust_generation_sizes_for_next_cycle(allocation_runway, young_cset_regions, old_cset_regions);
3124 
3125     // Total old_available may have been expanded to hold anticipated promotions.  We trigger if the fragmented available
3126     // memory represents more than 16 regions worth of data.  Note that fragmentation may increase when we promote regular
3127     // regions in place when many of these regular regions have an abundant amount of available memory within them.  Fragmentation
3128     // will decrease as promote-by-copy consumes the available memory within these partially consumed regions.
3129     //
3130     // We consider old-gen to have excessive fragmentation if more than 12.5% of old-gen is free memory that resides
3131     // within partially consumed regions of memory.
3132   }
3133   // Rebuild free set based on adjusted generation sizes.
3134   _free_set->rebuild(young_cset_regions, old_cset_regions);
3135 
3136   if (mode()->is_generational()) {
3137     size_t old_available = old_generation()->available();
3138     size_t old_unaffiliated_available = old_generation()->free_unaffiliated_regions() * region_size_bytes;
3139     size_t old_fragmented_available;
3140     assert(old_available >= old_unaffiliated_available, "unaffiliated available is a subset of total available");
3141     old_fragmented_available = old_available - old_unaffiliated_available;
3142 
3143     size_t old_capacity = old_generation()->max_capacity();
3144     size_t heap_capacity = capacity();
3145     if ((old_capacity > heap_capacity / 8) && (old_fragmented_available > old_capacity / 8)) {
3146       old_heuristics()->trigger_old_is_fragmented();
3147     }
3148 
3149     size_t old_used = old_generation()->used() + old_generation()->get_humongous_waste();
3150     size_t trigger_threshold = old_generation()->usage_trigger_threshold();
3151     // Detects unsigned arithmetic underflow
3152     assert(old_used <= capacity(),
3153            "Old used (" SIZE_FORMAT ", " SIZE_FORMAT ") must not be more than heap capacity (" SIZE_FORMAT ")",
3154            old_generation()->used(), old_generation()->get_humongous_waste(), capacity());
3155 
3156     if (old_used > trigger_threshold) {
3157       old_heuristics()->trigger_old_has_grown();
3158     }
3159   }
3160 }
3161 
3162 void ShenandoahHeap::print_extended_on(outputStream *st) const {
3163   print_on(st);
3164   st->cr();
3165   print_heap_regions_on(st);
3166 }
3167 
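     // Marking bitmap memory is committed in slices, each covering _bitmap_regions_per_slice
     // regions.  A slice is considered committed if any region mapping to it is committed;
     // skip_self lets callers ask whether the slice is still needed by some other region.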
3168 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
3169   size_t slice = r->index() / _bitmap_regions_per_slice;
3170 
3171   size_t regions_from = _bitmap_regions_per_slice * slice;
3172   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
3173   for (size_t g = regions_from; g < regions_to; g++) {
3174     assert (g / _bitmap_regions_per_slice == slice, "same slice");
3175     if (skip_self && g == r->index()) continue;
3176     if (get_region(g)->is_committed()) {
3177       return true;
3178     }
3179   }
3180   return false;
3181 }
3182 
3183 bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
3184   shenandoah_assert_heaplocked();
3185 
3186   // Bitmaps in special regions do not need commits
3187   if (_bitmap_region_special) {
3188     return true;
3189   }
3190 
3191   if (is_bitmap_slice_committed(r, true)) {
3192     // Some other region from the group is already committed, meaning the bitmap
3193     // slice is already committed; we can exit right away.
3194     return true;
3195   }
3196 
3197   // Commit the bitmap slice:
3198   size_t slice = r->index() / _bitmap_regions_per_slice;
3199   size_t off = _bitmap_bytes_per_slice * slice;
3200   size_t len = _bitmap_bytes_per_slice;
3201   char* start = (char*) _bitmap_region.start() + off;
3202 
3203   if (!os::commit_memory(start, len, false)) {
3204     return false;
3205   }
3206 
3207   if (AlwaysPreTouch) {
3208     os::pretouch_memory(start, start + len, _pretouch_bitmap_page_size);
3209   }
3210 
3211   return true;
3212 }
3213 
3214 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
3215   shenandoah_assert_heaplocked();
3216 
3217   // Bitmaps in special regions do not need uncommits
3218   if (_bitmap_region_special) {
3219     return true;
3220   }
3221 
3222   if (is_bitmap_slice_committed(r, true)) {
3223     // Some other region from the group is still committed, meaning the bitmap
3224     // slice should stay committed; exit right away.
3225     return true;
3226   }
3227 
3228   // Uncommit the bitmap slice:
3229   size_t slice = r->index() / _bitmap_regions_per_slice;
3230   size_t off = _bitmap_bytes_per_slice * slice;
3231   size_t len = _bitmap_bytes_per_slice;
3232   if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
3233     return false;
3234   }
3235   return true;
3236 }
3237 
3238 void ShenandoahHeap::safepoint_synchronize_begin() {
3239   if (ShenandoahSuspendibleWorkers) {
3240     SuspendibleThreadSet::synchronize();
3241   }
3242 }
3243 
3244 void ShenandoahHeap::safepoint_synchronize_end() {
3245   if (ShenandoahSuspendibleWorkers) {
3246     SuspendibleThreadSet::desynchronize();
3247   }
3248 }
3249 
3250 void ShenandoahHeap::entry_uncommit(double shrink_before, size_t shrink_until) {
3251   static const char *msg = "Concurrent uncommit";
3252   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_uncommit, true /* log_heap_usage */);
3253   EventMark em("%s", msg);
3254 
3255   op_uncommit(shrink_before, shrink_until);
3256 }
3257 
3258 void ShenandoahHeap::try_inject_alloc_failure() {
3259   if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
3260     _inject_alloc_failure.set();
3261     os::naked_short_sleep(1);
3262     if (cancelled_gc()) {
3263       log_info(gc)("Allocation failure was successfully injected");
3264     }
3265   }
3266 }
3267 
3268 bool ShenandoahHeap::should_inject_alloc_failure() {
3269   return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
3270 }
3271 
3272 void ShenandoahHeap::initialize_serviceability() {
3273   if (mode()->is_generational()) {
3274     _young_gen_memory_pool = new ShenandoahYoungGenMemoryPool(this);
3275     _old_gen_memory_pool = new ShenandoahOldGenMemoryPool(this);
3276     _cycle_memory_manager.add_pool(_young_gen_memory_pool);
3277     _cycle_memory_manager.add_pool(_old_gen_memory_pool);
3278     _stw_memory_manager.add_pool(_young_gen_memory_pool);
3279     _stw_memory_manager.add_pool(_old_gen_memory_pool);
3280   } else {
3281     _memory_pool = new ShenandoahMemoryPool(this);
3282     _cycle_memory_manager.add_pool(_memory_pool);
3283     _stw_memory_manager.add_pool(_memory_pool);
3284   }
3285 }
3286 
3287 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
3288   GrowableArray<GCMemoryManager*> memory_managers(2);
3289   memory_managers.append(&_cycle_memory_manager);
3290   memory_managers.append(&_stw_memory_manager);
3291   return memory_managers;
3292 }
3293 
3294 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
3295   GrowableArray<MemoryPool*> memory_pools(1);
3296   if (mode()->is_generational()) {
3297     memory_pools.append(_young_gen_memory_pool);
3298     memory_pools.append(_old_gen_memory_pool);
3299   } else {
3300     memory_pools.append(_memory_pool);
3301   }
3302   return memory_pools;
3303 }
3304 
3305 MemoryUsage ShenandoahHeap::memory_usage() {
3306   return MemoryUsage(_initial_size, used(), committed(), max_capacity());
3307 }
3308 
3309 ShenandoahRegionIterator::ShenandoahRegionIterator() :
3310   _heap(ShenandoahHeap::heap()),
3311   _index(0) {}
3312 
3313 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
3314   _heap(heap),
3315   _index(0) {}
3316 
3317 void ShenandoahRegionIterator::reset() {
3318   _index = 0;
3319 }
3320 
3321 bool ShenandoahRegionIterator::has_next() const {
3322   return _index < _heap->num_regions();
3323 }
3324 
3325 char ShenandoahHeap::gc_state() const {
3326   return _gc_state.raw_value();
3327 }
3328 
3329 ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
3330 #ifdef ASSERT
3331   assert(_liveness_cache != nullptr, "sanity");
3332   assert(worker_id < _max_workers, "sanity");
3333   for (uint i = 0; i < num_regions(); i++) {
3334     assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
3335   }
3336 #endif
3337   return _liveness_cache[worker_id];
3338 }
3339 
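     // Fold this worker's cached per-region live data into the owning regions and reset the
     // cache so it is empty for the next marking cycle.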
3340 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
3341   assert(worker_id < _max_workers, "sanity");
3342   assert(_liveness_cache != nullptr, "sanity");
3343   ShenandoahLiveData* ld = _liveness_cache[worker_id];
3344 
3345   for (uint i = 0; i < num_regions(); i++) {
3346     ShenandoahLiveData live = ld[i];
3347     if (live > 0) {
3348       ShenandoahHeapRegion* r = get_region(i);
3349       r->increase_live_data_gc_words(live);
3350       ld[i] = 0;
3351     }
3352   }
3353 }
3354 
3355 bool ShenandoahHeap::requires_barriers(stackChunkOop obj) const {
3356   if (is_idle()) return false;
3357 
3358   // Objects allocated after marking start are implicitly alive and don't need any barriers during
3359   // the marking phase.
3360   if (is_concurrent_mark_in_progress() &&
3361      !marking_context()->allocated_after_mark_start(obj)) {
3362     return true;
3363   }
3364 
3365   // Cannot guarantee that obj is deeply good.
3366   if (has_forwarded_objects()) {
3367     return true;
3368   }
3369 
3370   return false;
3371 }
3372 
3373 void ShenandoahHeap::transfer_old_pointers_from_satb() {
3374   _old_generation->transfer_pointers_from_satb();
3375 }
3376 
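     // Generation-filtered region closures: YOUNG visits young and free regions, OLD visits
     // old and free regions, and both GLOBAL flavors visit every region.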
3377 template<>
3378 void ShenandoahGenerationRegionClosure<YOUNG>::heap_region_do(ShenandoahHeapRegion* region) {
3379   // Visit young and free regions
3380   if (!region->is_old()) {
3381     _cl->heap_region_do(region);
3382   }
3383 }
3384 
3385 template<>
3386 void ShenandoahGenerationRegionClosure<OLD>::heap_region_do(ShenandoahHeapRegion* region) {
3387   // Visit old and free regions
3388   if (!region->is_young()) {
3389     _cl->heap_region_do(region);
3390   }
3391 }
3392 
3393 template<>
3394 void ShenandoahGenerationRegionClosure<GLOBAL_GEN>::heap_region_do(ShenandoahHeapRegion* region) {
3395   _cl->heap_region_do(region);
3396 }
3397 
3398 template<>
3399 void ShenandoahGenerationRegionClosure<GLOBAL_NON_GEN>::heap_region_do(ShenandoahHeapRegion* region) {
3400   _cl->heap_region_do(region);
3401 }
3402 
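     // Walk all regions, tallying per-generation region counts, used bytes, and humongous
     // waste, then compare against the caller-supplied expectations.  Returns false on any mismatch.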
3403 bool ShenandoahHeap::verify_generation_usage(bool verify_old, size_t old_regions, size_t old_bytes, size_t old_waste,
3404                                              bool verify_young, size_t young_regions, size_t young_bytes, size_t young_waste) {
3405   size_t tally_old_regions = 0;
3406   size_t tally_old_bytes = 0;
3407   size_t tally_old_waste = 0;
3408   size_t tally_young_regions = 0;
3409   size_t tally_young_bytes = 0;
3410   size_t tally_young_waste = 0;
3411 
3412   shenandoah_assert_heaplocked_or_safepoint();
3413   for (size_t i = 0; i < num_regions(); i++) {
3414     ShenandoahHeapRegion* r = get_region(i);
3415     if (r->is_old()) {
3416       tally_old_regions++;
3417       tally_old_bytes += r->used();
3418       if (r->is_humongous()) {
3419         ShenandoahHeapRegion* start = r->humongous_start_region();
3420         HeapWord* obj_addr = start->bottom();
3421         oop obj = cast_to_oop(obj_addr);
3422         size_t word_size = obj->size();
3423         HeapWord* end_addr = obj_addr + word_size;
3424         if (end_addr <= r->end()) {
3425           tally_old_waste += (r->end() - end_addr) * HeapWordSize;
3426         }
3427       }
3428     } else if (r->is_young()) {
3429       tally_young_regions++;
3430       tally_young_bytes += r->used();
3431       if (r->is_humongous()) {
3432         ShenandoahHeapRegion* start = r->humongous_start_region();
3433         HeapWord* obj_addr = start->bottom();
3434         oop obj = cast_to_oop(obj_addr);
3435         size_t word_size = obj->size();
3436         HeapWord* end_addr = obj_addr + word_size;
3437         if (end_addr <= r->end()) {
3438           tally_young_waste += (r->end() - end_addr) * HeapWordSize;
3439         }
3440       }
3441     }
3442   }
3443   if (verify_young &&
3444       ((young_regions != tally_young_regions) || (young_bytes != tally_young_bytes) || (young_waste != tally_young_waste))) {
3445     return false;
3446   } else if (verify_old &&
3447              ((old_regions != tally_old_regions) || (old_bytes != tally_old_bytes) || (old_waste != tally_old_waste))) {
3448     return false;
3449   } else {
3450     return true;
3451   }
3452 }
3453 
3454 ShenandoahGeneration* ShenandoahHeap::generation_for(ShenandoahAffiliation affiliation) const {
3455   if (!mode()->is_generational()) {
3456     return global_generation();
3457   } else if (affiliation == YOUNG_GENERATION) {
3458     return young_generation();
3459   } else if (affiliation == OLD_GENERATION) {
3460     return old_generation();
3461   }
3462 
3463   ShouldNotReachHere();
3464   return nullptr;
3465 }
3466 
3467 void ShenandoahHeap::log_heap_status(const char* msg) const {
3468   if (mode()->is_generational()) {
3469     young_generation()->log_status(msg);
3470     old_generation()->log_status(msg);
3471   } else {
3472     global_generation()->log_status(msg);
3473   }
3474 }