1 /*
   2  * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "memory/allocation.hpp"
  28 #include "memory/universe.hpp"
  29 
  30 #include "gc/shared/gcArguments.hpp"
  31 #include "gc/shared/gcTimer.hpp"
  32 #include "gc/shared/gcTraceTime.inline.hpp"
  33 #include "gc/shared/locationPrinter.inline.hpp"
  34 #include "gc/shared/memAllocator.hpp"
  35 #include "gc/shared/plab.hpp"
  36 #include "gc/shared/tlab_globals.hpp"
  37 
  38 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  39 #include "gc/shenandoah/shenandoahCardTable.hpp"
  40 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  41 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  42 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  43 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
  44 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  45 #include "gc/shenandoah/shenandoahControlThread.hpp"
  46 #include "gc/shenandoah/shenandoahRegulatorThread.hpp"
  47 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  48 #include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
  49 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  50 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  51 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  52 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  53 #include "gc/shenandoah/shenandoahInitLogger.hpp"
  54 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  55 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
  56 #include "gc/shenandoah/shenandoahMetrics.hpp"
  57 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  58 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  59 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  60 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
  61 #include "gc/shenandoah/shenandoahPadding.hpp"
  62 #include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
  63 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  64 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  65 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
  66 #include "gc/shenandoah/shenandoahStringDedup.hpp"
  67 #include "gc/shenandoah/shenandoahSTWMark.hpp"
  68 #include "gc/shenandoah/shenandoahUtils.hpp"
  69 #include "gc/shenandoah/shenandoahVerifier.hpp"
  70 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  71 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  72 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  73 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  74 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  75 #include "gc/shenandoah/mode/shenandoahGenerationalMode.hpp"
  76 #include "gc/shenandoah/mode/shenandoahIUMode.hpp"
  77 #include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
  78 #include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
  79 
  80 #if INCLUDE_JFR
  81 #include "gc/shenandoah/shenandoahJfrSupport.hpp"
  82 #endif
  83 
  84 #include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
  85 
  86 #include "classfile/systemDictionary.hpp"
  87 #include "code/codeCache.hpp"
  88 #include "memory/classLoaderMetaspace.hpp"
  89 #include "memory/metaspaceUtils.hpp"
  90 #include "oops/compressedOops.inline.hpp"
  91 #include "prims/jvmtiTagMap.hpp"
  92 #include "runtime/atomic.hpp"
  93 #include "runtime/globals.hpp"
  94 #include "runtime/interfaceSupport.inline.hpp"
  95 #include "runtime/java.hpp"
  96 #include "runtime/orderAccess.hpp"
  97 #include "runtime/safepointMechanism.hpp"
  98 #include "runtime/vmThread.hpp"
  99 #include "services/mallocTracker.hpp"
 100 #include "services/memTracker.hpp"
 101 #include "utilities/events.hpp"
 102 #include "utilities/powerOfTwo.hpp"
 103 
 104 class ShenandoahPretouchHeapTask : public WorkerTask {
 105 private:
 106   ShenandoahRegionIterator _regions;
 107   const size_t _page_size;
 108 public:
 109   ShenandoahPretouchHeapTask(size_t page_size) :
 110     WorkerTask("Shenandoah Pretouch Heap"),
 111     _page_size(page_size) {}
 112 
 113   virtual void work(uint worker_id) {
 114     ShenandoahHeapRegion* r = _regions.next();
 115     while (r != nullptr) {
 116       if (r->is_committed()) {
 117         os::pretouch_memory(r->bottom(), r->end(), _page_size);
 118       }
 119       r = _regions.next();
 120     }
 121   }
 122 };
 123 
 124 class ShenandoahPretouchBitmapTask : public WorkerTask {
 125 private:
 126   ShenandoahRegionIterator _regions;
 127   char* _bitmap_base;
 128   const size_t _bitmap_size;
 129   const size_t _page_size;
 130 public:
 131   ShenandoahPretouchBitmapTask(char* bitmap_base, size_t bitmap_size, size_t page_size) :
 132     WorkerTask("Shenandoah Pretouch Bitmap"),
 133     _bitmap_base(bitmap_base),
 134     _bitmap_size(bitmap_size),
 135     _page_size(page_size) {}
 136 
 137   virtual void work(uint worker_id) {
 138     ShenandoahHeapRegion* r = _regions.next();
 139     while (r != nullptr) {
 140       size_t start = r->index()       * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 141       size_t end   = (r->index() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 142       assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " < " SIZE_FORMAT, end, _bitmap_size);
 143 
 144       if (r->is_committed()) {
 145         os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);
 146       }
 147 
 148       r = _regions.next();
 149     }
 150   }
 151 };
 152 
 153 jint ShenandoahHeap::initialize() {
 154   //
 155   // Figure out heap sizing
 156   //
 157 
 158   size_t init_byte_size = InitialHeapSize;
 159   size_t min_byte_size  = MinHeapSize;
 160   size_t max_byte_size  = MaxHeapSize;
 161   size_t heap_alignment = HeapAlignment;
 162 
 163   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 164 
 165   Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
 166   Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
 167 
 168   _num_regions = ShenandoahHeapRegion::region_count();
 169   assert(_num_regions == (max_byte_size / reg_size_bytes),
 170          "Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT,
 171          _num_regions, max_byte_size, reg_size_bytes);
 172 
 173   size_t num_committed_regions = init_byte_size / reg_size_bytes;
 174   num_committed_regions = MIN2(num_committed_regions, _num_regions);
 175   assert(num_committed_regions <= _num_regions, "sanity");
 176   _initial_size = num_committed_regions * reg_size_bytes;
 177 
 178   size_t num_min_regions = min_byte_size / reg_size_bytes;
 179   num_min_regions = MIN2(num_min_regions, _num_regions);
 180   assert(num_min_regions <= _num_regions, "sanity");
 181   _minimum_size = num_min_regions * reg_size_bytes;
 182 
 183   // Default to max heap size.
 184   _soft_max_size = _num_regions * reg_size_bytes;
 185 
 186   _committed = _initial_size;
 187 
 188   // Now we know the number of regions and heap sizes, initialize the heuristics.
 189   initialize_generations();
 190   initialize_heuristics();
 191 
 192   size_t heap_page_size   = UseLargePages ? os::large_page_size() : os::vm_page_size();
 193   size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 194   size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 195 
 196   //
 197   // Reserve and commit memory for heap
 198   //
 199 
 200   ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
 201   initialize_reserved_region(heap_rs);
 202   _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
 203   _heap_region_special = heap_rs.special();
 204 
 205   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
 206          "Misaligned heap: " PTR_FORMAT, p2i(base()));
 207 
 208 #if SHENANDOAH_OPTIMIZED_MARKTASK
 209   // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
 210   // Fail if we ever attempt to address more than we can.
 211   if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
 212     FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
 213                           "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
 214                           "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
 215                 p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
 216     vm_exit_during_initialization("Fatal Error", buf);
 217   }
 218 #endif
 219 
 220   ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
 221   if (!_heap_region_special) {
 222     os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
 223                               "Cannot commit heap memory");
 224   }
 225 
 226   BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this, _heap_region));
 227 
 228   //
 229   // After reserving the Java heap, create the card table, barriers, and workers, in dependency order
 230   //
 231   if (mode()->is_generational()) {
 232     ShenandoahDirectCardMarkRememberedSet *rs;
 233     ShenandoahCardTable* card_table = ShenandoahBarrierSet::barrier_set()->card_table();
 234     size_t card_count = card_table->cards_required(heap_rs.size() / HeapWordSize);
 235     rs = new ShenandoahDirectCardMarkRememberedSet(ShenandoahBarrierSet::barrier_set()->card_table(), card_count);
 236     _card_scan = new ShenandoahScanRemembered<ShenandoahDirectCardMarkRememberedSet>(rs);
 237   }
 238 
 239   _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
 240   if (_workers == nullptr) {
 241     vm_exit_during_initialization("Failed necessary allocation.");
 242   } else {
 243     _workers->initialize_workers();
 244   }
 245 
 246   if (ParallelGCThreads > 1) {
 247     _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread", ParallelGCThreads);
 248     _safepoint_workers->initialize_workers();
 249   }
 250 
 251   //
 252   // Reserve and commit memory for bitmap(s)
 253   //
 254 
 255   _bitmap_size = ShenandoahMarkBitMap::compute_size(heap_rs.size());
 256   _bitmap_size = align_up(_bitmap_size, bitmap_page_size);
 257 
 258   size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();
 259 
 260   guarantee(bitmap_bytes_per_region != 0,
 261             "Bitmap bytes per region should not be zero");
 262   guarantee(is_power_of_2(bitmap_bytes_per_region),
 263             "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
 264 
 265   if (bitmap_page_size > bitmap_bytes_per_region) {
 266     _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
 267     _bitmap_bytes_per_slice = bitmap_page_size;
 268   } else {
 269     _bitmap_regions_per_slice = 1;
 270     _bitmap_bytes_per_slice = bitmap_bytes_per_region;
 271   }
 272 
 273   guarantee(_bitmap_regions_per_slice >= 1,
 274             "Should have at least one region per slice: " SIZE_FORMAT,
 275             _bitmap_regions_per_slice);
 276 
 277   guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
 278             "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
 279             _bitmap_bytes_per_slice, bitmap_page_size);
 280 
 281   ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
 282   MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
 283   _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
 284   _bitmap_region_special = bitmap.special();
 285 
 286   size_t bitmap_init_commit = _bitmap_bytes_per_slice *
 287                               align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
 288   bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
 289   if (!_bitmap_region_special) {
 290     os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
 291                               "Cannot commit bitmap memory");
 292   }
 293 
 294   _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);
 295 
 296   if (ShenandoahVerify) {
 297     ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
 298     if (!verify_bitmap.special()) {
 299       os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
 300                                 "Cannot commit verification bitmap memory");
 301     }
 302     MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
 303     MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
 304     _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
 305     _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
 306   }
 307 
 308   // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
 309   ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
 310   MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
 311   _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
 312   _aux_bitmap_region_special = aux_bitmap.special();
 313   _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
 314 
 315   //
 316   // Create regions and region sets
 317   //
 318   size_t region_align = align_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE);
 319   size_t region_storage_size = align_up(region_align * _num_regions, region_page_size);
 320   region_storage_size = align_up(region_storage_size, os::vm_allocation_granularity());
 321 
 322   ReservedSpace region_storage(region_storage_size, region_page_size);
 323   MemTracker::record_virtual_memory_type(region_storage.base(), mtGC);
 324   if (!region_storage.special()) {
 325     os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false,
 326                               "Cannot commit region memory");
 327   }
 328 
 329   // Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks.
 330   // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there.
 331   // If not successful, bite a bullet and allocate at whatever address.
 332   {
 333     size_t cset_align = MAX2<size_t>(os::vm_page_size(), os::vm_allocation_granularity());
 334     size_t cset_size = align_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align);
 335 
 336     uintptr_t min = round_up_power_of_2(cset_align);
 337     uintptr_t max = (1u << 30u);
 338 
 339     for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
 340       char* req_addr = (char*)addr;
 341       assert(is_aligned(req_addr, cset_align), "Should be aligned");
 342       ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size(), req_addr);
 343       if (cset_rs.is_reserved()) {
 344         assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
 345         _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 346         break;
 347       }
 348     }
 349 
 350     if (_collection_set == nullptr) {
 351       ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size());
 352       _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 353     }
 354   }
 355 
 356   _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
 357   _affiliations = NEW_C_HEAP_ARRAY(uint8_t, _num_regions, mtGC);
 358   _free_set = new ShenandoahFreeSet(this, _num_regions);
 359 
 360   {
 361     ShenandoahHeapLocker locker(lock());
 362 
 363 
 364     for (size_t i = 0; i < _num_regions; i++) {
 365       HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
 366       bool is_committed = i < num_committed_regions;
 367       void* loc = region_storage.base() + i * region_align;
 368 
 369       ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
 370       assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");
 371 
 372       _marking_context->initialize_top_at_mark_start(r);
 373       _regions[i] = r;
 374       assert(!collection_set()->is_in(i), "New region should not be in collection set");
 375 
 376       _affiliations[i] = ShenandoahRegionAffiliation::FREE;
 377     }
 378 
 379     // Initialize to complete
 380     _marking_context->mark_complete();
 381 
 382     _free_set->rebuild();
 383   }
 384 
 385   if (AlwaysPreTouch) {
 386     // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
 387     // before initialize() below zeroes it with initializing thread. For any given region,
 388     // we touch the region and the corresponding bitmaps from the same thread.
 389     ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
 390 
 391     _pretouch_heap_page_size = heap_page_size;
 392     _pretouch_bitmap_page_size = bitmap_page_size;
 393 
 394 #ifdef LINUX
 395     // UseTransparentHugePages would madvise that backing memory can be coalesced into huge
 396     // pages. But, the kernel needs to know that every small page is used, in order to coalesce
 397     // them into huge one. Therefore, we need to pretouch with smaller pages.
 398     if (UseTransparentHugePages) {
 399       _pretouch_heap_page_size = (size_t)os::vm_page_size();
 400       _pretouch_bitmap_page_size = (size_t)os::vm_page_size();
 401     }
 402 #endif
 403 
 404     // OS memory managers may want to coalesce back-to-back pages. Make their jobs
 405     // simpler by pre-touching continuous spaces (heap and bitmap) separately.
 406 
 407     ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, _pretouch_bitmap_page_size);
 408     _workers->run_task(&bcl);
 409 
 410     ShenandoahPretouchHeapTask hcl(_pretouch_heap_page_size);
 411     _workers->run_task(&hcl);
 412   }
 413 
 414   //
 415   // Initialize the rest of GC subsystems
 416   //
 417 
 418   _liveness_cache = NEW_C_HEAP_ARRAY(ShenandoahLiveData*, _max_workers, mtGC);
 419   for (uint worker = 0; worker < _max_workers; worker++) {
 420     _liveness_cache[worker] = NEW_C_HEAP_ARRAY(ShenandoahLiveData, _num_regions, mtGC);
 421     Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
 422   }
 423 
 424   // There should probably be Shenandoah-specific options for these,
 425   // just as there are G1-specific options.
 426   {
 427     ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
 428     satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
 429     satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
 430   }
 431 
 432   _monitoring_support = new ShenandoahMonitoringSupport(this);
 433   _phase_timings = new ShenandoahPhaseTimings(max_workers());
 434   ShenandoahCodeRoots::initialize();
 435 
 436   if (ShenandoahPacing) {
 437     _pacer = new ShenandoahPacer(this);
 438     _pacer->setup_for_idle();
 439   } else {
 440     _pacer = nullptr;
 441   }
 442 
 443   _control_thread = new ShenandoahControlThread();
 444   _regulator_thread = new ShenandoahRegulatorThread(_control_thread);
 445 
 446   ShenandoahInitLogger::print();
 447 
 448   return JNI_OK;
 449 }
 450 
 451 size_t ShenandoahHeap::max_size_for(ShenandoahGeneration* generation) const {
 452   switch (generation->generation_mode()) {
 453     case YOUNG:  return _generation_sizer.max_young_size();
 454     case OLD:    return max_capacity() - _generation_sizer.min_young_size();
 455     case GLOBAL: return max_capacity();
 456     default:
 457       ShouldNotReachHere();
 458       return 0;
 459   }
 460 }
 461 
 462 size_t ShenandoahHeap::min_size_for(ShenandoahGeneration* generation) const {
 463   switch (generation->generation_mode()) {
 464     case YOUNG:  return _generation_sizer.min_young_size();
 465     case OLD:    return max_capacity() - _generation_sizer.max_young_size();
 466     case GLOBAL: return min_capacity();
 467     default:
 468       ShouldNotReachHere();
 469       return 0;
 470   }
 471 }
 472 
 473 void ShenandoahHeap::initialize_generations() {
 474   // Max capacity is the maximum _allowed_ capacity. That is, the maximum allowed capacity
 475   // for old would be total heap - minimum capacity of young. This means the sum of the maximum
 476   // allowed for old and young could exceed the total heap size. It remains the case that the
 477   // _actual_ capacity of young + old = total.
 478   _generation_sizer.heap_size_changed(soft_max_capacity());
 479   size_t initial_capacity_young = _generation_sizer.max_young_size();
 480   size_t max_capacity_young = _generation_sizer.max_young_size();
 481   size_t initial_capacity_old = max_capacity() - max_capacity_young;
 482   size_t max_capacity_old = max_capacity() - initial_capacity_young;
 483 
 484   _young_generation = new ShenandoahYoungGeneration(_max_workers, max_capacity_young, initial_capacity_young);
 485   _old_generation = new ShenandoahOldGeneration(_max_workers, max_capacity_old, initial_capacity_old);
 486   _global_generation = new ShenandoahGlobalGeneration(_max_workers, soft_max_capacity(), soft_max_capacity());
 487 }
 488 
 489 void ShenandoahHeap::initialize_heuristics() {
 490   if (ShenandoahGCMode != nullptr) {
 491     if (strcmp(ShenandoahGCMode, "satb") == 0) {
 492       _gc_mode = new ShenandoahSATBMode();
 493     } else if (strcmp(ShenandoahGCMode, "iu") == 0) {
 494       _gc_mode = new ShenandoahIUMode();
 495     } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
 496       _gc_mode = new ShenandoahPassiveMode();
 497     } else if (strcmp(ShenandoahGCMode, "generational") == 0) {
 498       _gc_mode = new ShenandoahGenerationalMode();
 499     } else {
 500       vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
 501     }
 502   } else {
 503     vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
 504   }
 505   _gc_mode->initialize_flags();
 506   if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 507     vm_exit_during_initialization(
 508             err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 509                     _gc_mode->name()));
 510   }
 511   if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
 512     vm_exit_during_initialization(
 513             err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 514                     _gc_mode->name()));
 515   }
 516 
 517   _global_generation->initialize_heuristics(_gc_mode);
 518   if (mode()->is_generational()) {
 519     _young_generation->initialize_heuristics(_gc_mode);
 520     _old_generation->initialize_heuristics(_gc_mode);
 521 
 522     ShenandoahEvacWaste = ShenandoahGenerationalEvacWaste;
 523   }
 524 }
 525 
 526 #ifdef _MSC_VER
 527 #pragma warning( push )
 528 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 529 #endif
 530 
 531 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 532   CollectedHeap(),
 533   _gc_generation(nullptr),
 534   _prepare_for_old_mark(false),
 535   _initial_size(0),
 536   _used(0),
 537   _committed(0),
 538   _max_workers(MAX3(ConcGCThreads, ParallelGCThreads, 1U)),
 539   _workers(nullptr),
 540   _safepoint_workers(nullptr),
 541   _heap_region_special(false),
 542   _num_regions(0),
 543   _regions(nullptr),
 544   _affiliations(nullptr),
 545   _update_refs_iterator(this),
 546   _alloc_supplement_reserve(0),
 547   _promoted_reserve(0),
 548   _old_evac_reserve(0),
 549   _old_evac_expended(0),
 550   _young_evac_reserve(0),
 551   _captured_old_usage(0),
 552   _previous_promotion(0),
 553   _cancel_requested_time(0),
 554   _young_generation(nullptr),
 555   _global_generation(nullptr),
 556   _old_generation(nullptr),
 557   _control_thread(nullptr),
 558   _regulator_thread(nullptr),
 559   _shenandoah_policy(policy),
 560   _free_set(nullptr),
 561   _pacer(nullptr),
 562   _verifier(nullptr),
 563   _phase_timings(nullptr),
 564   _evac_tracker(new ShenandoahEvacuationTracker()),
 565   _mmu_tracker(),
 566   _generation_sizer(&_mmu_tracker),
 567   _monitoring_support(nullptr),
 568   _memory_pool(nullptr),
 569   _young_gen_memory_pool(nullptr),
 570   _old_gen_memory_pool(nullptr),
 571   _stw_memory_manager("Shenandoah Pauses", "end of GC pause"),
 572   _cycle_memory_manager("Shenandoah Cycles", "end of GC cycle"),
 573   _gc_timer(new ConcurrentGCTimer()),
 574   _soft_ref_policy(),
 575   _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
 576   _marking_context(nullptr),
 577   _bitmap_size(0),
 578   _bitmap_regions_per_slice(0),
 579   _bitmap_bytes_per_slice(0),
 580   _bitmap_region_special(false),
 581   _aux_bitmap_region_special(false),
 582   _liveness_cache(nullptr),
 583   _collection_set(nullptr),
 584   _card_scan(nullptr)
 585 {
 586 }
 587 
 588 #ifdef _MSC_VER
 589 #pragma warning( pop )
 590 #endif
 591 
 592 void ShenandoahHeap::print_on(outputStream* st) const {
 593   st->print_cr("Shenandoah Heap");
 594   st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
 595                byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
 596                byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
 597                byte_size_in_proper_unit(committed()),    proper_unit_for_byte_size(committed()),
 598                byte_size_in_proper_unit(used()),         proper_unit_for_byte_size(used()));
 599   st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions",
 600                num_regions(),
 601                byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
 602                proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));
 603 
 604   st->print("Status: ");
 605   if (has_forwarded_objects())                 st->print("has forwarded objects, ");
 606   if (is_concurrent_old_mark_in_progress())    st->print("old marking, ");
 607   if (is_concurrent_young_mark_in_progress())  st->print("young marking, ");
 608   if (is_evacuation_in_progress())             st->print("evacuating, ");
 609   if (is_update_refs_in_progress())            st->print("updating refs, ");
 610   if (is_degenerated_gc_in_progress())         st->print("degenerated gc, ");
 611   if (is_full_gc_in_progress())                st->print("full gc, ");
 612   if (is_full_gc_move_in_progress())           st->print("full gc move, ");
 613   if (is_concurrent_weak_root_in_progress())   st->print("concurrent weak roots, ");
 614   if (is_concurrent_strong_root_in_progress() &&
 615       !is_concurrent_weak_root_in_progress())  st->print("concurrent strong roots, ");
 616 
 617   if (cancelled_gc()) {
 618     st->print("cancelled");
 619   } else {
 620     st->print("not cancelled");
 621   }
 622   st->cr();
 623 
 624   st->print_cr("Reserved region:");
 625   st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
 626                p2i(reserved_region().start()),
 627                p2i(reserved_region().end()));
 628 
 629   ShenandoahCollectionSet* cset = collection_set();
 630   st->print_cr("Collection set:");
 631   if (cset != nullptr) {
 632     st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address()));
 633     st->print_cr(" - map (biased):  " PTR_FORMAT, p2i(cset->biased_map_address()));
 634   } else {
 635     st->print_cr(" (null)");
 636   }
 637 
 638   st->cr();
 639   MetaspaceUtils::print_on(st);
 640 
 641   if (Verbose) {
 642     print_heap_regions_on(st);
 643   }
 644 }
 645 
 646 class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
 647 public:
 648   void do_thread(Thread* thread) {
 649     assert(thread != nullptr, "Sanity");
 650     assert(thread->is_Worker_thread(), "Only worker thread expected");
 651     ShenandoahThreadLocalData::initialize_gclab(thread);
 652   }
 653 };
 654 
 655 void ShenandoahHeap::post_initialize() {
 656   CollectedHeap::post_initialize();
 657   _mmu_tracker.initialize();
 658 
 659   MutexLocker ml(Threads_lock);
 660 
 661   ShenandoahInitWorkerGCLABClosure init_gclabs;
 662   _workers->threads_do(&init_gclabs);
 663 
 664   // gclab can not be initialized early during VM startup, as it can not determinate its max_size.
 665   // Now, we will let WorkerThreads to initialize gclab when new worker is created.
 666   _workers->set_initialize_gclab();
 667   if (_safepoint_workers != nullptr) {
 668     _safepoint_workers->threads_do(&init_gclabs);
 669     _safepoint_workers->set_initialize_gclab();
 670   }
 671 
 672   JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers());
 673 }
 674 
 675 
 676 ShenandoahOldHeuristics* ShenandoahHeap::old_heuristics() {
 677   return (ShenandoahOldHeuristics*) _old_generation->heuristics();
 678 }
 679 
 680 bool ShenandoahHeap::doing_mixed_evacuations() {
 681   return old_heuristics()->unprocessed_old_collection_candidates() > 0;
 682 }
 683 
 684 bool ShenandoahHeap::is_old_bitmap_stable() const {
 685   ShenandoahOldGeneration::State state = _old_generation->state();
 686   return state != ShenandoahOldGeneration::MARKING
 687       && state != ShenandoahOldGeneration::BOOTSTRAPPING;
 688 }
 689 
 690 bool ShenandoahHeap::is_gc_generation_young() const {
 691   return _gc_generation != nullptr && _gc_generation->generation_mode() == YOUNG;
 692 }
 693 
 694 size_t ShenandoahHeap::used() const {
 695   return Atomic::load(&_used);
 696 }
 697 
 698 size_t ShenandoahHeap::committed() const {
 699   return Atomic::load(&_committed);
 700 }
 701 
 702 void ShenandoahHeap::increase_committed(size_t bytes) {
 703   shenandoah_assert_heaplocked_or_safepoint();
 704   _committed += bytes;
 705 }
 706 
 707 void ShenandoahHeap::decrease_committed(size_t bytes) {
 708   shenandoah_assert_heaplocked_or_safepoint();
 709   _committed -= bytes;
 710 }
 711 
 712 void ShenandoahHeap::increase_used(size_t bytes) {
 713   Atomic::add(&_used, bytes, memory_order_relaxed);
 714 }
 715 
 716 void ShenandoahHeap::set_used(size_t bytes) {
 717   Atomic::store(&_used, bytes);
 718 }
 719 
 720 void ShenandoahHeap::decrease_used(size_t bytes) {
 721   assert(used() >= bytes, "never decrease heap size by more than we've left");
 722   Atomic::sub(&_used, bytes, memory_order_relaxed);
 723 }
 724 
 725 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
 726   size_t bytes = words * HeapWordSize;
 727   if (!waste) {
 728     increase_used(bytes);
 729   }
 730 
 731   if (ShenandoahPacing) {
 732     control_thread()->pacing_notify_alloc(words);
 733     if (waste) {
 734       pacer()->claim_for_alloc(words, true);
 735     }
 736   }
 737 }
 738 
 739 size_t ShenandoahHeap::capacity() const {
 740   return committed();
 741 }
 742 
 743 size_t ShenandoahHeap::max_capacity() const {
 744   return _num_regions * ShenandoahHeapRegion::region_size_bytes();
 745 }
 746 
 747 size_t ShenandoahHeap::soft_max_capacity() const {
 748   size_t v = Atomic::load(&_soft_max_size);
 749   assert(min_capacity() <= v && v <= max_capacity(),
 750          "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
 751          min_capacity(), v, max_capacity());
 752   return v;
 753 }
 754 
 755 void ShenandoahHeap::set_soft_max_capacity(size_t v) {
 756   assert(min_capacity() <= v && v <= max_capacity(),
 757          "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
 758          min_capacity(), v, max_capacity());
 759   Atomic::store(&_soft_max_size, v);
 760 
 761   if (mode()->is_generational()) {
 762     _generation_sizer.heap_size_changed(_soft_max_size);
 763     size_t soft_max_capacity_young = _generation_sizer.max_young_size();
 764     size_t soft_max_capacity_old = _soft_max_size - soft_max_capacity_young;
 765     _young_generation->set_soft_max_capacity(soft_max_capacity_young);
 766     _old_generation->set_soft_max_capacity(soft_max_capacity_old);
 767   }
 768 }
 769 
 770 size_t ShenandoahHeap::min_capacity() const {
 771   return _minimum_size;
 772 }
 773 
 774 size_t ShenandoahHeap::initial_capacity() const {
 775   return _initial_size;
 776 }
 777 
 778 void ShenandoahHeap::op_uncommit(double shrink_before, size_t shrink_until) {
 779   assert (ShenandoahUncommit, "should be enabled");
 780 
 781   // Application allocates from the beginning of the heap, and GC allocates at
 782   // the end of it. It is more efficient to uncommit from the end, so that applications
 783   // could enjoy the near committed regions. GC allocations are much less frequent,
 784   // and therefore can accept the committing costs.
 785 
 786   size_t count = 0;
 787   for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow
 788     ShenandoahHeapRegion* r = get_region(i - 1);
 789     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
 790       ShenandoahHeapLocker locker(lock());
 791       if (r->is_empty_committed()) {
 792         if (committed() < shrink_until + ShenandoahHeapRegion::region_size_bytes()) {
 793           break;
 794         }
 795 
 796         r->make_uncommitted();
 797         count++;
 798       }
 799     }
 800     SpinPause(); // allow allocators to take the lock
 801   }
 802 
 803   if (count > 0) {
 804     control_thread()->notify_heap_changed();
 805     regulator_thread()->notify_heap_changed();
 806   }
 807 }
 808 
 809 void ShenandoahHeap::handle_old_evacuation(HeapWord* obj, size_t words, bool promotion) {
 810   // Only register the copy of the object that won the evacuation race.
 811   card_scan()->register_object_wo_lock(obj);
 812 
 813   // Mark the entire range of the evacuated object as dirty.  At next remembered set scan,
 814   // we will clear dirty bits that do not hold interesting pointers.  It's more efficient to
 815   // do this in batch, in a background GC thread than to try to carefully dirty only cards
 816   // that hold interesting pointers right now.
 817   card_scan()->mark_range_as_dirty(obj, words);
 818 
 819   if (promotion) {
 820     // This evacuation was a promotion, track this as allocation against old gen
 821     old_generation()->increase_allocated(words * HeapWordSize);
 822   }
 823 }
 824 
 825 void ShenandoahHeap::handle_old_evacuation_failure() {
 826   if (_old_gen_oom_evac.try_set()) {
 827     log_info(gc)("Old gen evac failure.");
 828   }
 829 }
 830 
 831 void ShenandoahHeap::handle_promotion_failure() {
 832   old_heuristics()->handle_promotion_failure();
 833 }
 834 
 835 void ShenandoahHeap::report_promotion_failure(Thread* thread, size_t size) {
 836   // We squelch excessive reports to reduce noise in logs.  Squelch enforcement is not "perfect" because
 837   // this same code can be in-lined in multiple contexts, and each context will have its own copy of the static
 838   // last_report_epoch and this_epoch_report_count variables.
 839   const size_t MaxReportsPerEpoch = 4;
 840   static size_t last_report_epoch = 0;
 841   static size_t epoch_report_count = 0;
 842 
 843   size_t promotion_reserve;
 844   size_t promotion_expended;
 845 
 846   size_t gc_id = control_thread()->get_gc_id();
 847 
 848   if ((gc_id != last_report_epoch) || (epoch_report_count++ < MaxReportsPerEpoch)) {
 849     {
 850       // Promotion failures should be very rare.  Invest in providing useful diagnostic info.
 851       ShenandoahHeapLocker locker(lock());
 852       promotion_reserve = get_promoted_reserve();
 853       promotion_expended = get_promoted_expended();
 854     }
 855     PLAB* plab = ShenandoahThreadLocalData::plab(thread);
 856     size_t words_remaining = (plab == nullptr)? 0: plab->words_remaining();
 857     const char* promote_enabled = ShenandoahThreadLocalData::allow_plab_promotions(thread)? "enabled": "disabled";
 858 
 859     log_info(gc, ergo)("Promotion failed, size " SIZE_FORMAT ", has plab? %s, PLAB remaining: " SIZE_FORMAT
 860                        ", plab promotions %s, promotion reserve: " SIZE_FORMAT ", promotion expended: " SIZE_FORMAT,
 861                        size, plab == nullptr? "no": "yes",
 862                        words_remaining, promote_enabled, promotion_reserve, promotion_expended);
 863     if ((gc_id == last_report_epoch) && (epoch_report_count >= MaxReportsPerEpoch)) {
 864       log_info(gc, ergo)("Squelching additional promotion failure reports for current epoch");
 865     } else if (gc_id != last_report_epoch) {
 866       last_report_epoch = gc_id;;
 867       epoch_report_count = 1;
 868     }
 869   }
 870 }
 871 
 872 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
 873   // New object should fit the GCLAB size
 874   size_t min_size = MAX2(size, PLAB::min_size());
 875 
 876   // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
 877   size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
 878 
 879   // Limit growth of GCLABs to ShenandoahMaxEvacLABRatio * the minimum size.  This enables more equitable distribution of
 880   // available evacuation buidget between the many threads that are coordinating in the evacuation effort.
 881   if (ShenandoahMaxEvacLABRatio > 0) {
 882     log_debug(gc, free)("Allocate new gclab: " SIZE_FORMAT ", " SIZE_FORMAT, new_size, PLAB::min_size() * ShenandoahMaxEvacLABRatio);
 883     new_size = MIN2(new_size, PLAB::min_size() * ShenandoahMaxEvacLABRatio);
 884   }
 885 
 886   new_size = MIN2(new_size, PLAB::max_size());
 887   new_size = MAX2(new_size, PLAB::min_size());
 888 
 889   // Record new heuristic value even if we take any shortcut. This captures
 890   // the case when moderately-sized objects always take a shortcut. At some point,
 891   // heuristics should catch up with them.
 892   ShenandoahThreadLocalData::set_gclab_size(thread, new_size);
 893 
 894   if (new_size < size) {
 895     // New size still does not fit the object. Fall back to shared allocation.
 896     // This avoids retiring perfectly good GCLABs, when we encounter a large object.
 897     log_debug(gc, free)("New gclab size (" SIZE_FORMAT ") is too small for " SIZE_FORMAT, new_size, size);
 898     return nullptr;
 899   }
 900 
 901   // Retire current GCLAB, and allocate a new one.
 902   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
 903   gclab->retire();
 904 
 905   size_t actual_size = 0;
 906   HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
 907   if (gclab_buf == nullptr) {
 908     return nullptr;
 909   }
 910 
 911   assert (size <= actual_size, "allocation should fit");
 912 
 913   if (ZeroTLAB) {
 914     // ..and clear it.
 915     Copy::zero_to_words(gclab_buf, actual_size);
 916   } else {
 917     // ...and zap just allocated object.
 918 #ifdef ASSERT
 919     // Skip mangling the space corresponding to the object header to
 920     // ensure that the returned space is not considered parsable by
 921     // any concurrent GC thread.
 922     size_t hdr_size = oopDesc::header_size();
 923     Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
 924 #endif // ASSERT
 925   }
 926   gclab->set_buf(gclab_buf, actual_size);
 927   return gclab->allocate(size);
 928 }
 929 
 930 // Establish a new PLAB and allocate size HeapWords within it.
 931 HeapWord* ShenandoahHeap::allocate_from_plab_slow(Thread* thread, size_t size, bool is_promotion) {
 932   // New object should fit the PLAB size
 933   size_t min_size = MAX2(size, PLAB::min_size());
 934 
 935   // Figure out size of new PLAB, looking back at heuristics. Expand aggressively.
 936   size_t cur_size = ShenandoahThreadLocalData::plab_size(thread);
 937   if (cur_size == 0) {
 938     cur_size = PLAB::min_size();
 939   }
 940   size_t future_size = cur_size * 2;
 941   // Limit growth of PLABs to ShenandoahMaxEvacLABRatio * the minimum size.  This enables more equitable distribution of
 942   // available evacuation buidget between the many threads that are coordinating in the evacuation effort.
 943   if (ShenandoahMaxEvacLABRatio > 0) {
 944     future_size = MIN2(future_size, PLAB::min_size() * ShenandoahMaxEvacLABRatio);
 945   }
 946   future_size = MIN2(future_size, PLAB::max_size());
 947   future_size = MAX2(future_size, PLAB::min_size());
 948 
 949   size_t unalignment = future_size % CardTable::card_size_in_words();
 950   if (unalignment != 0) {
 951     future_size = future_size - unalignment + CardTable::card_size_in_words();
 952   }
 953 
 954   // Record new heuristic value even if we take any shortcut. This captures
 955   // the case when moderately-sized objects always take a shortcut. At some point,
 956   // heuristics should catch up with them.  Note that the requested cur_size may
 957   // not be honored, but we remember that this is the preferred size.
 958   ShenandoahThreadLocalData::set_plab_size(thread, future_size);
 959   if (cur_size < size) {
 960     // The PLAB to be allocated is still not large enough to hold the object. Fall back to shared allocation.
 961     // This avoids retiring perfectly good PLABs in order to represent a single large object allocation.
 962     return nullptr;
 963   }
 964 
 965   // Retire current PLAB, and allocate a new one.
 966   PLAB* plab = ShenandoahThreadLocalData::plab(thread);
 967   if (plab->words_remaining() < PLAB::min_size()) {
 968     // Retire current PLAB, and allocate a new one.
 969     // CAUTION: retire_plab may register the remnant filler object with the remembered set scanner without a lock.  This
 970     // is safe iff it is assured that each PLAB is a whole-number multiple of card-mark memory size and each PLAB is
 971     // aligned with the start of a card's memory range.
 972 
 973     retire_plab(plab, thread);
 974 
 975     size_t actual_size = 0;
 976     // allocate_new_plab resets plab_evacuated and plab_promoted and disables promotions if old-gen available is
 977     // less than the remaining evacuation need.  It also adjusts plab_preallocated and expend_promoted if appropriate.
 978     HeapWord* plab_buf = allocate_new_plab(min_size, cur_size, &actual_size);
 979     if (plab_buf == nullptr) {
 980       return nullptr;
 981     } else {
 982       ShenandoahThreadLocalData::enable_plab_retries(thread);
 983     }
 984     assert (size <= actual_size, "allocation should fit");
 985     if (ZeroTLAB) {
 986       // ..and clear it.
 987       Copy::zero_to_words(plab_buf, actual_size);
 988     } else {
 989       // ...and zap just allocated object.
 990 #ifdef ASSERT
 991       // Skip mangling the space corresponding to the object header to
 992       // ensure that the returned space is not considered parsable by
 993       // any concurrent GC thread.
 994       size_t hdr_size = oopDesc::header_size();
 995       Copy::fill_to_words(plab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
 996 #endif // ASSERT
 997     }
 998     plab->set_buf(plab_buf, actual_size);
 999 
1000     if (is_promotion && !ShenandoahThreadLocalData::allow_plab_promotions(thread)) {
1001       return nullptr;
1002     }
1003     return plab->allocate(size);
1004   } else {
1005     // If there's still at least min_size() words available within the current plab, don't retire it.  Let's gnaw
1006     // away on this plab as long as we can.  Meanwhile, return nullptr to force this particular allocation request
1007     // to be satisfied with a shared allocation.  By packing more promotions into the previously allocated PLAB, we
1008     // reduce the likelihood of evacuation failures, and we we reduce the need for downsizing our PLABs.
1009     return nullptr;
1010   }
1011 }
1012 
1013 // TODO: It is probably most efficient to register all objects (both promotions and evacuations) that were allocated within
1014 // this plab at the time we retire the plab.  A tight registration loop will run within both code and data caches.  This change
1015 // would allow smaller and faster in-line implementation of alloc_from_plab().  Since plabs are aligned on card-table boundaries,
1016 // this object registration loop can be performed without acquiring a lock.
1017 void ShenandoahHeap::retire_plab(PLAB* plab, Thread* thread) {
1018   // We don't enforce limits on plab_evacuated.  We let it consume all available old-gen memory in order to reduce
1019   // probability of an evacuation failure.  We do enforce limits on promotion, to make sure that excessive promotion
1020   // does not result in an old-gen evacuation failure.  Note that a failed promotion is relatively harmless.  Any
1021   // object that fails to promote in the current cycle will be eligible for promotion in a subsequent cycle.
1022 
1023   // When the plab was instantiated, its entirety was treated as if the entire buffer was going to be dedicated to
1024   // promotions.  Now that we are retiring the buffer, we adjust for the reality that the plab is not entirely promotions.
1025   //  1. Some of the plab may have been dedicated to evacuations.
1026   //  2. Some of the plab may have been abandoned due to waste (at the end of the plab).
1027   size_t not_promoted =
1028     ShenandoahThreadLocalData::get_plab_preallocated_promoted(thread) - ShenandoahThreadLocalData::get_plab_promoted(thread);
1029   ShenandoahThreadLocalData::reset_plab_promoted(thread);
1030   ShenandoahThreadLocalData::reset_plab_evacuated(thread);
1031   ShenandoahThreadLocalData::set_plab_preallocated_promoted(thread, 0);
1032   if (not_promoted > 0) {
1033     unexpend_promoted(not_promoted);
1034   }
1035   size_t waste = plab->waste();
1036   HeapWord* top = plab->top();
1037   plab->retire();
1038   if (top != nullptr && plab->waste() > waste && is_in_old(top)) {
1039     // If retiring the plab created a filler object, then we
1040     // need to register it with our card scanner so it can
1041     // safely walk the region backing the plab.
1042     log_debug(gc)("retire_plab() is registering remnant of size " SIZE_FORMAT " at " PTR_FORMAT,
1043                   plab->waste() - waste, p2i(top));
1044     card_scan()->register_object_wo_lock(top);
1045   }
1046 }
1047 
1048 void ShenandoahHeap::retire_plab(PLAB* plab) {
1049   Thread* thread = Thread::current();
1050   retire_plab(plab, thread);
1051 }
1052 
1053 void ShenandoahHeap::cancel_old_gc() {
1054   shenandoah_assert_safepoint();
1055   assert(_old_generation != nullptr, "Should only have mixed collections in generation mode.");
1056   log_info(gc)("Terminating old gc cycle.");
1057 
1058   // Stop marking
1059   old_generation()->cancel_marking();
1060   // Stop coalescing undead objects
1061   set_prepare_for_old_mark_in_progress(false);
1062   // Stop tracking old regions
1063   old_heuristics()->abandon_collection_candidates();
1064   // Remove old generation access to young generation mark queues
1065   young_generation()->set_old_gen_task_queues(nullptr);
1066   // Transition to IDLE now.
1067   _old_generation->transition_to(ShenandoahOldGeneration::IDLE);
1068 }
1069 
1070 bool ShenandoahHeap::is_old_gc_active() {
1071   return is_concurrent_old_mark_in_progress()
1072          || is_prepare_for_old_mark_in_progress()
1073          || old_heuristics()->unprocessed_old_collection_candidates() > 0
1074          || young_generation()->old_gen_task_queues() != nullptr;
1075 }
1076 
1077 void ShenandoahHeap::coalesce_and_fill_old_regions() {
1078   class ShenandoahGlobalCoalesceAndFill : public ShenandoahHeapRegionClosure {
1079    public:
1080     virtual void heap_region_do(ShenandoahHeapRegion* region) override {
1081       // old region is not in the collection set and was not immediately trashed
1082       if (region->is_old() && region->is_active() && !region->is_humongous()) {
1083         // Reset the coalesce and fill boundary because this is a global collect
1084         // and cannot be preempted by young collects. We want to be sure the entire
1085         // region is coalesced here and does not resume from a previously interrupted
1086         // or completed coalescing.
1087         region->begin_preemptible_coalesce_and_fill();
1088         region->oop_fill_and_coalesce();
1089       }
1090     }
1091 
1092     virtual bool is_thread_safe() override {
1093       return true;
1094     }
1095   };
1096   ShenandoahGlobalCoalesceAndFill coalesce;
1097   parallel_heap_region_iterate(&coalesce);
1098 }
1099 
1100 bool ShenandoahHeap::adjust_generation_sizes() {
1101   if (mode()->is_generational()) {
1102     return _generation_sizer.adjust_generation_sizes();
1103   }
1104   return false;
1105 }
1106 
1107 // Called from stubs in JIT code or interpreter
1108 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
1109                                             size_t requested_size,
1110                                             size_t* actual_size) {
1111   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
1112   HeapWord* res = allocate_memory(req, false);
1113   if (res != nullptr) {
1114     *actual_size = req.actual_size();
1115   } else {
1116     *actual_size = 0;
1117   }
1118   return res;
1119 }
1120 
1121 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
1122                                              size_t word_size,
1123                                              size_t* actual_size) {
1124   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
1125   HeapWord* res = allocate_memory(req, false);
1126   if (res != nullptr) {
1127     *actual_size = req.actual_size();
1128   } else {
1129     *actual_size = 0;
1130   }
1131   return res;
1132 }
1133 
1134 HeapWord* ShenandoahHeap::allocate_new_plab(size_t min_size,
1135                                             size_t word_size,
1136                                             size_t* actual_size) {
1137   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_plab(min_size, word_size);
1138   // Note that allocate_memory() sets a thread-local flag to prohibit further promotions by this thread
1139   // if we are at risk of exceeding the old-gen evacuation budget.
1140   HeapWord* res = allocate_memory(req, false);
1141   if (res != nullptr) {
1142     *actual_size = req.actual_size();
1143   } else {
1144     *actual_size = 0;
1145   }
1146   return res;
1147 }
1148 
1149 // is_promotion is true iff this allocation is known for sure to hold the result of young-gen evacuation
1150 // to old-gen.  plab allocates are not known as such, since they may hold old-gen evacuations.
1151 HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req, bool is_promotion) {
1152   intptr_t pacer_epoch = 0;
1153   bool in_new_region = false;
1154   HeapWord* result = nullptr;
1155 
1156   if (req.is_mutator_alloc()) {
1157     if (ShenandoahPacing) {
1158       pacer()->pace_for_alloc(req.size());
1159       pacer_epoch = pacer()->epoch();
1160     }
1161 
1162     if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
1163       result = allocate_memory_under_lock(req, in_new_region, is_promotion);
1164     }
1165 
1166     // Allocation failed, block until control thread reacted, then retry allocation.
1167     //
1168     // It might happen that one of the threads requesting allocation would unblock
1169     // way later after GC happened, only to fail the second allocation, because
1170     // other threads have already depleted the free storage. In this case, a better
1171     // strategy is to try again, as long as GC makes progress.
1172     //
1173     // Then, we need to make sure the allocation was retried after at least one
1174     // Full GC, which means we want to try more than ShenandoahFullGCThreshold times.
1175     size_t tries = 0;
1176     while (result == nullptr && _progress_last_gc.is_set()) {
1177       tries++;
1178       control_thread()->handle_alloc_failure(req);
1179       result = allocate_memory_under_lock(req, in_new_region, is_promotion);
1180     }
1181     while (result == nullptr && tries <= ShenandoahFullGCThreshold) {
1182       tries++;
1183       control_thread()->handle_alloc_failure(req);
1184       result = allocate_memory_under_lock(req, in_new_region, is_promotion);
1185     }
1186   } else {
1187     assert(req.is_gc_alloc(), "Can only accept GC allocs here");
1188     result = allocate_memory_under_lock(req, in_new_region, is_promotion);
1189     // Do not call handle_alloc_failure() here, because we cannot block.
1190     // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
1191   }
1192 
1193   if (in_new_region) {
1194     control_thread()->notify_heap_changed();
1195     regulator_thread()->notify_heap_changed();
1196   }
1197 
1198   if (result != nullptr) {
1199     ShenandoahGeneration* alloc_generation = generation_for(req.affiliation());
1200     size_t requested = req.size();
1201     size_t actual = req.actual_size();
1202     size_t actual_bytes = actual * HeapWordSize;
1203 
1204     assert (req.is_lab_alloc() || (requested == actual),
1205             "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
1206             ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);
1207 
1208     if (req.is_mutator_alloc()) {
1209       notify_mutator_alloc_words(actual, false);
1210       alloc_generation->increase_allocated(actual_bytes);
1211 
1212       // If we requested more than we were granted, give the rest back to pacer.
1213       // This only matters if we are in the same pacing epoch: do not try to unpace
1214       // over the budget for the other phase.
1215       if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
1216         pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
1217       }
1218     } else {
1219       increase_used(actual_bytes);
1220     }
1221   }
1222 
1223   return result;
1224 }
1225 
1226 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region, bool is_promotion) {
1227   bool try_smaller_lab_size = false;
1228   size_t smaller_lab_size;
1229   {
1230     // promotion_eligible pertains only to PLAB allocations, denoting that the PLAB is allowed to allocate for promotions.
1231     bool promotion_eligible = false;
1232     bool allow_allocation = true;
1233     bool plab_alloc = false;
1234     size_t requested_bytes = req.size() * HeapWordSize;
1235     HeapWord* result = nullptr;
1236     ShenandoahHeapLocker locker(lock());
1237     Thread* thread = Thread::current();
1238 
1239     if (mode()->is_generational()) {
1240       if (req.affiliation() == YOUNG_GENERATION) {
1241         if (req.is_mutator_alloc()) {
1242           size_t young_available = young_generation()->adjusted_available();
1243           if (requested_bytes > young_available) {
1244             // We know this is not a GCLAB.  This must be a TLAB or a shared allocation.
1245             if (req.is_lab_alloc() && (young_available >= req.min_size())) {
1246               try_smaller_lab_size = true;
1247               smaller_lab_size = young_available / HeapWordSize;
1248             } else {
1249               // Can't allocate because even min_size() is larger than remaining young_available
1250               log_info(gc, ergo)("Unable to shrink %s alloc request of minimum size: " SIZE_FORMAT
1251                                  ", young available: " SIZE_FORMAT,
1252                                  req.is_lab_alloc()? "TLAB": "shared",
1253                                  HeapWordSize * (req.is_lab_alloc()? req.min_size(): req.size()), young_available);
1254               return nullptr;
1255             }
1256           }
1257         }
1258       } else {                    // reg.affiliation() == OLD_GENERATION
1259         assert(req.type() != ShenandoahAllocRequest::_alloc_gclab, "GCLAB pertains only to young-gen memory");
1260         if (req.type() ==  ShenandoahAllocRequest::_alloc_plab) {
1261           plab_alloc = true;
1262           size_t promotion_avail = get_promoted_reserve();
1263           size_t promotion_expended = get_promoted_expended();
1264           if (promotion_expended + requested_bytes > promotion_avail) {
1265             promotion_avail = 0;
1266             if (get_old_evac_reserve() == 0) {
1267               // There are no old-gen evacuations in this pass.  There's no value in creating a plab that cannot
1268               // be used for promotions.
1269               allow_allocation = false;
1270             }
1271           } else {
1272             promotion_avail = promotion_avail - (promotion_expended + requested_bytes);
1273             promotion_eligible = true;
1274           }
1275         } else if (is_promotion) {
1276           // This is a shared alloc for promotion
1277           size_t promotion_avail = get_promoted_reserve();
1278           size_t promotion_expended = get_promoted_expended();
1279           if (promotion_expended + requested_bytes > promotion_avail) {
1280             promotion_avail = 0;
1281           } else {
1282             promotion_avail = promotion_avail - (promotion_expended + requested_bytes);
1283           }
1284           if (promotion_avail == 0) {
1285             // We need to reserve the remaining memory for evacuation.  Reject this allocation.  The object will be
1286             // evacuated to young-gen memory and promoted during a future GC pass.
1287             return nullptr;
1288           }
1289           // Else, we'll allow the allocation to proceed.  (Since we hold heap lock, the tested condition remains true.)
1290         } else {
1291           // This is a shared allocation for evacuation.  Memory has already been reserved for this purpose.
1292         }
1293       }
1294     } // This ends the is_generational() block
1295 
1296     if (!try_smaller_lab_size) {
1297       result = (allow_allocation)? _free_set->allocate(req, in_new_region): nullptr;
1298       if (result != nullptr) {
1299         if (req.affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION) {
1300           ShenandoahThreadLocalData::reset_plab_promoted(thread);
1301           if (req.is_gc_alloc()) {
1302             if (req.type() ==  ShenandoahAllocRequest::_alloc_plab) {
1303               if (promotion_eligible) {
1304                 size_t actual_size = req.actual_size() * HeapWordSize;
1305                 // Assume the entirety of this PLAB will be used for promotion.  This prevents promotion from overreach.
1306                 // When we retire this plab, we'll unexpend what we don't really use.
1307                 ShenandoahThreadLocalData::enable_plab_promotions(thread);
1308                 expend_promoted(actual_size);
1309                 assert(get_promoted_expended() <= get_promoted_reserve(), "Do not expend more promotion than budgeted");
1310                 ShenandoahThreadLocalData::set_plab_preallocated_promoted(thread, actual_size);
1311               } else {
1312                 // Disable promotions in this thread because entirety of this PLAB must be available to hold old-gen evacuations.
1313                 ShenandoahThreadLocalData::disable_plab_promotions(thread);
1314                 ShenandoahThreadLocalData::set_plab_preallocated_promoted(thread, 0);
1315               }
1316             } else if (is_promotion) {
1317               // Shared promotion.  Assume size is requested_bytes.
1318               expend_promoted(requested_bytes);
1319               assert(get_promoted_expended() <= get_promoted_reserve(), "Do not expend more promotion than budgeted");
1320             }
1321           }
1322 
1323           // Register the newly allocated object while we're holding the global lock since there's no synchronization
1324           // built in to the implementation of register_object().  There are potential races when multiple independent
1325           // threads are allocating objects, some of which might span the same card region.  For example, consider
1326           // a card table's memory region within which three objects are being allocated by three different threads:
1327           //
1328           // objects being "concurrently" allocated:
1329           //    [-----a------][-----b-----][--------------c------------------]
1330           //            [---- card table memory range --------------]
1331           //
1332           // Before any objects are allocated, this card's memory range holds no objects.  Note that allocation of object a
1333           //   wants to set the has-object, first-start, and last-start attributes of the preceding card region.
1334           //   allocation of object b wants to set the has-object, first-start, and last-start attributes of this card region.
1335           //   allocation of object c also wants to set the has-object, first-start, and last-start attributes of this card region.
1336           //
1337           // The thread allocating b and the thread allocating c can "race" in various ways, resulting in confusion, such as
1338           // last-start representing object b while first-start represents object c.  This is why we need to require all
1339           // register_object() invocations to be "mutually exclusive" with respect to each card's memory range.
1340           ShenandoahHeap::heap()->card_scan()->register_object(result);
1341         }
1342       } else {
1343         // The allocation failed.  If this was a plab allocation, We've already retired it and no longer have a plab.
1344         if ((req.affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION) && req.is_gc_alloc() &&
1345             (req.type() == ShenandoahAllocRequest::_alloc_plab)) {
1346           // We don't need to disable PLAB promotions because there is no PLAB.  We leave promotions enabled because
1347           // this allows the surrounding infrastructure to retry alloc_plab_slow() with a smaller PLAB size.
1348           ShenandoahThreadLocalData::set_plab_preallocated_promoted(thread, 0);
1349         }
1350       }
1351       return result;
1352     }
1353     // else, try_smaller_lab_size is true so we fall through and recurse with a smaller lab size
1354   } // This closes the block that holds the heap lock.  This releases the lock.
1355 
1356   // We arrive here if the tlab allocation request can be resized to fit within young_available
1357   assert((req.affiliation() == YOUNG_GENERATION) && req.is_lab_alloc() && req.is_mutator_alloc() &&
1358          (smaller_lab_size < req.size()), "Only shrink allocation request size for TLAB allocations");
1359 
1360   // By convention, ShenandoahAllocationRequest is primarily read-only.  The only mutable instance data is represented by
1361   // actual_size(), which is overwritten with the size of the allocaion when the allocation request is satisfied.  We use a
1362   // recursive call here rather than introducing new methods to mutate the existing ShenandoahAllocationRequest argument.
1363   // Mutation of the existing object might result in astonishing results if calling contexts assume the content of immutable
1364   // fields remain constant.  The original TLAB allocation request was for memory that exceeded the current capacity.  We'll
1365   // attempt to allocate a smaller TLAB.  If this is successful, we'll update actual_size() of our incoming
1366   // ShenandoahAllocRequest.  If the recursive request fails, we'll simply return nullptr.
1367 
1368   // Note that we've relinquished the HeapLock and some other thread may perform additional allocation before our recursive
1369   // call reacquires the lock.  If that happens, we will need another recursive call to further reduce the size of our request
1370   // for each time another thread allocates young memory during the brief intervals that the heap lock is available to
1371   // interfering threads.  We expect this interference to be rare.  The recursion bottoms out when young_available is
1372   // smaller than req.min_size().  The inner-nested call to allocate_memory_under_lock() uses the same min_size() value
1373   // as this call, but it uses a preferred size() that is smaller than our preferred size, and is no larger than what we most
1374   // recently saw as the memory currently available within the young generation.
1375 
1376   // TODO: At the expense of code clarity, we could rewrite this recursive solution to use iteration.  We need at most one
1377   // extra instance of the ShenandoahAllocRequest, which we can re-initialize multiple times inside a loop, with one iteration
1378   // of the loop required for each time the existing solution would recurse.  An iterative solution would be more efficient
1379   // in CPU time and stack memory utilization.  The expectation is that it is very rare that we would recurse more than once
1380   // so making this change is not currently seen as a high priority.
1381 
1382   ShenandoahAllocRequest smaller_req = ShenandoahAllocRequest::for_tlab(req.min_size(), smaller_lab_size);
1383 
1384   // Note that shrinking the preferred size gets us past the gatekeeper that checks whether there's available memory to
1385   // satisfy the allocation request.  The reality is the actual TLAB size is likely to be even smaller, because it will
1386   // depend on how much memory is available within mutator regions that are not yet fully used.
1387   HeapWord* result = allocate_memory_under_lock(smaller_req, in_new_region, is_promotion);
1388   if (result != nullptr) {
1389     req.set_actual_size(smaller_req.actual_size());
1390   }
1391   return result;
1392 }
1393 
1394 HeapWord* ShenandoahHeap::mem_allocate(size_t size,
1395                                         bool*  gc_overhead_limit_was_exceeded) {
1396   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
1397   return allocate_memory(req, false);
1398 }
1399 
1400 MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
1401                                                              size_t size,
1402                                                              Metaspace::MetadataType mdtype) {
1403   MetaWord* result;
1404 
1405   // Inform metaspace OOM to GC heuristics if class unloading is possible.
1406   ShenandoahHeuristics* h = global_generation()->heuristics();
1407   if (h->can_unload_classes()) {
1408     h->record_metaspace_oom();
1409   }
1410 
1411   // Expand and retry allocation
1412   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1413   if (result != nullptr) {
1414     return result;
1415   }
1416 
1417   // Start full GC
1418   collect(GCCause::_metadata_GC_clear_soft_refs);
1419 
1420   // Retry allocation
1421   result = loader_data->metaspace_non_null()->allocate(size, mdtype);
1422   if (result != nullptr) {
1423     return result;
1424   }
1425 
1426   // Expand and retry allocation
1427   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1428   if (result != nullptr) {
1429     return result;
1430   }
1431 
1432   // Out of memory
1433   return nullptr;
1434 }
1435 
1436 class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
1437 private:
1438   ShenandoahHeap* const _heap;
1439   Thread* const _thread;
1440 public:
1441   ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
1442     _heap(heap), _thread(Thread::current()) {}
1443 
1444   void do_object(oop p) {
1445     shenandoah_assert_marked(nullptr, p);
1446     if (!p->is_forwarded()) {
1447       _heap->evacuate_object(p, _thread);
1448     }
1449   }
1450 };
1451 
1452 class ShenandoahEvacuationTask : public WorkerTask {
1453 private:
1454   ShenandoahHeap* const _sh;
1455   ShenandoahCollectionSet* const _cs;
1456   bool _concurrent;
1457 public:
1458   ShenandoahEvacuationTask(ShenandoahHeap* sh,
1459                            ShenandoahCollectionSet* cs,
1460                            bool concurrent) :
1461     WorkerTask("Shenandoah Evacuation"),
1462     _sh(sh),
1463     _cs(cs),
1464     _concurrent(concurrent)
1465   {}
1466 
1467   void work(uint worker_id) {
1468     if (_concurrent) {
1469       ShenandoahConcurrentWorkerSession worker_session(worker_id);
1470       ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
1471       ShenandoahEvacOOMScope oom_evac_scope;
1472       do_work();
1473     } else {
1474       ShenandoahParallelWorkerSession worker_session(worker_id);
1475       ShenandoahEvacOOMScope oom_evac_scope;
1476       do_work();
1477     }
1478   }
1479 
1480 private:
1481   void do_work() {
1482     ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
1483     ShenandoahHeapRegion* r;
1484     while ((r =_cs->claim_next()) != nullptr) {
1485       assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
1486 
1487       _sh->marked_object_iterate(r, &cl);
1488 
1489       if (ShenandoahPacing) {
1490         _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
1491       }
1492       if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
1493         break;
1494       }
1495     }
1496   }
1497 };
1498 
1499 // Unlike ShenandoahEvacuationTask, this iterates over all regions rather than just the collection set.
1500 // This is needed in order to promote humongous start regions if age() >= tenure threshold.
1501 class ShenandoahGenerationalEvacuationTask : public WorkerTask {
1502 private:
1503   ShenandoahHeap* const _sh;
1504   ShenandoahRegionIterator *_regions;
1505   bool _concurrent;
1506 public:
1507   ShenandoahGenerationalEvacuationTask(ShenandoahHeap* sh,
1508                                        ShenandoahRegionIterator* iterator,
1509                                        bool concurrent) :
1510     WorkerTask("Shenandoah Evacuation"),
1511     _sh(sh),
1512     _regions(iterator),
1513     _concurrent(concurrent)
1514   {}
1515 
1516   void work(uint worker_id) {
1517     if (_concurrent) {
1518       ShenandoahConcurrentWorkerSession worker_session(worker_id);
1519       ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
1520       ShenandoahEvacOOMScope oom_evac_scope;
1521       do_work();
1522     } else {
1523       ShenandoahParallelWorkerSession worker_session(worker_id);
1524       ShenandoahEvacOOMScope oom_evac_scope;
1525       do_work();
1526     }
1527   }
1528 
1529 private:
1530   void do_work() {
1531     ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
1532     ShenandoahHeapRegion* r;
1533     while ((r = _regions->next()) != nullptr) {
1534       log_debug(gc)("GenerationalEvacuationTask do_work(), looking at %s region " SIZE_FORMAT ", (age: %d) [%s, %s]",
1535                     r->is_old()? "old": r->is_young()? "young": "free", r->index(), r->age(),
1536                     r->is_active()? "active": "inactive",
1537                     r->is_humongous()? (r->is_humongous_start()? "humongous_start": "humongous_continuation"): "regular");
1538       if (r->is_cset()) {
1539         assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
1540         _sh->marked_object_iterate(r, &cl);
1541         if (ShenandoahPacing) {
1542           _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
1543         }
1544       } else if (r->is_young() && r->is_active() && r->is_humongous_start() && (r->age() > InitialTenuringThreshold)) {
1545         // We promote humongous_start regions along with their affiliated continuations during evacuation rather than
1546         // doing this work during a safepoint.  We cannot put humongous regions into the collection set because that
1547         // triggers the load-reference barrier (LRB) to copy on reference fetch.
1548         if (r->promote_humongous() == 0) {
1549           // We chose not to promote because old-gen is out of memory.  Report and handle the promotion failure because
1550           // this suggests need for expanding old-gen and/or performing collection of old-gen.
1551           ShenandoahHeap* heap = ShenandoahHeap::heap();
1552           oop obj = cast_to_oop(r->bottom());
1553           size_t size = obj->size();
1554           Thread* thread = Thread::current();
1555           heap->report_promotion_failure(thread, size);
1556           heap->handle_promotion_failure();
1557         }
1558       }
1559       // else, region is free, or OLD, or not in collection set, or humongous_continuation,
1560       // or is young humongous_start that is too young to be promoted
1561 
1562       if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
1563         break;
1564       }
1565     }
1566   }
1567 };
1568 
1569 void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
1570   if (ShenandoahHeap::heap()->mode()->is_generational()) {
1571     ShenandoahRegionIterator regions;
1572     ShenandoahGenerationalEvacuationTask task(this, &regions, concurrent);
1573     workers()->run_task(&task);
1574   } else {
1575     ShenandoahEvacuationTask task(this, _collection_set, concurrent);
1576     workers()->run_task(&task);
1577   }
1578 }
1579 
1580 void ShenandoahHeap::trash_cset_regions() {
1581   ShenandoahHeapLocker locker(lock());
1582 
1583   ShenandoahCollectionSet* set = collection_set();
1584   ShenandoahHeapRegion* r;
1585   set->clear_current_index();
1586   while ((r = set->next()) != nullptr) {
1587     r->make_trash();
1588   }
1589   collection_set()->clear();
1590 }
1591 
1592 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
1593   st->print_cr("Heap Regions:");
1594   st->print_cr("EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HC=humongous continuation, CS=collection set, T=trash, P=pinned");
1595   st->print_cr("BTE=bottom/top/end, U=used, T=TLAB allocs, G=GCLAB allocs, S=shared allocs, L=live data");
1596   st->print_cr("R=root, CP=critical pins, TAMS=top-at-mark-start, UWM=update watermark");
1597   st->print_cr("SN=alloc sequence number");
1598 
1599   for (size_t i = 0; i < num_regions(); i++) {
1600     get_region(i)->print_on(st);
1601   }
1602 }
1603 
1604 size_t ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
1605   assert(start->is_humongous_start(), "reclaim regions starting with the first one");
1606 
1607   oop humongous_obj = cast_to_oop(start->bottom());
1608   size_t size = humongous_obj->size();
1609   size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
1610   size_t index = start->index() + required_regions - 1;
1611 
1612   assert(!start->has_live(), "liveness must be zero");
1613 
1614   for(size_t i = 0; i < required_regions; i++) {
1615     // Reclaim from tail. Otherwise, assertion fails when printing region to trace log,
1616     // as it expects that every region belongs to a humongous region starting with a humongous start region.
1617     ShenandoahHeapRegion* region = get_region(index --);
1618 
1619     assert(region->is_humongous(), "expect correct humongous start or continuation");
1620     assert(!region->is_cset(), "Humongous region should not be in collection set");
1621 
1622     region->make_trash_immediate();
1623   }
1624   return required_regions;
1625 }
1626 
1627 class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
1628 public:
1629   ShenandoahCheckCleanGCLABClosure() {}
1630   void do_thread(Thread* thread) {
1631     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1632     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1633     assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
1634 
1635     PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1636     assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
1637     assert(plab->words_remaining() == 0, "PLAB should not need retirement");
1638   }
1639 };
1640 
1641 class ShenandoahRetireGCLABClosure : public ThreadClosure {
1642 private:
1643   bool const _resize;
1644 public:
1645   ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
1646   void do_thread(Thread* thread) {
1647     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1648     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1649     gclab->retire();
1650     if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1651       ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1652     }
1653 
1654     PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1655     assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
1656 
1657     // There are two reasons to retire all plabs between old-gen evacuation passes.
1658     //  1. We need to make the plab memory parseable by remembered-set scanning.
1659     //  2. We need to establish a trustworthy UpdateWaterMark value within each old-gen heap region
1660     ShenandoahHeap::heap()->retire_plab(plab, thread);
1661     if (_resize && ShenandoahThreadLocalData::plab_size(thread) > 0) {
1662       ShenandoahThreadLocalData::set_plab_size(thread, 0);
1663     }
1664   }
1665 };
1666 
1667 void ShenandoahHeap::labs_make_parsable() {
1668   assert(UseTLAB, "Only call with UseTLAB");
1669 
1670   ShenandoahRetireGCLABClosure cl(false);
1671 
1672   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1673     ThreadLocalAllocBuffer& tlab = t->tlab();
1674     tlab.make_parsable();
1675     cl.do_thread(t);
1676   }
1677 
1678   workers()->threads_do(&cl);
1679 }
1680 
1681 void ShenandoahHeap::tlabs_retire(bool resize) {
1682   assert(UseTLAB, "Only call with UseTLAB");
1683   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1684 
1685   ThreadLocalAllocStats stats;
1686 
1687   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1688     ThreadLocalAllocBuffer& tlab = t->tlab();
1689     tlab.retire(&stats);
1690     if (resize) {
1691       tlab.resize();
1692     }
1693   }
1694 
1695   stats.publish();
1696 
1697 #ifdef ASSERT
1698   ShenandoahCheckCleanGCLABClosure cl;
1699   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1700     cl.do_thread(t);
1701   }
1702   workers()->threads_do(&cl);
1703 #endif
1704 }
1705 
1706 void ShenandoahHeap::gclabs_retire(bool resize) {
1707   assert(UseTLAB, "Only call with UseTLAB");
1708   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1709 
1710   ShenandoahRetireGCLABClosure cl(resize);
1711   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1712     cl.do_thread(t);
1713   }
1714   workers()->threads_do(&cl);
1715 
1716   if (safepoint_workers() != nullptr) {
1717     safepoint_workers()->threads_do(&cl);
1718   }
1719 }
1720 
1721 class ShenandoahTagGCLABClosure : public ThreadClosure {
1722 public:
1723   void do_thread(Thread* thread) {
1724     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1725     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1726     if (gclab->words_remaining() > 0) {
1727       ShenandoahHeapRegion* r = ShenandoahHeap::heap()->heap_region_containing(gclab->allocate(0));
1728       r->set_young_lab_flag();
1729     }
1730   }
1731 };
1732 
1733 void ShenandoahHeap::set_young_lab_region_flags() {
1734   if (!UseTLAB) {
1735     return;
1736   }
1737   for (size_t i = 0; i < _num_regions; i++) {
1738     _regions[i]->clear_young_lab_flags();
1739   }
1740   ShenandoahTagGCLABClosure cl;
1741   workers()->threads_do(&cl);
1742   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1743     cl.do_thread(t);
1744     ThreadLocalAllocBuffer& tlab = t->tlab();
1745     if (tlab.end() != nullptr) {
1746       ShenandoahHeapRegion* r = heap_region_containing(tlab.start());
1747       r->set_young_lab_flag();
1748     }
1749   }
1750 }
1751 
1752 // Returns size in bytes
1753 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1754   if (ShenandoahElasticTLAB) {
1755     if (mode()->is_generational()) {
1756       return MIN2(ShenandoahHeapRegion::max_tlab_size_bytes(), young_generation()->adjusted_available());
1757     } else {
1758       // With Elastic TLABs, return the max allowed size, and let the allocation path
1759       // figure out the safe size for current allocation.
1760       return ShenandoahHeapRegion::max_tlab_size_bytes();
1761     }
1762   } else {
1763     return MIN2(_free_set->unsafe_peek_free(), ShenandoahHeapRegion::max_tlab_size_bytes());
1764   }
1765 }
1766 
1767 size_t ShenandoahHeap::max_tlab_size() const {
1768   // Returns size in words
1769   return ShenandoahHeapRegion::max_tlab_size_words();
1770 }
1771 
1772 void ShenandoahHeap::collect(GCCause::Cause cause) {
1773   control_thread()->request_gc(cause);
1774 }
1775 
1776 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
1777   //assert(false, "Shouldn't need to do full collections");
1778 }
1779 
1780 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1781   ShenandoahHeapRegion* r = heap_region_containing(addr);
1782   if (r != nullptr) {
1783     return r->block_start(addr);
1784   }
1785   return nullptr;
1786 }
1787 
1788 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1789   ShenandoahHeapRegion* r = heap_region_containing(addr);
1790   return r->block_is_obj(addr);
1791 }
1792 
1793 bool ShenandoahHeap::print_location(outputStream* st, void* addr) const {
1794   return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr);
1795 }
1796 
1797 void ShenandoahHeap::prepare_for_verify() {
1798   if (SafepointSynchronize::is_at_safepoint() && UseTLAB) {
1799     labs_make_parsable();
1800   }
1801 }
1802 
1803 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1804   if (_shenandoah_policy->is_at_shutdown()) {
1805     return;
1806   }
1807 
1808   tcl->do_thread(_control_thread);
1809   tcl->do_thread(_regulator_thread);
1810   workers()->threads_do(tcl);
1811   if (_safepoint_workers != nullptr) {
1812     _safepoint_workers->threads_do(tcl);
1813   }
1814   if (ShenandoahStringDedup::is_enabled()) {
1815     ShenandoahStringDedup::threads_do(tcl);
1816   }
1817 }
1818 
1819 void ShenandoahHeap::print_tracing_info() const {
1820   LogTarget(Info, gc, stats) lt;
1821   if (lt.is_enabled()) {
1822     ResourceMark rm;
1823     LogStream ls(lt);
1824 
1825     phase_timings()->print_global_on(&ls);
1826 
1827     ls.cr();
1828     ls.cr();
1829 
1830     shenandoah_policy()->print_gc_stats(&ls);
1831 
1832     ls.cr();
1833 
1834     evac_tracker()->print_global_on(&ls);
1835 
1836     ls.cr();
1837     ls.cr();
1838   }
1839 }
1840 
1841 void ShenandoahHeap::on_cycle_start(GCCause::Cause cause, ShenandoahGeneration* generation) {
1842   set_gc_cause(cause);
1843   set_gc_generation(generation);
1844 
1845   shenandoah_policy()->record_cycle_start();
1846   generation->heuristics()->record_cycle_start();
1847 
1848   // When a cycle starts, attribute any thread activity when the collector
1849   // is idle to the global generation.
1850   _mmu_tracker.record(global_generation());
1851 }
1852 
1853 void ShenandoahHeap::on_cycle_end(ShenandoahGeneration* generation) {
1854   generation->heuristics()->record_cycle_end();
1855 
1856   if (mode()->is_generational() &&
1857       ((generation->generation_mode() == GLOBAL) || upgraded_to_full())) {
1858     // If we just completed a GLOBAL GC, claim credit for completion of young-gen and old-gen GC as well
1859     young_generation()->heuristics()->record_cycle_end();
1860     old_generation()->heuristics()->record_cycle_end();
1861   }
1862   set_gc_cause(GCCause::_no_gc);
1863 
1864   // When a cycle ends, the thread activity is attributed to the respective generation
1865   _mmu_tracker.record(generation);
1866 }
1867 
1868 void ShenandoahHeap::verify(VerifyOption vo) {
1869   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1870     if (ShenandoahVerify) {
1871       verifier()->verify_generic(vo);
1872     } else {
1873       // TODO: Consider allocating verification bitmaps on demand,
1874       // and turn this on unconditionally.
1875     }
1876   }
1877 }
1878 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1879   return _free_set->capacity();
1880 }
1881 
1882 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1883 private:
1884   MarkBitMap* _bitmap;
1885   ShenandoahScanObjectStack* _oop_stack;
1886   ShenandoahHeap* const _heap;
1887   ShenandoahMarkingContext* const _marking_context;
1888 
1889   template <class T>
1890   void do_oop_work(T* p) {
1891     T o = RawAccess<>::oop_load(p);
1892     if (!CompressedOops::is_null(o)) {
1893       oop obj = CompressedOops::decode_not_null(o);
1894       if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
1895         // There may be dead oops in weak roots in concurrent root phase, do not touch them.
1896         return;
1897       }
1898       obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
1899 
1900       assert(oopDesc::is_oop(obj), "must be a valid oop");
1901       if (!_bitmap->is_marked(obj)) {
1902         _bitmap->mark(obj);
1903         _oop_stack->push(obj);
1904       }
1905     }
1906   }
1907 public:
1908   ObjectIterateScanRootClosure(MarkBitMap* bitmap, ShenandoahScanObjectStack* oop_stack) :
1909     _bitmap(bitmap), _oop_stack(oop_stack), _heap(ShenandoahHeap::heap()),
1910     _marking_context(_heap->marking_context()) {}
1911   void do_oop(oop* p)       { do_oop_work(p); }
1912   void do_oop(narrowOop* p) { do_oop_work(p); }
1913 };
1914 
1915 /*
1916  * This is public API, used in preparation of object_iterate().
1917  * Since we don't do linear scan of heap in object_iterate() (see comment below), we don't
1918  * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can
1919  * control, we call SH::tlabs_retire, SH::gclabs_retire.
1920  */
1921 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
1922   // No-op.
1923 }
1924 
1925 /*
1926  * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
1927  *
1928  * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
1929  * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
1930  * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
1931  * scanning therefore depends on having a valid marking bitmap to support it. However, we only
1932  * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
1933  * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
1934  * wiped the bitmap in preparation for next marking).
1935  *
1936  * For all those reasons, we implement object iteration as a single marking traversal, reporting
1937  * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1938  * is allowed to report dead objects, but is not required to do so.
1939  */
1940 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1941   // Reset bitmap
1942   if (!prepare_aux_bitmap_for_iteration())
1943     return;
1944 
1945   ShenandoahScanObjectStack oop_stack;
1946   ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1947   // Seed the stack with root scan
1948   scan_roots_for_iteration(&oop_stack, &oops);
1949 
1950   // Work through the oop stack to traverse heap
1951   while (! oop_stack.is_empty()) {
1952     oop obj = oop_stack.pop();
1953     assert(oopDesc::is_oop(obj), "must be a valid oop");
1954     cl->do_object(obj);
1955     obj->oop_iterate(&oops);
1956   }
1957 
1958   assert(oop_stack.is_empty(), "should be empty");
1959   // Reclaim bitmap
1960   reclaim_aux_bitmap_for_iteration();
1961 }
1962 
1963 bool ShenandoahHeap::prepare_aux_bitmap_for_iteration() {
1964   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1965 
1966   if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1967     log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1968     return false;
1969   }
1970   // Reset bitmap
1971   _aux_bit_map.clear();
1972   return true;
1973 }
1974 
1975 void ShenandoahHeap::scan_roots_for_iteration(ShenandoahScanObjectStack* oop_stack, ObjectIterateScanRootClosure* oops) {
1976   // Process GC roots according to current GC cycle
1977   // This populates the work stack with initial objects
1978   // It is important to relinquish the associated locks before diving
1979   // into heap dumper
1980   uint n_workers = safepoint_workers() != nullptr ? safepoint_workers()->active_workers() : 1;
1981   ShenandoahHeapIterationRootScanner rp(n_workers);
1982   rp.roots_do(oops);
1983 }
1984 
1985 void ShenandoahHeap::reclaim_aux_bitmap_for_iteration() {
1986   if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
1987     log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
1988   }
1989 }
1990 
1991 // Closure for parallelly iterate objects
1992 class ShenandoahObjectIterateParScanClosure : public BasicOopIterateClosure {
1993 private:
1994   MarkBitMap* _bitmap;
1995   ShenandoahObjToScanQueue* _queue;
1996   ShenandoahHeap* const _heap;
1997   ShenandoahMarkingContext* const _marking_context;
1998 
1999   template <class T>
2000   void do_oop_work(T* p) {
2001     T o = RawAccess<>::oop_load(p);
2002     if (!CompressedOops::is_null(o)) {
2003       oop obj = CompressedOops::decode_not_null(o);
2004       if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
2005         // There may be dead oops in weak roots in concurrent root phase, do not touch them.
2006         return;
2007       }
2008       obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
2009 
2010       assert(oopDesc::is_oop(obj), "Must be a valid oop");
2011       if (_bitmap->par_mark(obj)) {
2012         _queue->push(ShenandoahMarkTask(obj));
2013       }
2014     }
2015   }
2016 public:
2017   ShenandoahObjectIterateParScanClosure(MarkBitMap* bitmap, ShenandoahObjToScanQueue* q) :
2018     _bitmap(bitmap), _queue(q), _heap(ShenandoahHeap::heap()),
2019     _marking_context(_heap->marking_context()) {}
2020   void do_oop(oop* p)       { do_oop_work(p); }
2021   void do_oop(narrowOop* p) { do_oop_work(p); }
2022 };
2023 
2024 // Object iterator for parallel heap iteraion.
2025 // The root scanning phase happenes in construction as a preparation of
2026 // parallel marking queues.
2027 // Every worker processes it's own marking queue. work-stealing is used
2028 // to balance workload.
2029 class ShenandoahParallelObjectIterator : public ParallelObjectIteratorImpl {
2030 private:
2031   uint                         _num_workers;
2032   bool                         _init_ready;
2033   MarkBitMap*                  _aux_bit_map;
2034   ShenandoahHeap*              _heap;
2035   ShenandoahScanObjectStack    _roots_stack; // global roots stack
2036   ShenandoahObjToScanQueueSet* _task_queues;
2037 public:
2038   ShenandoahParallelObjectIterator(uint num_workers, MarkBitMap* bitmap) :
2039         _num_workers(num_workers),
2040         _init_ready(false),
2041         _aux_bit_map(bitmap),
2042         _heap(ShenandoahHeap::heap()) {
2043     // Initialize bitmap
2044     _init_ready = _heap->prepare_aux_bitmap_for_iteration();
2045     if (!_init_ready) {
2046       return;
2047     }
2048 
2049     ObjectIterateScanRootClosure oops(_aux_bit_map, &_roots_stack);
2050     _heap->scan_roots_for_iteration(&_roots_stack, &oops);
2051 
2052     _init_ready = prepare_worker_queues();
2053   }
2054 
2055   ~ShenandoahParallelObjectIterator() {
2056     // Reclaim bitmap
2057     _heap->reclaim_aux_bitmap_for_iteration();
2058     // Reclaim queue for workers
2059     if (_task_queues!= nullptr) {
2060       for (uint i = 0; i < _num_workers; ++i) {
2061         ShenandoahObjToScanQueue* q = _task_queues->queue(i);
2062         if (q != nullptr) {
2063           delete q;
2064           _task_queues->register_queue(i, nullptr);
2065         }
2066       }
2067       delete _task_queues;
2068       _task_queues = nullptr;
2069     }
2070   }
2071 
2072   virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
2073     if (_init_ready) {
2074       object_iterate_parallel(cl, worker_id, _task_queues);
2075     }
2076   }
2077 
2078 private:
2079   // Divide global root_stack into worker queues
2080   bool prepare_worker_queues() {
2081     _task_queues = new ShenandoahObjToScanQueueSet((int) _num_workers);
2082     // Initialize queues for every workers
2083     for (uint i = 0; i < _num_workers; ++i) {
2084       ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
2085       _task_queues->register_queue(i, task_queue);
2086     }
2087     // Divide roots among the workers. Assume that object referencing distribution
2088     // is related with root kind, use round-robin to make every worker have same chance
2089     // to process every kind of roots
2090     size_t roots_num = _roots_stack.size();
2091     if (roots_num == 0) {
2092       // No work to do
2093       return false;
2094     }
2095 
2096     for (uint j = 0; j < roots_num; j++) {
2097       uint stack_id = j % _num_workers;
2098       oop obj = _roots_stack.pop();
2099       _task_queues->queue(stack_id)->push(ShenandoahMarkTask(obj));
2100     }
2101     return true;
2102   }
2103 
2104   void object_iterate_parallel(ObjectClosure* cl,
2105                                uint worker_id,
2106                                ShenandoahObjToScanQueueSet* queue_set) {
2107     assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
2108     assert(queue_set != nullptr, "task queue must not be null");
2109 
2110     ShenandoahObjToScanQueue* q = queue_set->queue(worker_id);
2111     assert(q != nullptr, "object iterate queue must not be null");
2112 
2113     ShenandoahMarkTask t;
2114     ShenandoahObjectIterateParScanClosure oops(_aux_bit_map, q);
2115 
2116     // Work through the queue to traverse heap.
2117     // Steal when there is no task in queue.
2118     while (q->pop(t) || queue_set->steal(worker_id, t)) {
2119       oop obj = t.obj();
2120       assert(oopDesc::is_oop(obj), "must be a valid oop");
2121       cl->do_object(obj);
2122       obj->oop_iterate(&oops);
2123     }
2124     assert(q->is_empty(), "should be empty");
2125   }
2126 };
2127 
2128 ParallelObjectIteratorImpl* ShenandoahHeap::parallel_object_iterator(uint workers) {
2129   return new ShenandoahParallelObjectIterator(workers, &_aux_bit_map);
2130 }
2131 
2132 // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
2133 void ShenandoahHeap::keep_alive(oop obj) {
2134   if (is_concurrent_mark_in_progress() && (obj != nullptr)) {
2135     ShenandoahBarrierSet::barrier_set()->enqueue(obj);
2136   }
2137 }
2138 
2139 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
2140   for (size_t i = 0; i < num_regions(); i++) {
2141     ShenandoahHeapRegion* current = get_region(i);
2142     blk->heap_region_do(current);
2143   }
2144 }
2145 
2146 class ShenandoahParallelHeapRegionTask : public WorkerTask {
2147 private:
2148   ShenandoahHeap* const _heap;
2149   ShenandoahHeapRegionClosure* const _blk;
2150 
2151   shenandoah_padding(0);
2152   volatile size_t _index;
2153   shenandoah_padding(1);
2154 
2155 public:
2156   ShenandoahParallelHeapRegionTask(ShenandoahHeapRegionClosure* blk) :
2157           WorkerTask("Shenandoah Parallel Region Operation"),
2158           _heap(ShenandoahHeap::heap()), _blk(blk), _index(0) {}
2159 
2160   void work(uint worker_id) {
2161     ShenandoahParallelWorkerSession worker_session(worker_id);
2162     size_t stride = ShenandoahParallelRegionStride;
2163 
2164     size_t max = _heap->num_regions();
2165     while (Atomic::load(&_index) < max) {
2166       size_t cur = Atomic::fetch_and_add(&_index, stride, memory_order_relaxed);
2167       size_t start = cur;
2168       size_t end = MIN2(cur + stride, max);
2169       if (start >= max) break;
2170 
2171       for (size_t i = cur; i < end; i++) {
2172         ShenandoahHeapRegion* current = _heap->get_region(i);
2173         _blk->heap_region_do(current);
2174       }
2175     }
2176   }
2177 };
2178 
2179 void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
2180   assert(blk->is_thread_safe(), "Only thread-safe closures here");
2181   if (num_regions() > ShenandoahParallelRegionStride) {
2182     ShenandoahParallelHeapRegionTask task(blk);
2183     workers()->run_task(&task);
2184   } else {
2185     heap_region_iterate(blk);
2186   }
2187 }
2188 
2189 class ShenandoahRendezvousClosure : public HandshakeClosure {
2190 public:
2191   inline ShenandoahRendezvousClosure() : HandshakeClosure("ShenandoahRendezvous") {}
2192   inline void do_thread(Thread* thread) {}
2193 };
2194 
2195 void ShenandoahHeap::rendezvous_threads() {
2196   ShenandoahRendezvousClosure cl;
2197   Handshake::execute(&cl);
2198 }
2199 
2200 void ShenandoahHeap::recycle_trash() {
2201   free_set()->recycle_trash();
2202 }
2203 
2204 void ShenandoahHeap::do_class_unloading() {
2205   _unloader.unload();
2206 }
2207 
2208 void ShenandoahHeap::stw_weak_refs(bool full_gc) {
2209   // Weak refs processing
2210   ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
2211                                                 : ShenandoahPhaseTimings::degen_gc_weakrefs;
2212   ShenandoahTimingsTracker t(phase);
2213   ShenandoahGCWorkerPhase worker_phase(phase);
2214   active_generation()->ref_processor()->process_references(phase, workers(), false /* concurrent */);
2215 }
2216 
2217 void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
2218   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2219 
2220   // Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to
2221   // make them parsable for update code to work correctly. Plus, we can compute new sizes
2222   // for future GCLABs here.
2223   if (UseTLAB) {
2224     ShenandoahGCPhase phase(concurrent ?
2225                             ShenandoahPhaseTimings::init_update_refs_manage_gclabs :
2226                             ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
2227     gclabs_retire(ResizeTLAB);
2228   }
2229 
2230   _update_refs_iterator.reset();
2231 }
2232 
2233 void ShenandoahHeap::set_gc_state_all_threads(char state) {
2234   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
2235     ShenandoahThreadLocalData::set_gc_state(t, state);
2236   }
2237 }
2238 
2239 void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) {
2240   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should really be Shenandoah safepoint");
2241   _gc_state.set_cond(mask, value);
2242   set_gc_state_all_threads(_gc_state.raw_value());
2243 }
2244 
2245 void ShenandoahHeap::set_concurrent_young_mark_in_progress(bool in_progress) {
2246   if (has_forwarded_objects()) {
2247     set_gc_state_mask(YOUNG_MARKING | UPDATEREFS, in_progress);
2248   } else {
2249     set_gc_state_mask(YOUNG_MARKING, in_progress);
2250   }
2251 
2252   manage_satb_barrier(in_progress);
2253 }
2254 
2255 void ShenandoahHeap::set_concurrent_old_mark_in_progress(bool in_progress) {
2256   if (has_forwarded_objects()) {
2257     set_gc_state_mask(OLD_MARKING | UPDATEREFS, in_progress);
2258   } else {
2259     set_gc_state_mask(OLD_MARKING, in_progress);
2260   }
2261 
2262   manage_satb_barrier(in_progress);
2263 }
2264 
2265 void ShenandoahHeap::set_prepare_for_old_mark_in_progress(bool in_progress) {
2266   // Unlike other set-gc-state functions, this may happen outside safepoint.
2267   // Is only set and queried by control thread, so no coherence issues.
2268   _prepare_for_old_mark = in_progress;
2269 }
2270 
2271 void ShenandoahHeap::set_aging_cycle(bool in_progress) {
2272   _is_aging_cycle.set_cond(in_progress);
2273 }
2274 
2275 void ShenandoahHeap::manage_satb_barrier(bool active) {
2276   if (is_concurrent_mark_in_progress()) {
2277     // Ignore request to deactivate barrier while concurrent mark is in progress.
2278     // Do not attempt to re-activate the barrier if it is already active.
2279     if (active && !ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
2280       ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
2281     }
2282   } else {
2283     // No concurrent marking is in progress so honor request to deactivate,
2284     // but only if the barrier is already active.
2285     if (!active && ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
2286       ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
2287     }
2288   }
2289 }
2290 
2291 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
2292   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
2293   set_gc_state_mask(EVACUATION, in_progress);
2294 }
2295 
2296 void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
2297   if (in_progress) {
2298     _concurrent_strong_root_in_progress.set();
2299   } else {
2300     _concurrent_strong_root_in_progress.unset();
2301   }
2302 }
2303 
2304 void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
2305   set_gc_state_mask(WEAK_ROOTS, cond);
2306 }
2307 
2308 GCTracer* ShenandoahHeap::tracer() {
2309   return shenandoah_policy()->tracer();
2310 }
2311 
2312 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
2313   return _free_set->used();
2314 }
2315 
2316 bool ShenandoahHeap::try_cancel_gc() {
2317   while (true) {
2318     jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
2319     if (prev == CANCELLABLE) return true;
2320     else if (prev == CANCELLED) return false;
2321     assert(ShenandoahSuspendibleWorkers, "should not get here when not using suspendible workers");
2322     assert(prev == NOT_CANCELLED, "must be NOT_CANCELLED");
2323     Thread* thread = Thread::current();
2324     if (thread->is_Java_thread()) {
2325       // We need to provide a safepoint here, otherwise we might
2326       // spin forever if a SP is pending.
2327       ThreadBlockInVM sp(JavaThread::cast(thread));
2328       SpinPause();
2329     }
2330   }
2331 }
2332 
2333 void ShenandoahHeap::cancel_concurrent_mark() {
2334   _young_generation->cancel_marking();
2335   _old_generation->cancel_marking();
2336   _global_generation->cancel_marking();
2337 
2338   ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
2339 }
2340 
2341 void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
2342   if (try_cancel_gc()) {
2343     FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
2344     log_info(gc)("%s", msg.buffer());
2345     Events::log(Thread::current(), "%s", msg.buffer());
2346     _cancel_requested_time = os::elapsedTime();
2347     if (cause == GCCause::_shenandoah_upgrade_to_full_gc) {
2348       _upgraded_to_full = true;
2349     }
2350   }
2351 }
2352 
2353 uint ShenandoahHeap::max_workers() {
2354   return _max_workers;
2355 }
2356 
2357 void ShenandoahHeap::stop() {
2358   // The shutdown sequence should be able to terminate when GC is running.
2359 
2360   // Step 0a. Stop requesting collections.
2361   regulator_thread()->stop();
2362 
2363   // Step 0. Notify policy to disable event recording.
2364   _shenandoah_policy->record_shutdown();
2365 
2366   // Step 1. Notify control thread that we are in shutdown.
2367   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
2368   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
2369   control_thread()->prepare_for_graceful_shutdown();
2370 
2371   // Step 2. Notify GC workers that we are cancelling GC.
2372   cancel_gc(GCCause::_shenandoah_stop_vm);
2373 
2374   // Step 3. Wait until GC worker exits normally.
2375   control_thread()->stop();
2376 }
2377 
2378 void ShenandoahHeap::stw_unload_classes(bool full_gc) {
2379   if (!unload_classes()) return;
2380   // Unload classes and purge SystemDictionary.
2381   {
2382     ShenandoahPhaseTimings::Phase phase = full_gc ?
2383                                           ShenandoahPhaseTimings::full_gc_purge_class_unload :
2384                                           ShenandoahPhaseTimings::degen_gc_purge_class_unload;
2385     ShenandoahIsAliveSelector is_alive;
2386     CodeCache::UnloadingScope scope(is_alive.is_alive_closure());
2387     ShenandoahGCPhase gc_phase(phase);
2388     ShenandoahGCWorkerPhase worker_phase(phase);
2389     bool purged_class = SystemDictionary::do_unloading(gc_timer());
2390 
2391     uint num_workers = _workers->active_workers();
2392     ShenandoahClassUnloadingTask unlink_task(phase, num_workers, purged_class);
2393     _workers->run_task(&unlink_task);
2394   }
2395 
2396   {
2397     ShenandoahGCPhase phase(full_gc ?
2398                             ShenandoahPhaseTimings::full_gc_purge_cldg :
2399                             ShenandoahPhaseTimings::degen_gc_purge_cldg);
2400     ClassLoaderDataGraph::purge(/*at_safepoint*/true);
2401   }
2402   // Resize and verify metaspace
2403   MetaspaceGC::compute_new_size();
2404   DEBUG_ONLY(MetaspaceUtils::verify();)
2405 }
2406 
2407 // Weak roots are either pre-evacuated (final mark) or updated (final updaterefs),
2408 // so they should not have forwarded oops.
2409 // However, we do need to "null" dead oops in the roots, if can not be done
2410 // in concurrent cycles.
2411 void ShenandoahHeap::stw_process_weak_roots(bool full_gc) {
2412   uint num_workers = _workers->active_workers();
2413   ShenandoahPhaseTimings::Phase timing_phase = full_gc ?
2414                                                ShenandoahPhaseTimings::full_gc_purge_weak_par :
2415                                                ShenandoahPhaseTimings::degen_gc_purge_weak_par;
2416   ShenandoahGCPhase phase(timing_phase);
2417   ShenandoahGCWorkerPhase worker_phase(timing_phase);
2418   // Cleanup weak roots
2419   if (has_forwarded_objects()) {
2420     ShenandoahForwardedIsAliveClosure is_alive;
2421     ShenandoahUpdateRefsClosure keep_alive;
2422     ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveClosure, ShenandoahUpdateRefsClosure>
2423       cleaning_task(timing_phase, &is_alive, &keep_alive, num_workers);
2424     _workers->run_task(&cleaning_task);
2425   } else {
2426     ShenandoahIsAliveClosure is_alive;
2427 #ifdef ASSERT
2428     ShenandoahAssertNotForwardedClosure verify_cl;
2429     ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, ShenandoahAssertNotForwardedClosure>
2430       cleaning_task(timing_phase, &is_alive, &verify_cl, num_workers);
2431 #else
2432     ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, DoNothingClosure>
2433       cleaning_task(timing_phase, &is_alive, &do_nothing_cl, num_workers);
2434 #endif
2435     _workers->run_task(&cleaning_task);
2436   }
2437 }
2438 
2439 void ShenandoahHeap::parallel_cleaning(bool full_gc) {
2440   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2441   assert(is_stw_gc_in_progress(), "Only for Degenerated and Full GC");
2442   ShenandoahGCPhase phase(full_gc ?
2443                           ShenandoahPhaseTimings::full_gc_purge :
2444                           ShenandoahPhaseTimings::degen_gc_purge);
2445   stw_weak_refs(full_gc);
2446   stw_process_weak_roots(full_gc);
2447   stw_unload_classes(full_gc);
2448 }
2449 
2450 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
2451   set_gc_state_mask(HAS_FORWARDED, cond);
2452 }
2453 
2454 void ShenandoahHeap::set_unload_classes(bool uc) {
2455   _unload_classes.set_cond(uc);
2456 }
2457 
2458 bool ShenandoahHeap::unload_classes() const {
2459   return _unload_classes.is_set();
2460 }
2461 
2462 address ShenandoahHeap::in_cset_fast_test_addr() {
2463   ShenandoahHeap* heap = ShenandoahHeap::heap();
2464   assert(heap->collection_set() != nullptr, "Sanity");
2465   return (address) heap->collection_set()->biased_map_address();
2466 }
2467 
2468 address ShenandoahHeap::gc_state_addr() {
2469   return (address) ShenandoahHeap::heap()->_gc_state.addr_of();
2470 }
2471 
2472 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
2473   if (mode()->is_generational()) {
2474     young_generation()->reset_bytes_allocated_since_gc_start();
2475     old_generation()->reset_bytes_allocated_since_gc_start();
2476   }
2477 
2478   global_generation()->reset_bytes_allocated_since_gc_start();
2479 }
2480 
2481 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
2482   _degenerated_gc_in_progress.set_cond(in_progress);
2483 }
2484 
2485 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2486   _full_gc_in_progress.set_cond(in_progress);
2487 }
2488 
2489 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2490   assert (is_full_gc_in_progress(), "should be");
2491   _full_gc_move_in_progress.set_cond(in_progress);
2492 }
2493 
2494 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2495   set_gc_state_mask(UPDATEREFS, in_progress);
2496 }
2497 
2498 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2499   ShenandoahCodeRoots::register_nmethod(nm);
2500 }
2501 
2502 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
2503   ShenandoahCodeRoots::unregister_nmethod(nm);
2504 }
2505 
2506 void ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
2507   heap_region_containing(o)->record_pin();
2508 }
2509 
2510 void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
2511   ShenandoahHeapRegion* r = heap_region_containing(o);
2512   assert(r != nullptr, "Sanity");
2513   assert(r->pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", r->index());
2514   r->record_unpin();
2515 }
2516 
2517 void ShenandoahHeap::sync_pinned_region_status() {
2518   ShenandoahHeapLocker locker(lock());
2519 
2520   for (size_t i = 0; i < num_regions(); i++) {
2521     ShenandoahHeapRegion *r = get_region(i);
2522     if (r->is_active()) {
2523       if (r->is_pinned()) {
2524         if (r->pin_count() == 0) {
2525           r->make_unpinned();
2526         }
2527       } else {
2528         if (r->pin_count() > 0) {
2529           r->make_pinned();
2530         }
2531       }
2532     }
2533   }
2534 
2535   assert_pinned_region_status();
2536 }
2537 
2538 #ifdef ASSERT
2539 void ShenandoahHeap::assert_pinned_region_status() {
2540   for (size_t i = 0; i < num_regions(); i++) {
2541     ShenandoahHeapRegion* r = get_region(i);
2542     if (active_generation()->contains(r)) {
2543       assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
2544              "Region " SIZE_FORMAT " pinning status is inconsistent", i);
2545     }
2546   }
2547 }
2548 #endif
2549 
2550 ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
2551   return _gc_timer;
2552 }
2553 
2554 void ShenandoahHeap::prepare_concurrent_roots() {
2555   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2556   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2557   set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
2558   set_concurrent_weak_root_in_progress(true);
2559   if (unload_classes()) {
2560     _unloader.prepare();
2561   }
2562 }
2563 
2564 void ShenandoahHeap::finish_concurrent_roots() {
2565   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2566   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2567   if (unload_classes()) {
2568     _unloader.finish();
2569   }
2570 }
2571 
2572 #ifdef ASSERT
2573 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
2574   assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
2575 
2576   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
2577     if (UseDynamicNumberOfGCThreads) {
2578       assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");
2579     } else {
2580       // Use ParallelGCThreads inside safepoints
2581       assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints");
2582     }
2583   } else {
2584     if (UseDynamicNumberOfGCThreads) {
2585       assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
2586     } else {
2587       // Use ConcGCThreads outside safepoints
2588       assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
2589     }
2590   }
2591 }
2592 #endif
2593 
2594 ShenandoahVerifier* ShenandoahHeap::verifier() {
2595   guarantee(ShenandoahVerify, "Should be enabled");
2596   assert (_verifier != nullptr, "sanity");
2597   return _verifier;
2598 }
2599 
2600 template<bool CONCURRENT>
2601 class ShenandoahUpdateHeapRefsTask : public WorkerTask {
2602 private:
2603   ShenandoahHeap* _heap;
2604   ShenandoahRegionIterator* _regions;
2605   ShenandoahRegionChunkIterator* _work_chunks;
2606 
2607 public:
2608   explicit ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions,
2609                                         ShenandoahRegionChunkIterator* work_chunks) :
2610     WorkerTask("Shenandoah Update References"),
2611     _heap(ShenandoahHeap::heap()),
2612     _regions(regions),
2613     _work_chunks(work_chunks)
2614   {
2615   }
2616 
2617   void work(uint worker_id) {
2618     if (CONCURRENT) {
2619       ShenandoahConcurrentWorkerSession worker_session(worker_id);
2620       ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
2621       do_work<ShenandoahConcUpdateRefsClosure>(worker_id);
2622     } else {
2623       ShenandoahParallelWorkerSession worker_session(worker_id);
2624       do_work<ShenandoahSTWUpdateRefsClosure>(worker_id);
2625     }
2626   }
2627 
2628 private:
2629   template<class T>
2630   void do_work(uint worker_id) {
2631     T cl;
2632     ShenandoahHeapRegion* r = _regions->next();
2633     // We update references for global, old, and young collections.
2634     assert(_heap->active_generation()->is_mark_complete(), "Expected complete marking");
2635     ShenandoahMarkingContext* const ctx = _heap->marking_context();
2636     bool is_mixed = _heap->collection_set()->has_old_regions();
2637     while (r != nullptr) {
2638       HeapWord* update_watermark = r->get_update_watermark();
2639       assert (update_watermark >= r->bottom(), "sanity");
2640 
2641       log_debug(gc)("ShenandoahUpdateHeapRefsTask::do_work(%u) looking at region " SIZE_FORMAT, worker_id, r->index());
2642       bool region_progress = false;
2643       if (r->is_active() && !r->is_cset()) {
2644         if (!_heap->mode()->is_generational() || (r->affiliation() == ShenandoahRegionAffiliation::YOUNG_GENERATION)) {
2645           _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2646           region_progress = true;
2647         } else if (r->affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION) {
2648           if (_heap->active_generation()->generation_mode() == GLOBAL) {
2649             // Note that GLOBAL collection is not as effectively balanced as young and mixed cycles.  This is because
2650             // concurrent GC threads are parceled out entire heap regions of work at a time and there
2651             // is no "catchup phase" consisting of remembered set scanning, during which parcels of work are smaller
2652             // and more easily distributed more fairly across threads.
2653 
2654             // TODO: Consider an improvement to load balance GLOBAL GC.
2655             _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2656             region_progress = true;
2657           }
2658           // Otherwise, this is an old region in a young or mixed cycle.  Process it during a second phase, below.
2659           // Don't bother to report pacing progress in this case.
2660         } else {
2661           // Because updating of references runs concurrently, it is possible that a FREE inactive region transitions
2662           // to a non-free active region while this loop is executing.  Whenever this happens, the changing of a region's
2663           // active status may propagate at a different speed than the changing of the region's affiliation.
2664 
2665           // When we reach this control point, it is because a race has allowed a region's is_active() status to be seen
2666           // by this thread before the region's affiliation() is seen by this thread.
2667 
2668           // It's ok for this race to occur because the newly transformed region does not have any references to be
2669           // updated.
2670 
2671           assert(r->get_update_watermark() == r->bottom(),
2672                  "%s Region " SIZE_FORMAT " is_active but not recognized as YOUNG or OLD so must be newly transitioned from FREE",
2673                  affiliation_name(r->affiliation()), r->index());
2674         }
2675       }
2676       if (region_progress && ShenandoahPacing) {
2677         _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
2678       }
2679       if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
2680         return;
2681       }
2682       r = _regions->next();
2683     }
2684 
2685     if (_heap->mode()->is_generational() && (_heap->active_generation()->generation_mode() != GLOBAL)) {
2686       // Since this is generational and not GLOBAL, we have to process the remembered set.  There's no remembered
2687       // set processing if not in generational mode or if GLOBAL mode.
2688 
2689       // After this thread has exhausted its traditional update-refs work, it continues with updating refs within remembered set.
2690       // The remembered set workload is better balanced between threads, so threads that are "behind" can catch up with other
2691       // threads during this phase, allowing all threads to work more effectively in parallel.
2692       struct ShenandoahRegionChunk assignment;
2693       RememberedScanner* scanner = _heap->card_scan();
2694 
2695       while (!_heap->check_cancelled_gc_and_yield(CONCURRENT) && _work_chunks->next(&assignment)) {
2696         // Keep grabbing next work chunk to process until finished, or asked to yield
2697         ShenandoahHeapRegion* r = assignment._r;
2698         if (r->is_active() && !r->is_cset() && (r->affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION)) {
2699           HeapWord* start_of_range = r->bottom() + assignment._chunk_offset;
2700           HeapWord* end_of_range = r->get_update_watermark();
2701           if (end_of_range > start_of_range + assignment._chunk_size) {
2702             end_of_range = start_of_range + assignment._chunk_size;
2703           }
2704 
2705           // Old region in a young cycle or mixed cycle.
2706           if (is_mixed) {
2707             // TODO: For mixed evac, consider building an old-gen remembered set that allows restricted updating
2708             // within old-gen HeapRegions.  This remembered set can be constructed by old-gen concurrent marking
2709             // and augmented by card marking.  For example, old-gen concurrent marking can remember for each old-gen
2710             // card which other old-gen regions it refers to: none, one-other specifically, multiple-other non-specific.
2711             // Update-references when _mixed_evac processess each old-gen memory range that has a traditional DIRTY
2712             // card or if the "old-gen remembered set" indicates that this card holds pointers specifically to an
2713             // old-gen region in the most recent collection set, or if this card holds pointers to other non-specific
2714             // old-gen heap regions.
2715 
2716             if (r->is_humongous()) {
2717               if (start_of_range < end_of_range) {
2718                 // Need to examine both dirty and clean cards during mixed evac.
2719                 r->oop_iterate_humongous_slice(&cl, false, start_of_range, assignment._chunk_size, true);
2720               }
2721             } else {
2722               // Since this is mixed evacuation, old regions that are candidates for collection have not been coalesced
2723               // and filled.  Use mark bits to find objects that need to be updated.
2724               //
2725               // Future TODO: establish a second remembered set to identify which old-gen regions point to other old-gen
2726               // regions which are in the collection set for a particular mixed evacuation.
2727               if (start_of_range < end_of_range) {
2728                 HeapWord* p = nullptr;
2729                 size_t card_index = scanner->card_index_for_addr(start_of_range);
2730                 // In case last object in my range spans boundary of my chunk, I may need to scan all the way to top()
2731                 ShenandoahObjectToOopBoundedClosure<T> objs(&cl, start_of_range, r->top());
2732 
2733                 // Any object that begins in a previous range is part of a different scanning assignment.  Any object that
2734                 // starts after end_of_range is also not my responsibility.  (Either allocated during evacuation, so does
2735                 // not hold pointers to from-space, or is beyond the range of my assigned work chunk.)
2736 
2737                 // Find the first object that begins in my range, if there is one.
2738                 p = start_of_range;
2739                 oop obj = cast_to_oop(p);
2740                 HeapWord* tams = ctx->top_at_mark_start(r);
2741                 if (p >= tams) {
2742                   // We cannot use ctx->is_marked(obj) to test whether an object begins at this address.  Instead,
2743                   // we need to use the remembered set crossing map to advance p to the first object that starts
2744                   // within the enclosing card.
2745 
2746                   while (true) {
2747                     HeapWord* first_object = scanner->first_object_in_card(card_index);
2748                     if (first_object != nullptr) {
2749                       p = first_object;
2750                       break;
2751                     } else if (scanner->addr_for_card_index(card_index + 1) < end_of_range) {
2752                       card_index++;
2753                     } else {
2754                       // Force the loop that follows to immediately terminate.
2755                       p = end_of_range;
2756                       break;
2757                     }
2758                   }
2759                   obj = cast_to_oop(p);
2760                   // Note: p may be >= end_of_range
2761                 } else if (!ctx->is_marked(obj)) {
2762                   p = ctx->get_next_marked_addr(p, tams);
2763                   obj = cast_to_oop(p);
2764                   // If there are no more marked objects before tams, this returns tams.
2765                   // Note that tams is either >= end_of_range, or tams is the start of an object that is marked.
2766                 }
2767                 while (p < end_of_range) {
2768                   // p is known to point to the beginning of marked object obj
2769                   objs.do_object(obj);
2770                   HeapWord* prev_p = p;
2771                   p += obj->size();
2772                   if (p < tams) {
2773                     p = ctx->get_next_marked_addr(p, tams);
2774                     // If there are no more marked objects before tams, this returns tams.  Note that tams is
2775                     // either >= end_of_range, or tams is the start of an object that is marked.
2776                   }
2777                   assert(p != prev_p, "Lack of forward progress");
2778                   obj = cast_to_oop(p);
2779                 }
2780               }
2781             }
2782           } else {
2783             // This is a young evac..
2784             if (start_of_range < end_of_range) {
2785               size_t cluster_size =
2786                 CardTable::card_size_in_words() * ShenandoahCardCluster<ShenandoahDirectCardMarkRememberedSet>::CardsPerCluster;
2787               size_t clusters = assignment._chunk_size / cluster_size;
2788               assert(clusters * cluster_size == assignment._chunk_size, "Chunk assignment must align on cluster boundaries");
2789               scanner->process_region_slice(r, assignment._chunk_offset, clusters, end_of_range, &cl, true, worker_id);
2790             }
2791           }
2792           if (ShenandoahPacing && (start_of_range < end_of_range)) {
2793             _heap->pacer()->report_updaterefs(pointer_delta(end_of_range, start_of_range));
2794           }
2795         }
2796       }
2797     }
2798   }
2799 };
2800 
2801 void ShenandoahHeap::update_heap_references(bool concurrent) {
2802   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2803   uint nworkers = workers()->active_workers();
2804   ShenandoahRegionChunkIterator work_list(nworkers);
2805 
2806   if (concurrent) {
2807     ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator, &work_list);
2808     workers()->run_task(&task);
2809   } else {
2810     ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator, &work_list);
2811     workers()->run_task(&task);
2812   }
2813   if (ShenandoahEnableCardStats && card_scan()!=nullptr) { // generational check proxy
2814     card_scan()->log_card_stats(nworkers, CARD_STAT_UPDATE_REFS);
2815   }
2816 }
2817 
2818 class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
2819 private:
2820   ShenandoahMarkingContext* _ctx;
2821   ShenandoahHeapLock* const _lock;
2822   bool _is_generational;
2823 
2824 public:
2825   ShenandoahFinalUpdateRefsUpdateRegionStateClosure(
2826     ShenandoahMarkingContext* ctx) : _ctx(ctx), _lock(ShenandoahHeap::heap()->lock()),
2827                                      _is_generational(ShenandoahHeap::heap()->mode()->is_generational()) { }
2828 
2829   void heap_region_do(ShenandoahHeapRegion* r) {
2830 
2831     // Maintenance of region age must follow evacuation in order to account for evacuation allocations within survivor
2832     // regions.  We consult region age during the subsequent evacuation to determine whether certain objects need to
2833     // be promoted.
2834     if (_is_generational && r->is_young()) {
2835       HeapWord *tams = _ctx->top_at_mark_start(r);
2836       HeapWord *top = r->top();
2837 
2838       // Allocations move the watermark when top moves.  However compacting
2839       // objects will sometimes lower top beneath the watermark, after which,
2840       // attempts to read the watermark will assert out (watermark should not be
2841       // higher than top).
2842       if (top > tams) {
2843         // There have been allocations in this region since the start of the cycle.
2844         // Any objects new to this region must not assimilate elevated age.
2845         r->reset_age();
2846       } else if (ShenandoahHeap::heap()->is_aging_cycle()) {
2847         r->increment_age();
2848       }
2849     }
2850 
2851     // Drop unnecessary "pinned" state from regions that does not have CP marks
2852     // anymore, as this would allow trashing them.
2853     if (r->is_active()) {
2854       if (r->is_pinned()) {
2855         if (r->pin_count() == 0) {
2856           ShenandoahHeapLocker locker(_lock);
2857           r->make_unpinned();
2858         }
2859       } else {
2860         if (r->pin_count() > 0) {
2861           ShenandoahHeapLocker locker(_lock);
2862           r->make_pinned();
2863         }
2864       }
2865     }
2866   }
2867 
2868   bool is_thread_safe() { return true; }
2869 };
2870 
2871 void ShenandoahHeap::update_heap_region_states(bool concurrent) {
2872   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2873   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2874 
2875   {
2876     ShenandoahGCPhase phase(concurrent ?
2877                             ShenandoahPhaseTimings::final_update_refs_update_region_states :
2878                             ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
2879     ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl (active_generation()->complete_marking_context());
2880     parallel_heap_region_iterate(&cl);
2881 
2882     assert_pinned_region_status();
2883   }
2884 
2885   {
2886     ShenandoahGCPhase phase(concurrent ?
2887                             ShenandoahPhaseTimings::final_update_refs_trash_cset :
2888                             ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
2889     trash_cset_regions();
2890   }
2891 }
2892 
2893 void ShenandoahHeap::rebuild_free_set(bool concurrent) {
2894   {
2895     ShenandoahGCPhase phase(concurrent ?
2896                             ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
2897                             ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
2898     ShenandoahHeapLocker locker(lock());
2899     _free_set->rebuild();
2900   }
2901 }
2902 
2903 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2904   print_on(st);
2905   print_heap_regions_on(st);
2906 }
2907 
2908 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2909   size_t slice = r->index() / _bitmap_regions_per_slice;
2910 
2911   size_t regions_from = _bitmap_regions_per_slice * slice;
2912   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2913   for (size_t g = regions_from; g < regions_to; g++) {
2914     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2915     if (skip_self && g == r->index()) continue;
2916     if (get_region(g)->is_committed()) {
2917       return true;
2918     }
2919   }
2920   return false;
2921 }
2922 
2923 bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2924   shenandoah_assert_heaplocked();
2925 
2926   // Bitmaps in special regions do not need commits
2927   if (_bitmap_region_special) {
2928     return true;
2929   }
2930 
2931   if (is_bitmap_slice_committed(r, true)) {
2932     // Some other region from the group is already committed, meaning the bitmap
2933     // slice is already committed, we exit right away.
2934     return true;
2935   }
2936 
2937   // Commit the bitmap slice:
2938   size_t slice = r->index() / _bitmap_regions_per_slice;
2939   size_t off = _bitmap_bytes_per_slice * slice;
2940   size_t len = _bitmap_bytes_per_slice;
2941   char* start = (char*) _bitmap_region.start() + off;
2942 
2943   if (!os::commit_memory(start, len, false)) {
2944     return false;
2945   }
2946 
2947   if (AlwaysPreTouch) {
2948     os::pretouch_memory(start, start + len, _pretouch_bitmap_page_size);
2949   }
2950 
2951   return true;
2952 }
2953 
2954 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2955   shenandoah_assert_heaplocked();
2956 
2957   // Bitmaps in special regions do not need uncommits
2958   if (_bitmap_region_special) {
2959     return true;
2960   }
2961 
2962   if (is_bitmap_slice_committed(r, true)) {
2963     // Some other region from the group is still committed, meaning the bitmap
2964     // slice is should stay committed, exit right away.
2965     return true;
2966   }
2967 
2968   // Uncommit the bitmap slice:
2969   size_t slice = r->index() / _bitmap_regions_per_slice;
2970   size_t off = _bitmap_bytes_per_slice * slice;
2971   size_t len = _bitmap_bytes_per_slice;
2972   if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
2973     return false;
2974   }
2975   return true;
2976 }
2977 
2978 void ShenandoahHeap::safepoint_synchronize_begin() {
2979   if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
2980     SuspendibleThreadSet::synchronize();
2981   }
2982 }
2983 
2984 void ShenandoahHeap::safepoint_synchronize_end() {
2985   if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
2986     SuspendibleThreadSet::desynchronize();
2987   }
2988 }
2989 
2990 void ShenandoahHeap::entry_uncommit(double shrink_before, size_t shrink_until) {
2991   static const char *msg = "Concurrent uncommit";
2992   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_uncommit, true /* log_heap_usage */);
2993   EventMark em("%s", msg);
2994 
2995   op_uncommit(shrink_before, shrink_until);
2996 }
2997 
2998 void ShenandoahHeap::try_inject_alloc_failure() {
2999   if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
3000     _inject_alloc_failure.set();
3001     os::naked_short_sleep(1);
3002     if (cancelled_gc()) {
3003       log_info(gc)("Allocation failure was successfully injected");
3004     }
3005   }
3006 }
3007 
3008 bool ShenandoahHeap::should_inject_alloc_failure() {
3009   return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
3010 }
3011 
3012 void ShenandoahHeap::initialize_serviceability() {
3013   if (mode()->is_generational()) {
3014     _young_gen_memory_pool = new ShenandoahYoungGenMemoryPool(this);
3015     _old_gen_memory_pool = new ShenandoahOldGenMemoryPool(this);
3016     _cycle_memory_manager.add_pool(_young_gen_memory_pool);
3017     _cycle_memory_manager.add_pool(_old_gen_memory_pool);
3018     _stw_memory_manager.add_pool(_young_gen_memory_pool);
3019     _stw_memory_manager.add_pool(_old_gen_memory_pool);
3020   } else {
3021     _memory_pool = new ShenandoahMemoryPool(this);
3022     _cycle_memory_manager.add_pool(_memory_pool);
3023     _stw_memory_manager.add_pool(_memory_pool);
3024   }
3025 }
3026 
3027 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
3028   GrowableArray<GCMemoryManager*> memory_managers(2);
3029   memory_managers.append(&_cycle_memory_manager);
3030   memory_managers.append(&_stw_memory_manager);
3031   return memory_managers;
3032 }
3033 
3034 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
3035   GrowableArray<MemoryPool*> memory_pools(1);
3036   if (mode()->is_generational()) {
3037     memory_pools.append(_young_gen_memory_pool);
3038     memory_pools.append(_old_gen_memory_pool);
3039   } else {
3040     memory_pools.append(_memory_pool);
3041   }
3042   return memory_pools;
3043 }
3044 
3045 MemoryUsage ShenandoahHeap::memory_usage() {
3046   return MemoryUsage(_initial_size, used(), committed(), max_capacity());
3047 }
3048 
3049 ShenandoahRegionIterator::ShenandoahRegionIterator() :
3050   _heap(ShenandoahHeap::heap()),
3051   _index(0) {}
3052 
3053 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
3054   _heap(heap),
3055   _index(0) {}
3056 
3057 void ShenandoahRegionIterator::reset() {
3058   _index = 0;
3059 }
3060 
3061 bool ShenandoahRegionIterator::has_next() const {
3062   return _index < _heap->num_regions();
3063 }
3064 
3065 char ShenandoahHeap::gc_state() const {
3066   return _gc_state.raw_value();
3067 }
3068 
3069 ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
3070 #ifdef ASSERT
3071   assert(_liveness_cache != nullptr, "sanity");
3072   assert(worker_id < _max_workers, "sanity");
3073   for (uint i = 0; i < num_regions(); i++) {
3074     assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
3075   }
3076 #endif
3077   return _liveness_cache[worker_id];
3078 }
3079 
3080 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
3081   assert(worker_id < _max_workers, "sanity");
3082   assert(_liveness_cache != nullptr, "sanity");
3083   ShenandoahLiveData* ld = _liveness_cache[worker_id];
3084 
3085   for (uint i = 0; i < num_regions(); i++) {
3086     ShenandoahLiveData live = ld[i];
3087     if (live > 0) {
3088       ShenandoahHeapRegion* r = get_region(i);
3089       r->increase_live_data_gc_words(live);
3090       ld[i] = 0;
3091     }
3092   }
3093 }
3094 
3095 bool ShenandoahHeap::requires_barriers(stackChunkOop obj) const {
3096   if (is_idle()) return false;
3097 
3098   // Objects allocated after marking start are implicitly alive, don't need any barriers during
3099   // marking phase.
3100   if (is_concurrent_mark_in_progress() &&
3101      !marking_context()->allocated_after_mark_start(obj)) {
3102     return true;
3103   }
3104 
3105   // Can not guarantee obj is deeply good.
3106   if (has_forwarded_objects()) {
3107     return true;
3108   }
3109 
3110   return false;
3111 }
3112 
3113 void ShenandoahHeap::transfer_old_pointers_from_satb() {
3114   _old_generation->transfer_pointers_from_satb();
3115 }
3116 
3117 template<>
3118 void ShenandoahGenerationRegionClosure<YOUNG>::heap_region_do(ShenandoahHeapRegion* region) {
3119   // Visit young and free regions
3120   if (region->affiliation() != OLD_GENERATION) {
3121     _cl->heap_region_do(region);
3122   }
3123 }
3124 
3125 template<>
3126 void ShenandoahGenerationRegionClosure<OLD>::heap_region_do(ShenandoahHeapRegion* region) {
3127   // Visit old and free regions
3128   if (region->affiliation() != YOUNG_GENERATION) {
3129     _cl->heap_region_do(region);
3130   }
3131 }
3132 
3133 template<>
3134 void ShenandoahGenerationRegionClosure<GLOBAL>::heap_region_do(ShenandoahHeapRegion* region) {
3135   _cl->heap_region_do(region);
3136 }
3137 
3138 // Assure that the remember set has a dirty card everywhere there is an interesting pointer.
3139 // This examines the read_card_table between bottom() and top() since all PLABS are retired
3140 // before the safepoint for init_mark.  Actually, we retire them before update-references and don't
3141 // restore them until the start of evacuation.
3142 void ShenandoahHeap::verify_rem_set_at_mark() {
3143   shenandoah_assert_safepoint();
3144   assert(mode()->is_generational(), "Only verify remembered set for generational operational modes");
3145 
3146   ShenandoahRegionIterator iterator;
3147   RememberedScanner* scanner = card_scan();
3148   ShenandoahVerifyRemSetClosure check_interesting_pointers(true);
3149   ShenandoahMarkingContext* ctx;
3150 
3151   log_debug(gc)("Verifying remembered set at %s mark", doing_mixed_evacuations()? "mixed": "young");
3152 
3153   if (is_old_bitmap_stable() || active_generation()->generation_mode() == GLOBAL) {
3154     ctx = complete_marking_context();
3155   } else {
3156     ctx = nullptr;
3157   }
3158 
3159   while (iterator.has_next()) {
3160     ShenandoahHeapRegion* r = iterator.next();
3161     HeapWord* tams = ctx? ctx->top_at_mark_start(r): nullptr;
3162     if (r == nullptr)
3163       break;
3164     if (r->is_old() && r->is_active()) {
3165       HeapWord* obj_addr = r->bottom();
3166       if (r->is_humongous_start()) {
3167         oop obj = cast_to_oop(obj_addr);
3168         if (!ctx || ctx->is_marked(obj)) {
3169           // For humongous objects, the typical object is an array, so the following checks may be overkill
3170           // For regular objects (not object arrays), if the card holding the start of the object is dirty,
3171           // we do not need to verify that cards spanning interesting pointers within this object are dirty.
3172           if (!scanner->is_card_dirty(obj_addr) || obj->is_objArray()) {
3173             obj->oop_iterate(&check_interesting_pointers);
3174           }
3175           // else, object's start is marked dirty and obj is not an objArray, so any interesting pointers are covered
3176         }
3177         // else, this humongous object is not marked so no need to verify its internal pointers
3178         if (!scanner->verify_registration(obj_addr, ctx)) {
3179           ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, obj_addr, nullptr,
3180                                           "Verify init-mark remembered set violation", "object not properly registered", __FILE__, __LINE__);
3181         }
3182       } else if (!r->is_humongous()) {
3183         HeapWord* top = r->top();
3184         while (obj_addr < top) {
3185           oop obj = cast_to_oop(obj_addr);
3186           // ctx->is_marked() returns true if mark bit set (TAMS not relevant during init mark)
3187           if (!ctx || ctx->is_marked(obj)) {
3188             // For regular objects (not object arrays), if the card holding the start of the object is dirty,
3189             // we do not need to verify that cards spanning interesting pointers within this object are dirty.
3190             if (!scanner->is_card_dirty(obj_addr) || obj->is_objArray()) {
3191               obj->oop_iterate(&check_interesting_pointers);
3192             }
3193             // else, object's start is marked dirty and obj is not an objArray, so any interesting pointers are covered
3194             if (!scanner->verify_registration(obj_addr, ctx)) {
3195               ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, obj_addr, nullptr,
3196                                                "Verify init-mark remembered set violation", "object not properly registered", __FILE__, __LINE__);
3197             }
3198             obj_addr += obj->size();
3199           } else {
3200             // This object is not live so we don't verify dirty cards contained therein
3201             assert(tams != nullptr, "If object is not live, ctx and tams should be non-null");
3202             obj_addr = ctx->get_next_marked_addr(obj_addr, tams);
3203           }
3204         }
3205       } // else, we ignore humongous continuation region
3206     } // else, this is not an OLD region so we ignore it
3207   } // all regions have been processed
3208 }
3209 
3210 void ShenandoahHeap::help_verify_region_rem_set(ShenandoahHeapRegion* r, ShenandoahMarkingContext* ctx, HeapWord* from,
3211                                                 HeapWord* top, HeapWord* registration_watermark, const char* message) {
3212   RememberedScanner* scanner = card_scan();
3213   ShenandoahVerifyRemSetClosure check_interesting_pointers(false);
3214 
3215   HeapWord* obj_addr = from;
3216   if (r->is_humongous_start()) {
3217     oop obj = cast_to_oop(obj_addr);
3218     if (!ctx || ctx->is_marked(obj)) {
3219       size_t card_index = scanner->card_index_for_addr(obj_addr);
3220       // For humongous objects, the typical object is an array, so the following checks may be overkill
3221       // For regular objects (not object arrays), if the card holding the start of the object is dirty,
3222       // we do not need to verify that cards spanning interesting pointers within this object are dirty.
3223       if (!scanner->is_write_card_dirty(card_index) || obj->is_objArray()) {
3224         obj->oop_iterate(&check_interesting_pointers);
3225       }
3226       // else, object's start is marked dirty and obj is not an objArray, so any interesting pointers are covered
3227     }
3228     // else, this humongous object is not live so no need to verify its internal pointers
3229 
3230     if ((obj_addr < registration_watermark) && !scanner->verify_registration(obj_addr, ctx)) {
3231       ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, obj_addr, nullptr, message,
3232                                        "object not properly registered", __FILE__, __LINE__);
3233     }
3234   } else if (!r->is_humongous()) {
3235     while (obj_addr < top) {
3236       oop obj = cast_to_oop(obj_addr);
3237       // ctx->is_marked() returns true if mark bit set or if obj above TAMS.
3238       if (!ctx || ctx->is_marked(obj)) {
3239         size_t card_index = scanner->card_index_for_addr(obj_addr);
3240         // For regular objects (not object arrays), if the card holding the start of the object is dirty,
3241         // we do not need to verify that cards spanning interesting pointers within this object are dirty.
3242         if (!scanner->is_write_card_dirty(card_index) || obj->is_objArray()) {
3243           obj->oop_iterate(&check_interesting_pointers);
3244         }
3245         // else, object's start is marked dirty and obj is not an objArray, so any interesting pointers are covered
3246 
3247         if ((obj_addr < registration_watermark) && !scanner->verify_registration(obj_addr, ctx)) {
3248           ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, obj_addr, nullptr, message,
3249                                            "object not properly registered", __FILE__, __LINE__);
3250         }
3251         obj_addr += obj->size();
3252       } else {
3253         // This object is not live so we don't verify dirty cards contained therein
3254         HeapWord* tams = ctx->top_at_mark_start(r);
3255         obj_addr = ctx->get_next_marked_addr(obj_addr, tams);
3256       }
3257     }
3258   }
3259 }
3260 
3261 void ShenandoahHeap::verify_rem_set_after_full_gc() {
3262   shenandoah_assert_safepoint();
3263   assert(mode()->is_generational(), "Only verify remembered set for generational operational modes");
3264 
3265   ShenandoahRegionIterator iterator;
3266 
3267   while (iterator.has_next()) {
3268     ShenandoahHeapRegion* r = iterator.next();
3269     if (r == nullptr)
3270       break;
3271     if (r->is_old() && !r->is_cset()) {
3272       help_verify_region_rem_set(r, nullptr, r->bottom(), r->top(), r->top(), "Remembered set violation at end of Full GC");
3273     }
3274   }
3275 }
3276 
3277 // Assure that the remember set has a dirty card everywhere there is an interesting pointer.  Even though
3278 // the update-references scan of remembered set only examines cards up to update_watermark, the remembered
3279 // set should be valid through top.  This examines the write_card_table between bottom() and top() because
3280 // all PLABS are retired immediately before the start of update refs.
3281 void ShenandoahHeap::verify_rem_set_at_update_ref() {
3282   shenandoah_assert_safepoint();
3283   assert(mode()->is_generational(), "Only verify remembered set for generational operational modes");
3284 
3285   ShenandoahRegionIterator iterator;
3286   ShenandoahMarkingContext* ctx;
3287 
3288   if (is_old_bitmap_stable() || active_generation()->generation_mode() == GLOBAL) {
3289     ctx = complete_marking_context();
3290   } else {
3291     ctx = nullptr;
3292   }
3293 
3294   while (iterator.has_next()) {
3295     ShenandoahHeapRegion* r = iterator.next();
3296     if (r == nullptr)
3297       break;
3298     if (r->is_old() && !r->is_cset()) {
3299       help_verify_region_rem_set(r, ctx, r->bottom(), r->top(), r->get_update_watermark(),
3300                                  "Remembered set violation at init-update-references");
3301     }
3302   }
3303 }
3304 
3305 ShenandoahGeneration* ShenandoahHeap::generation_for(ShenandoahRegionAffiliation affiliation) const {
3306   if (!mode()->is_generational()) {
3307     return global_generation();
3308   } else if (affiliation == YOUNG_GENERATION) {
3309     return young_generation();
3310   } else if (affiliation == OLD_GENERATION) {
3311     return old_generation();
3312   }
3313 
3314   ShouldNotReachHere();
3315   return nullptr;
3316 }
3317 
3318 void ShenandoahHeap::log_heap_status(const char* msg) const {
3319   if (mode()->is_generational()) {
3320     young_generation()->log_status(msg);
3321     old_generation()->log_status(msg);
3322   } else {
3323     global_generation()->log_status(msg);
3324   }
3325 }
3326