/*
 * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/universe.hpp"

#include "gc/shared/gcArguments.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/plab.hpp"
#include "gc/shared/tlab_globals.hpp"

#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahInitLogger.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahMemoryPool.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPacer.inline.hpp"
#include "gc/shenandoah/shenandoahPadding.hpp"
#include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahStringDedup.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/mode/shenandoahIUMode.hpp"
#include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
#include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
#if INCLUDE_JFR
#include "gc/shenandoah/shenandoahJfrSupport.hpp"
#endif

#include "classfile/systemDictionary.hpp"
#include "memory/classLoaderMetaspace.hpp"
#include "memory/metaspaceUtils.hpp"
#include "oops/compressedOops.inline.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/vmThread.hpp"
#include "services/mallocTracker.hpp"
#include "services/memTracker.hpp"
#include "utilities/events.hpp"
#include "utilities/powerOfTwo.hpp"

class ShenandoahPretouchHeapTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;
  const size_t _page_size;
public:
  ShenandoahPretouchHeapTask(size_t page_size) :
    AbstractGangTask("Shenandoah Pretouch Heap"),
    _page_size(page_size) {}

  virtual void work(uint worker_id) {
    ShenandoahHeapRegion* r = _regions.next();
    while (r != NULL) {
      if (r->is_committed()) {
        os::pretouch_memory(r->bottom(), r->end(), _page_size);
      }
      r = _regions.next();
    }
  }
};

class ShenandoahPretouchBitmapTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;
  char* _bitmap_base;
  const size_t _bitmap_size;
  const size_t _page_size;
public:
  ShenandoahPretouchBitmapTask(char* bitmap_base, size_t bitmap_size, size_t page_size) :
    AbstractGangTask("Shenandoah Pretouch Bitmap"),
    _bitmap_base(bitmap_base),
    _bitmap_size(bitmap_size),
    _page_size(page_size) {}

  virtual void work(uint worker_id) {
    ShenandoahHeapRegion* r = _regions.next();
    while (r != NULL) {
      size_t start = r->index()       * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      size_t end   = (r->index() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " <= " SIZE_FORMAT, end, _bitmap_size);

      if (r->is_committed()) {
        os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);
      }

      r = _regions.next();
    }
  }
};

jint ShenandoahHeap::initialize() {
  //
  // Figure out heap sizing
  //

  size_t init_byte_size = InitialHeapSize;
  size_t min_byte_size  = MinHeapSize;
  size_t max_byte_size  = MaxHeapSize;
  size_t heap_alignment = HeapAlignment;

  size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();

  Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
  Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");

  _num_regions = ShenandoahHeapRegion::region_count();
  assert(_num_regions == (max_byte_size / reg_size_bytes),
         "Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT,
         _num_regions, max_byte_size, reg_size_bytes);

  // Now we know the number of regions, initialize the heuristics.
  initialize_heuristics();

  size_t num_committed_regions = init_byte_size / reg_size_bytes;
  num_committed_regions = MIN2(num_committed_regions, _num_regions);
  assert(num_committed_regions <= _num_regions, "sanity");
  _initial_size = num_committed_regions * reg_size_bytes;

  size_t num_min_regions = min_byte_size / reg_size_bytes;
  num_min_regions = MIN2(num_min_regions, _num_regions);
  assert(num_min_regions <= _num_regions, "sanity");
  _minimum_size = num_min_regions * reg_size_bytes;

  // Default to max heap size.
  _soft_max_size = _num_regions * reg_size_bytes;

  _committed = _initial_size;

  size_t heap_page_size   = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
  size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
  size_t region_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
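
  // Illustrative note (platform-dependent, not normative): on Linux/x64 these
  // typically resolve to 4K for os::vm_page_size() and 2M for os::large_page_size(),
  // so -XX:+UseLargePages coarsens the commit and pretouch granularity for the
  // heap, bitmaps, and region storage alike.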

  //
  // Reserve and commit memory for heap
  //

  ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
  initialize_reserved_region(heap_rs);
  _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
  _heap_region_special = heap_rs.special();

  assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
         "Misaligned heap: " PTR_FORMAT, p2i(base()));

#if SHENANDOAH_OPTIMIZED_MARKTASK
  // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
  // Fail if we ever attempt to address more than we can.
  if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
    FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT ") for the heap, \n"
                          "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
                          "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
                          p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
    vm_exit_during_initialization("Fatal Error", buf);
  }
#endif

  ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
  if (!_heap_region_special) {
    os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
                              "Cannot commit heap memory");
  }

  //
  // Reserve and commit memory for bitmap(s)
  //

  _bitmap_size = ShenandoahMarkBitMap::compute_size(heap_rs.size());
  _bitmap_size = align_up(_bitmap_size, bitmap_page_size);

  size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();

  guarantee(bitmap_bytes_per_region != 0,
            "Bitmap bytes per region should not be zero");
  guarantee(is_power_of_2(bitmap_bytes_per_region),
            "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);

  if (bitmap_page_size > bitmap_bytes_per_region) {
    _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
    _bitmap_bytes_per_slice = bitmap_page_size;
  } else {
    _bitmap_regions_per_slice = 1;
    _bitmap_bytes_per_slice = bitmap_bytes_per_region;
  }
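
  // Worked example (illustrative only; actual numbers depend on flags and platform):
  // assuming 32M regions and ShenandoahMarkBitMap::heap_map_factor() == 64, each
  // region needs 32M / 64 = 512K of bitmap. With 4K base pages, each region gets
  // its own 512K slice; with 2M large pages, one 2M slice covers 2M / 512K = 4
  // regions, so the bitmap is committed and uncommitted four regions at a time.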

  guarantee(_bitmap_regions_per_slice >= 1,
            "Should have at least one region per slice: " SIZE_FORMAT,
            _bitmap_regions_per_slice);

  guarantee((_bitmap_bytes_per_slice % bitmap_page_size) == 0,
            "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
            _bitmap_bytes_per_slice, bitmap_page_size);

  ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
  MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
  _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
  _bitmap_region_special = bitmap.special();

  size_t bitmap_init_commit = _bitmap_bytes_per_slice *
                              align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
  bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
  if (!_bitmap_region_special) {
    os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
                              "Cannot commit bitmap memory");
  }

  _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions, _max_workers);

  if (ShenandoahVerify) {
    ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
    if (!verify_bitmap.special()) {
      os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
                                "Cannot commit verification bitmap memory");
    }
    MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
    MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
    _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
    _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
  }

  // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
  ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
  MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
  _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
  _aux_bitmap_region_special = aux_bitmap.special();
  _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);

  //
  // Create regions and region sets
  //
  size_t region_align = align_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE);
  size_t region_storage_size = align_up(region_align * _num_regions, region_page_size);
  region_storage_size = align_up(region_storage_size, os::vm_allocation_granularity());

  ReservedSpace region_storage(region_storage_size, region_page_size);
  MemTracker::record_virtual_memory_type(region_storage.base(), mtGC);
  if (!region_storage.special()) {
    os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false,
                              "Cannot commit region memory");
  }
  // Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks.
  // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there.
  // If not successful, bite the bullet and allocate at whatever address.
  {
    size_t cset_align = MAX2<size_t>(os::vm_page_size(), os::vm_allocation_granularity());
    size_t cset_size = align_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align);

    uintptr_t min = round_up_power_of_2(cset_align);
    uintptr_t max = (1u << 30u);
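
    // Illustrative probe sequence (assuming cset_align == 4K): reservation is
    // attempted at 4K, 8K, 16K, ..., doubling up to the 1G cap, and the first
    // success at the requested address wins.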

    for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
      char* req_addr = (char*)addr;
      assert(is_aligned(req_addr, cset_align), "Should be aligned");
      ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size(), req_addr);
      if (cset_rs.is_reserved()) {
        assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
        _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
        break;
      }
    }

    if (_collection_set == NULL) {
      ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size());
      _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
    }
  }

  _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
  _free_set = new ShenandoahFreeSet(this, _num_regions);

  {
    ShenandoahHeapLocker locker(lock());

    for (size_t i = 0; i < _num_regions; i++) {
      HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
      bool is_committed = i < num_committed_regions;
      void* loc = region_storage.base() + i * region_align;

      ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
      assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");

      _marking_context->initialize_top_at_mark_start(r);
      _regions[i] = r;
      assert(!collection_set()->is_in(i), "New region should not be in collection set");
    }

    // Initialize to complete
    _marking_context->mark_complete();

    _free_set->rebuild();
  }

  if (AlwaysPreTouch) {
    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before initialize() below zeroes it with the initializing thread. For any given region,
    // we touch the region and the corresponding bitmaps from the same thread.
    ShenandoahPushWorkerScope scope(workers(), _max_workers, false);

    _pretouch_heap_page_size = heap_page_size;
    _pretouch_bitmap_page_size = bitmap_page_size;

#ifdef LINUX
    // UseTransparentHugePages would madvise that backing memory can be coalesced into huge
    // pages. But the kernel needs to know that every small page is used, in order to coalesce
    // them into a huge one. Therefore, we need to pretouch with smaller pages.
    if (UseTransparentHugePages) {
      _pretouch_heap_page_size = (size_t)os::vm_page_size();
      _pretouch_bitmap_page_size = (size_t)os::vm_page_size();
    }
#endif

    // OS memory managers may want to coalesce back-to-back pages. Make their jobs
    // simpler by pre-touching the contiguous spaces (heap and bitmap) separately.

    ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, _pretouch_bitmap_page_size);
    _workers->run_task(&bcl);

    ShenandoahPretouchHeapTask hcl(_pretouch_heap_page_size);
    _workers->run_task(&hcl);
  }

  //
  // Initialize the rest of GC subsystems
  //

  _liveness_cache = NEW_C_HEAP_ARRAY(ShenandoahLiveData*, _max_workers, mtGC);
  for (uint worker = 0; worker < _max_workers; worker++) {
    _liveness_cache[worker] = NEW_C_HEAP_ARRAY(ShenandoahLiveData, _num_regions, mtGC);
    Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
  }

  // There should probably be Shenandoah-specific options for these,
  // just as there are G1-specific options.
  {
    ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
    satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
    satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
  }

  _monitoring_support = new ShenandoahMonitoringSupport(this);
  _phase_timings = new ShenandoahPhaseTimings(max_workers());
  ShenandoahCodeRoots::initialize();

  if (ShenandoahPacing) {
    _pacer = new ShenandoahPacer(this);
    _pacer->setup_for_idle();
  } else {
    _pacer = NULL;
  }

  _control_thread = new ShenandoahControlThread();

  ShenandoahInitLogger::print();

  return JNI_OK;
}

void ShenandoahHeap::initialize_mode() {
  if (ShenandoahGCMode != NULL) {
    if (strcmp(ShenandoahGCMode, "satb") == 0) {
      _gc_mode = new ShenandoahSATBMode();
    } else if (strcmp(ShenandoahGCMode, "iu") == 0) {
      _gc_mode = new ShenandoahIUMode();
    } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
      _gc_mode = new ShenandoahPassiveMode();
    } else {
      vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
    }
  } else {
    vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
  }
  _gc_mode->initialize_flags();
  if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
    vm_exit_during_initialization(
            err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                    _gc_mode->name()));
  }
  if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
    vm_exit_during_initialization(
            err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                    _gc_mode->name()));
  }
}

void ShenandoahHeap::initialize_heuristics() {
  assert(_gc_mode != NULL, "Must be initialized");
  _heuristics = _gc_mode->initialize_heuristics();

  if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
    vm_exit_during_initialization(
            err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                    _heuristics->name()));
  }
  if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
    vm_exit_during_initialization(
            err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                    _heuristics->name()));
  }
}

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif

ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  CollectedHeap(),
  _initial_size(0),
  _used(0),
  _committed(0),
  _bytes_allocated_since_gc_start(0),
  _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
  _workers(NULL),
  _safepoint_workers(NULL),
  _heap_region_special(false),
  _num_regions(0),
  _regions(NULL),
  _update_refs_iterator(this),
  _gc_state_changed(false),
  _control_thread(NULL),
  _shenandoah_policy(policy),
  _gc_mode(NULL),
  _heuristics(NULL),
  _free_set(NULL),
  _pacer(NULL),
  _verifier(NULL),
  _phase_timings(NULL),
  _monitoring_support(NULL),
  _memory_pool(NULL),
  _stw_memory_manager("Shenandoah Pauses"),
  _cycle_memory_manager("Shenandoah Cycles"),
  _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _soft_ref_policy(),
  _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
  _ref_processor(new ShenandoahReferenceProcessor(MAX2(_max_workers, 1U))),
  _marking_context(NULL),
  _bitmap_size(0),
  _bitmap_regions_per_slice(0),
  _bitmap_bytes_per_slice(0),
  _bitmap_region_special(false),
  _aux_bitmap_region_special(false),
  _liveness_cache(NULL),
  _collection_set(NULL)
{
  // Initialize GC mode early, so we can adjust barrier support
  initialize_mode();
  BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));

  _max_workers = MAX2(_max_workers, 1U);
  _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
                            /* are_GC_task_threads */ true,
                            /* are_ConcurrentGC_threads */ true);
  if (_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
  }

  if (ParallelGCThreads > 1) {
    _safepoint_workers = new ShenandoahWorkGang("Safepoint Cleanup Thread",
                                                ParallelGCThreads,
                      /* are_GC_task_threads */ false,
                 /* are_ConcurrentGC_threads */ false);
    _safepoint_workers->initialize_workers();
  }
}

#ifdef _MSC_VER
#pragma warning( pop )
#endif

class ShenandoahResetBitmapTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahResetBitmapTask() :
    AbstractGangTask("Shenandoah Reset Bitmap") {}

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahMarkingContext* const ctx = heap->marking_context();
    while (region != NULL) {
      if (heap->is_bitmap_slice_committed(region)) {
        ctx->clear_bitmap(region);
      }
      region = _regions.next();
    }
  }
};

void ShenandoahHeap::reset_mark_bitmap() {
  assert_gc_workers(_workers->active_workers());
  mark_incomplete_marking_context();

  ShenandoahResetBitmapTask task;
  _workers->run_task(&task);
}

void ShenandoahHeap::print_on(outputStream* st) const {
  st->print_cr("Shenandoah Heap");
  st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
               byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
               byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
               byte_size_in_proper_unit(committed()),    proper_unit_for_byte_size(committed()),
               byte_size_in_proper_unit(used()),         proper_unit_for_byte_size(used()));
  st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT "%s regions",
               num_regions(),
               byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
               proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));

  st->print("Status: ");
  if (has_forwarded_objects())                 st->print("has forwarded objects, ");
  if (is_concurrent_mark_in_progress())        st->print("marking, ");
  if (is_evacuation_in_progress())             st->print("evacuating, ");
  if (is_update_refs_in_progress())            st->print("updating refs, ");
  if (is_degenerated_gc_in_progress())         st->print("degenerated gc, ");
  if (is_full_gc_in_progress())                st->print("full gc, ");
  if (is_full_gc_move_in_progress())           st->print("full gc move, ");
  if (is_concurrent_weak_root_in_progress())   st->print("concurrent weak roots, ");
  if (is_concurrent_strong_root_in_progress() &&
      !is_concurrent_weak_root_in_progress())  st->print("concurrent strong roots, ");

  if (cancelled_gc()) {
    st->print("cancelled");
  } else {
    st->print("not cancelled");
  }
  st->cr();

  st->print_cr("Reserved region:");
  st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
               p2i(reserved_region().start()),
               p2i(reserved_region().end()));

  ShenandoahCollectionSet* cset = collection_set();
  st->print_cr("Collection set:");
  if (cset != NULL) {
    st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address()));
    st->print_cr(" - map (biased):  " PTR_FORMAT, p2i(cset->biased_map_address()));
  } else {
    st->print_cr(" (NULL)");
  }

  st->cr();
  MetaspaceUtils::print_on(st);

  if (Verbose) {
    st->cr();
    print_heap_regions_on(st);
  }
}

class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    assert(thread != NULL, "Sanity");
    assert(thread->is_Worker_thread(), "Only worker thread expected");
    ShenandoahThreadLocalData::initialize_gclab(thread);
  }
};

void ShenandoahHeap::post_initialize() {
  CollectedHeap::post_initialize();
  MutexLocker ml(Threads_lock);

  ShenandoahInitWorkerGCLABClosure init_gclabs;
  _workers->threads_do(&init_gclabs);

  // GCLABs cannot be initialized early during VM startup, as they cannot determine their max_size.
  // Instead, we let the WorkGang initialize the GCLAB when a new worker is created.
  _workers->set_initialize_gclab();
  if (_safepoint_workers != NULL) {
    _safepoint_workers->threads_do(&init_gclabs);
    _safepoint_workers->set_initialize_gclab();
  }

  _heuristics->initialize();

  JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers());
}

size_t ShenandoahHeap::used() const {
  return Atomic::load(&_used);
}

size_t ShenandoahHeap::committed() const {
  return Atomic::load(&_committed);
}

void ShenandoahHeap::increase_committed(size_t bytes) {
  shenandoah_assert_heaplocked_or_safepoint();
  _committed += bytes;
}

void ShenandoahHeap::decrease_committed(size_t bytes) {
  shenandoah_assert_heaplocked_or_safepoint();
  _committed -= bytes;
}

void ShenandoahHeap::increase_used(size_t bytes) {
  Atomic::add(&_used, bytes, memory_order_relaxed);
}

void ShenandoahHeap::set_used(size_t bytes) {
  Atomic::store(&_used, bytes);
}

void ShenandoahHeap::decrease_used(size_t bytes) {
  assert(used() >= bytes, "never decrease used below zero");
  Atomic::sub(&_used, bytes, memory_order_relaxed);
}

void ShenandoahHeap::increase_allocated(size_t bytes) {
  Atomic::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed);
}

void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
  size_t bytes = words * HeapWordSize;
  if (!waste) {
    increase_used(bytes);
  }
  increase_allocated(bytes);
  if (ShenandoahPacing) {
    control_thread()->pacing_notify_alloc(words);
    if (waste) {
      pacer()->claim_for_alloc(words, true);
    }
  }
}

size_t ShenandoahHeap::capacity() const {
  return committed();
}

size_t ShenandoahHeap::max_capacity() const {
  return _num_regions * ShenandoahHeapRegion::region_size_bytes();
}

size_t ShenandoahHeap::soft_max_capacity() const {
  size_t v = Atomic::load(&_soft_max_size);
  assert(min_capacity() <= v && v <= max_capacity(),
         "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
         min_capacity(), v, max_capacity());
  return v;
}

void ShenandoahHeap::set_soft_max_capacity(size_t v) {
  assert(min_capacity() <= v && v <= max_capacity(),
         "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
         min_capacity(), v, max_capacity());
  Atomic::store(&_soft_max_size, v);
}

size_t ShenandoahHeap::min_capacity() const {
  return _minimum_size;
}

size_t ShenandoahHeap::initial_capacity() const {
  return _initial_size;
}

bool ShenandoahHeap::is_in(const void* p) const {
  HeapWord* heap_base = (HeapWord*) base();
  HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
  return p >= heap_base && p < last_region_end;
}

void ShenandoahHeap::op_uncommit(double shrink_before, size_t shrink_until) {
  assert (ShenandoahUncommit, "should be enabled");

  // Application allocates from the beginning of the heap, and GC allocates at
  // the end of it. It is more efficient to uncommit from the end, so that applications
  // can enjoy the nearby committed regions. GC allocations are much less frequent,
  // and therefore can accept the committing costs.
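  //
  // Parameter semantics, as used below: only regions that have been empty since
  // before "shrink_before" are uncommitted, and we stop before committed() would
  // drop below "shrink_until".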

  size_t count = 0;
  for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow
    ShenandoahHeapRegion* r = get_region(i - 1);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      ShenandoahHeapLocker locker(lock());
      if (r->is_empty_committed()) {
        if (committed() < shrink_until + ShenandoahHeapRegion::region_size_bytes()) {
          break;
        }

        r->make_uncommitted();
        count++;
      }
    }
    SpinPause(); // allow allocators to take the lock
  }

  if (count > 0) {
    control_thread()->notify_heap_changed();
  }
}

HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // New object should fit the GCLAB size
  size_t min_size = MAX2(size, PLAB::min_size());

  // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
  size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
  new_size = MIN2(new_size, PLAB::max_size());
  new_size = MAX2(new_size, PLAB::min_size());

  // Record the new heuristic value even if we take a shortcut. This captures
  // the case when moderately-sized objects always take a shortcut. At some point,
  // heuristics should catch up with them.
  ShenandoahThreadLocalData::set_gclab_size(thread, new_size);

  if (new_size < size) {
    // New size still does not fit the object. Fall back to shared allocation.
    // This avoids retiring perfectly good GCLABs, when we encounter a large object.
    return NULL;
  }
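
  // Illustrative example (PLAB bounds are platform- and flag-dependent): a thread
  // whose current GCLAB is 4K words asks for 8K words next, clamped into
  // [PLAB::min_size(), PLAB::max_size()]. If even the doubled size cannot fit the
  // object, we returned NULL above, and the caller falls back to a shared
  // allocation while the current GCLAB stays intact for smaller objects.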

  // Retire current GCLAB, and allocate a new one.
  PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
  gclab->retire();

  size_t actual_size = 0;
  HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
  if (gclab_buf == NULL) {
    return NULL;
  }

  assert (size <= actual_size, "allocation should fit");

  if (ZeroTLAB) {
    // ...and clear it.
    Copy::zero_to_words(gclab_buf, actual_size);
  } else {
    // ...and zap the just-allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  gclab->set_buf(gclab_buf, actual_size);
  return gclab->allocate(size);
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
                                            size_t requested_size,
                                            size_t* actual_size) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
  HeapWord* res = allocate_memory(req);
  if (res != NULL) {
    *actual_size = req.actual_size();
  } else {
    *actual_size = 0;
  }
  return res;
}

HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
                                             size_t word_size,
                                             size_t* actual_size) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
  HeapWord* res = allocate_memory(req);
  if (res != NULL) {
    *actual_size = req.actual_size();
  } else {
    *actual_size = 0;
  }
  return res;
}

HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
  intptr_t pacer_epoch = 0;
  bool in_new_region = false;
  HeapWord* result = NULL;

  if (req.is_mutator_alloc()) {
    if (ShenandoahPacing) {
      pacer()->pace_for_alloc(req.size());
      pacer_epoch = pacer()->epoch();
    }

    if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
      result = allocate_memory_under_lock(req, in_new_region);
    }

    // If the allocation failed, block until the control thread reacts, then retry.
    //
    // It might happen that one of the threads requesting allocation unblocks
    // well after the GC happened, only to fail the second allocation, because
    // other threads have already depleted the free storage. In this case, a better
    // strategy is to try again, as long as GC makes progress.
    //
    // We also need to make sure the allocation was retried after at least one
    // Full GC, which means we want to try more than ShenandoahFullGCThreshold times.

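    // In other words: the first loop below retries for as long as the last GC made
    // progress, and the second loop bounds the retries so that the Full GC
    // threshold has been crossed before we finally give up.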
    size_t tries = 0;

    while (result == NULL && _progress_last_gc.is_set()) {
      tries++;
      control_thread()->handle_alloc_failure(req);
      result = allocate_memory_under_lock(req, in_new_region);
    }

    while (result == NULL && tries <= ShenandoahFullGCThreshold) {
      tries++;
      control_thread()->handle_alloc_failure(req);
      result = allocate_memory_under_lock(req, in_new_region);
    }

  } else {
    assert(req.is_gc_alloc(), "Can only accept GC allocs here");
    result = allocate_memory_under_lock(req, in_new_region);
    // Do not call handle_alloc_failure() here, because we cannot block.
    // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
  }

  if (in_new_region) {
    control_thread()->notify_heap_changed();
  }

  if (result != NULL) {
    size_t requested = req.size();
    size_t actual = req.actual_size();

    assert (req.is_lab_alloc() || (requested == actual),
            "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
            ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);

    if (req.is_mutator_alloc()) {
      notify_mutator_alloc_words(actual, false);

      // If we requested more than we were granted, give the rest back to pacer.
      // This only matters if we are in the same pacing epoch: do not try to unpace
      // over the budget for the other phase.
      if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
        pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
      }
    } else {
      increase_used(actual * HeapWordSize);
    }
  }

  return result;
}

HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
  ShenandoahHeapLocker locker(lock());
  return _free_set->allocate(req, in_new_region);
}

HeapWord* ShenandoahHeap::mem_allocate(size_t size,
                                        bool*  gc_overhead_limit_was_exceeded) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
  return allocate_memory(req);
}

MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                             size_t size,
                                                             Metaspace::MetadataType mdtype) {
  MetaWord* result;

  // Inform metaspace OOM to GC heuristics if class unloading is possible.
  if (heuristics()->can_unload_classes()) {
    ShenandoahHeuristics* h = heuristics();
    h->record_metaspace_oom();
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Start full GC
  collect(GCCause::_metadata_GC_clear_soft_refs);

  // Retry allocation
  result = loader_data->metaspace_non_null()->allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Out of memory
  return NULL;
}

class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  Thread* const _thread;
public:
  ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
    _heap(heap), _thread(Thread::current()) {}

  void do_object(oop p) {
    shenandoah_assert_marked(NULL, p);
    if (!p->is_forwarded()) {
      _heap->evacuate_object(p, _thread);
    }
  }
};

class ShenandoahEvacuationTask : public AbstractGangTask {
private:
  ShenandoahHeap* const _sh;
  ShenandoahCollectionSet* const _cs;
  bool _concurrent;
public:
  ShenandoahEvacuationTask(ShenandoahHeap* sh,
                           ShenandoahCollectionSet* cs,
                           bool concurrent) :
    AbstractGangTask("Shenandoah Evacuation"),
    _sh(sh),
    _cs(cs),
    _concurrent(concurrent)
  {}

  void work(uint worker_id) {
    if (_concurrent) {
      ShenandoahConcurrentWorkerSession worker_session(worker_id);
      ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
      ShenandoahEvacOOMScope oom_evac_scope;
      do_work();
    } else {
      ShenandoahParallelWorkerSession worker_session(worker_id);
      ShenandoahEvacOOMScope oom_evac_scope;
      do_work();
    }
  }

private:
  void do_work() {
    ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
    ShenandoahHeapRegion* r;
    while ((r = _cs->claim_next()) != NULL) {
      assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
      _sh->marked_object_iterate(r, &cl);

      if (ShenandoahPacing) {
        _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
      }

      if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
        break;
      }
    }
  }
};

void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
  ShenandoahEvacuationTask task(this, _collection_set, concurrent);
  workers()->run_task(&task);
}

void ShenandoahHeap::trash_cset_regions() {
  ShenandoahHeapLocker locker(lock());

  ShenandoahCollectionSet* set = collection_set();
  ShenandoahHeapRegion* r;
  set->clear_current_index();
  while ((r = set->next()) != NULL) {
    r->make_trash();
  }
  collection_set()->clear();
}

void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
  st->print_cr("Heap Regions:");
  st->print_cr("Region state: EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HP=pinned humongous start");
  st->print_cr("              HC=humongous continuation, CS=collection set, TR=trash, P=pinned, CSP=pinned collection set");
  st->print_cr("BTE=bottom/top/end, TAMS=top-at-mark-start");
  st->print_cr("UWM=update watermark, U=used");
  st->print_cr("T=TLAB allocs, G=GCLAB allocs");
  st->print_cr("S=shared allocs, L=live data");
  st->print_cr("CP=critical pins");

  for (size_t i = 0; i < num_regions(); i++) {
    get_region(i)->print_on(st);
  }
}

void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
  assert(start->is_humongous_start(), "reclaim regions starting with the first one");

  oop humongous_obj = cast_to_oop(start->bottom());
  size_t size = humongous_obj->size();
  size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
  size_t index = start->index() + required_regions - 1;
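
  // Illustrative example (region size is chosen ergonomically): with 1M regions,
  // a 2.5M humongous object yields required_regions == 3, and "index" now points
  // at the last continuation region, so the loop below frees the tail first.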

  assert(!start->has_live(), "liveness must be zero");

  for (size_t i = 0; i < required_regions; i++) {
    // Reclaim from tail. Otherwise, assertion fails when printing region to trace log,
    // as it expects that every region belongs to a humongous region starting with a humongous start region.
    ShenandoahHeapRegion* region = get_region(index--);

    assert(region->is_humongous(), "expect correct humongous start or continuation");
    assert(!region->is_cset(), "Humongous region should not be in collection set");

    region->make_trash_immediate();
  }
}

class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
public:
  ShenandoahCheckCleanGCLABClosure() {}
  void do_thread(Thread* thread) {
    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
    assert(gclab != NULL, "GCLAB should be initialized for %s", thread->name());
    assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
  }
};

class ShenandoahRetireGCLABClosure : public ThreadClosure {
private:
  bool const _resize;
public:
  ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
  void do_thread(Thread* thread) {
    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
    assert(gclab != NULL, "GCLAB should be initialized for %s", thread->name());
    gclab->retire();
    if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
      ShenandoahThreadLocalData::set_gclab_size(thread, 0);
    }
  }
};

void ShenandoahHeap::labs_make_parsable() {
  assert(UseTLAB, "Only call with UseTLAB");

  ShenandoahRetireGCLABClosure cl(false);

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
    ThreadLocalAllocBuffer& tlab = t->tlab();
    tlab.make_parsable();
    cl.do_thread(t);
  }

  workers()->threads_do(&cl);
}

void ShenandoahHeap::tlabs_retire(bool resize) {
  assert(UseTLAB, "Only call with UseTLAB");
  assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");

  ThreadLocalAllocStats stats;

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
    ThreadLocalAllocBuffer& tlab = t->tlab();
    tlab.retire(&stats);
    if (resize) {
      tlab.resize();
    }
  }

  stats.publish();

#ifdef ASSERT
  ShenandoahCheckCleanGCLABClosure cl;
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
    cl.do_thread(t);
  }
  workers()->threads_do(&cl);
#endif
}

void ShenandoahHeap::gclabs_retire(bool resize) {
  assert(UseTLAB, "Only call with UseTLAB");
  assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");

  ShenandoahRetireGCLABClosure cl(resize);
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
    cl.do_thread(t);
  }
  workers()->threads_do(&cl);

  if (safepoint_workers() != NULL) {
    safepoint_workers()->threads_do(&cl);
  }
}

// Returns size in bytes
size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
  if (ShenandoahElasticTLAB) {
    // With Elastic TLABs, return the max allowed size, and let the allocation path
    // figure out the safe size for current allocation.
    return ShenandoahHeapRegion::max_tlab_size_bytes();
  } else {
    return MIN2(_free_set->unsafe_peek_free(), ShenandoahHeapRegion::max_tlab_size_bytes());
  }
}

size_t ShenandoahHeap::max_tlab_size() const {
  // Returns size in words
  return ShenandoahHeapRegion::max_tlab_size_words();
}

void ShenandoahHeap::collect(GCCause::Cause cause) {
  control_thread()->request_gc(cause);
}

void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
  //assert(false, "Shouldn't need to do full collections");
}

HeapWord* ShenandoahHeap::block_start(const void* addr) const {
  ShenandoahHeapRegion* r = heap_region_containing(addr);
  if (r != NULL) {
    return r->block_start(addr);
  }
  return NULL;
}

bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
  ShenandoahHeapRegion* r = heap_region_containing(addr);
  return r->block_is_obj(addr);
}

bool ShenandoahHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr);
}

void ShenandoahHeap::prepare_for_verify() {
  if (SafepointSynchronize::is_at_safepoint() && UseTLAB) {
    labs_make_parsable();
  }
}

void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
  tcl->do_thread(_control_thread);
  workers()->threads_do(tcl);
  if (_safepoint_workers != NULL) {
    _safepoint_workers->threads_do(tcl);
  }
  if (ShenandoahStringDedup::is_enabled()) {
    ShenandoahStringDedup::threads_do(tcl);
  }
}

void ShenandoahHeap::print_tracing_info() const {
  LogTarget(Info, gc, stats) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);

    phase_timings()->print_global_on(&ls);

    ls.cr();
    ls.cr();

    shenandoah_policy()->print_gc_stats(&ls);

    ls.cr();
    ls.cr();
  }
}

void ShenandoahHeap::verify(VerifyOption vo) {
  if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
    if (ShenandoahVerify) {
      verifier()->verify_generic(vo);
    } else {
      // TODO: Consider allocating verification bitmaps on demand,
      // and turn this on unconditionally.
    }
  }
}

size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
  return _free_set->capacity();
}

class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
private:
  MarkBitMap* _bitmap;
  ShenandoahScanObjectStack* _oop_stack;
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _marking_context;

  template <class T>
  void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
        // There may be dead oops in weak roots in concurrent root phase, do not touch them.
        return;
      }
      obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);

      assert(oopDesc::is_oop(obj), "must be a valid oop");
      if (!_bitmap->is_marked(obj)) {
        _bitmap->mark(obj);
        _oop_stack->push(obj);
      }
    }
  }
public:
  ObjectIterateScanRootClosure(MarkBitMap* bitmap, ShenandoahScanObjectStack* oop_stack) :
    _bitmap(bitmap), _oop_stack(oop_stack), _heap(ShenandoahHeap::heap()),
    _marking_context(_heap->marking_context()) {}
  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

/*
 * This is public API, used in preparation for object_iterate().
 * Since we don't do a linear scan of the heap in object_iterate() (see comment below), we don't
 * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can
 * control, we call SH::tlabs_retire, SH::gclabs_retire.
 */
void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
  // No-op.
}

/*
 * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
 *
 * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
 * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
 * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
 * scanning therefore depends on having a valid marking bitmap to support it. However, we only
 * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
 * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
 * wiped the bitmap in preparation for next marking).
 *
 * For all those reasons, we implement object iteration as a single marking traversal, reporting
 * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
 * is allowed to report dead objects, but is not required to do so.
 */
void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
  // Reset bitmap
  if (!prepare_aux_bitmap_for_iteration()) {
    return;
  }

  ShenandoahScanObjectStack oop_stack;
  ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
  // Seed the stack with root scan
  scan_roots_for_iteration(&oop_stack, &oops);

  // Work through the oop stack to traverse heap
  while (!oop_stack.is_empty()) {
    oop obj = oop_stack.pop();
    assert(oopDesc::is_oop(obj), "must be a valid oop");
    cl->do_object(obj);
    obj->oop_iterate(&oops);
  }

  assert(oop_stack.is_empty(), "should be empty");
  // Reclaim bitmap
  reclaim_aux_bitmap_for_iteration();
}

bool ShenandoahHeap::prepare_aux_bitmap_for_iteration() {
  assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");

  if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
    log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
    return false;
  }
  // Reset bitmap
  _aux_bit_map.clear();
  return true;
}

void ShenandoahHeap::scan_roots_for_iteration(ShenandoahScanObjectStack* oop_stack, ObjectIterateScanRootClosure* oops) {
  // Process GC roots according to current GC cycle
  // This populates the work stack with initial objects
  // It is important to relinquish the associated locks before diving
  // into heap dumper
  ShenandoahHeapIterationRootScanner rp;
  rp.roots_do(oops);
}

void ShenandoahHeap::reclaim_aux_bitmap_for_iteration() {
  if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
    log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
  }
}

// Closure for parallel iteration over objects
class ShenandoahObjectIterateParScanClosure : public BasicOopIterateClosure {
private:
  MarkBitMap* _bitmap;
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _marking_context;

  template <class T>
  void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
        // There may be dead oops in weak roots in concurrent root phase, do not touch them.
        return;
      }
      obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);

      assert(oopDesc::is_oop(obj), "Must be a valid oop");
      if (_bitmap->par_mark(obj)) {
        _queue->push(ShenandoahMarkTask(obj));
      }
    }
  }
public:
  ShenandoahObjectIterateParScanClosure(MarkBitMap* bitmap, ShenandoahObjToScanQueue* q) :
    _bitmap(bitmap), _queue(q), _heap(ShenandoahHeap::heap()),
    _marking_context(_heap->marking_context()) {}
  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

// Object iterator for parallel heap iteration.
// The root scanning phase happens in the constructor, as preparation of the
// parallel marking queues.
// Every worker processes its own marking queue. Work-stealing is used
// to balance the workload.
1378 class ShenandoahParallelObjectIterator : public ParallelObjectIteratorImpl {
1379 private:
1380   uint                         _num_workers;
1381   bool                         _init_ready;
1382   MarkBitMap*                  _aux_bit_map;
1383   ShenandoahHeap*              _heap;
1384   ShenandoahScanObjectStack    _roots_stack; // global roots stack
1385   ShenandoahObjToScanQueueSet* _task_queues;
1386 public:
1387   ShenandoahParallelObjectIterator(uint num_workers, MarkBitMap* bitmap) :
1388         _num_workers(num_workers),
1389         _init_ready(false),
1390         _aux_bit_map(bitmap),
1391         _heap(ShenandoahHeap::heap()) {
1392     // Initialize bitmap
1393     _init_ready = _heap->prepare_aux_bitmap_for_iteration();
1394     if (!_init_ready) {
1395       return;
1396     }
1397 
1398     ObjectIterateScanRootClosure oops(_aux_bit_map, &_roots_stack);
1399     _heap->scan_roots_for_iteration(&_roots_stack, &oops);
1400 
1401     _init_ready = prepare_worker_queues();
1402   }
1403 
1404   ~ShenandoahParallelObjectIterator() {
1405     // Reclaim bitmap
1406     _heap->reclaim_aux_bitmap_for_iteration();
1407     // Reclaim the worker queues
1408     if (_task_queues != NULL) {
1409       for (uint i = 0; i < _num_workers; ++i) {
1410         ShenandoahObjToScanQueue* q = _task_queues->queue(i);
1411         if (q != NULL) {
1412           delete q;
1413           _task_queues->register_queue(i, NULL);
1414         }
1415       }
1416       delete _task_queues;
1417       _task_queues = NULL;
1418     }
1419   }
1420 
1421   virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
1422     if (_init_ready) {
1423       object_iterate_parallel(cl, worker_id, _task_queues);
1424     }
1425   }
1426 
1427 private:
1428   // Divide the global roots stack into worker queues
1429   bool prepare_worker_queues() {
1430     _task_queues = new ShenandoahObjToScanQueueSet((int) _num_workers);
1431     // Initialize a queue for every worker
1432     for (uint i = 0; i < _num_workers; ++i) {
1433       ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
1434       task_queue->initialize();
1435       _task_queues->register_queue(i, task_queue);
1436     }
1437     // Divide roots among the workers. Assuming that the object reference distribution
1438     // correlates with root kind, use round-robin so that every worker has the same
1439     // chance to process every kind of root.
1440     size_t roots_num = _roots_stack.size();
1441     if (roots_num == 0) {
1442       // No work to do
1443       return false;
1444     }
1445 
1446     for (uint j = 0; j < roots_num; j++) {
1447       uint stack_id = j % _num_workers;
1448       oop obj = _roots_stack.pop();
1449       _task_queues->queue(stack_id)->push(ShenandoahMarkTask(obj));
1450     }
1451     return true;
1452   }
1453 
1454   void object_iterate_parallel(ObjectClosure* cl,
1455                                uint worker_id,
1456                                ShenandoahObjToScanQueueSet* queue_set) {
1457     assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1458     assert(queue_set != NULL, "task queue must not be NULL");
1459 
1460     ShenandoahObjToScanQueue* q = queue_set->queue(worker_id);
1461     assert(q != NULL, "object iterate queue must not be NULL");
1462 
1463     ShenandoahMarkTask t;
1464     ShenandoahObjectIterateParScanClosure oops(_aux_bit_map, q);
1465 
1466     // Work through the queue to traverse the heap.
1467     // Steal when the local queue runs out of tasks.
1468     while (q->pop(t) || queue_set->steal(worker_id, t)) {
1469       oop obj = t.obj();
1470       assert(oopDesc::is_oop(obj), "must be a valid oop");
1471       cl->do_object(obj);
1472       obj->oop_iterate(&oops);
1473     }
1474     assert(q->is_empty(), "should be empty");
1475   }
1476 };
1477 
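// Entry point for parallel heap iteration. A rough usage sketch: a VM operation
// at a safepoint obtains this iterator, and each of its workers then calls
// object_iterate(cl, worker_id) against its own queue, stealing when idle.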
1478 ParallelObjectIteratorImpl* ShenandoahHeap::parallel_object_iterator(uint workers) {
1479   return new ShenandoahParallelObjectIterator(workers, &_aux_bit_map);
1480 }
1481 
1482 // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
1483 void ShenandoahHeap::keep_alive(oop obj) {
1484   if (is_concurrent_mark_in_progress() && (obj != NULL)) {
1485     ShenandoahBarrierSet::barrier_set()->enqueue(obj);
1486   }
1487 }
1488 
1489 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1490   for (size_t i = 0; i < num_regions(); i++) {
1491     ShenandoahHeapRegion* current = get_region(i);
1492     blk->heap_region_do(current);
1493   }
1494 }
1495 
1496 class ShenandoahParallelHeapRegionTask : public AbstractGangTask {
1497 private:
1498   ShenandoahHeap* const _heap;
1499   ShenandoahHeapRegionClosure* const _blk;
1500 
1501   shenandoah_padding(0);
1502   volatile size_t _index;
1503   shenandoah_padding(1);
1504 
1505 public:
1506   ShenandoahParallelHeapRegionTask(ShenandoahHeapRegionClosure* blk) :
1507           AbstractGangTask("Shenandoah Parallel Region Operation"),
1508           _heap(ShenandoahHeap::heap()), _blk(blk), _index(0) {}
1509 
1510   void work(uint worker_id) {
1511     ShenandoahParallelWorkerSession worker_session(worker_id);
1512     size_t stride = ShenandoahParallelRegionStride;
1513 
1514     size_t max = _heap->num_regions();
1515     while (Atomic::load(&_index) < max) {
1516       size_t cur = Atomic::fetch_and_add(&_index, stride, memory_order_relaxed);
1517       size_t start = cur;
1518       size_t end = MIN2(cur + stride, max);
1519       if (start >= max) break;
1520 
1521       for (size_t i = cur; i < end; i++) {
1522         ShenandoahHeapRegion* current = _heap->get_region(i);
1523         _blk->heap_region_do(current);
1524       }
1525     }
1526   }
1527 };
1528 
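// Apply the closure to all regions, in parallel when there are more regions
// than one ShenandoahParallelRegionStride; otherwise take the serial path.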
1529 void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1530   assert(blk->is_thread_safe(), "Only thread-safe closures here");
1531   if (num_regions() > ShenandoahParallelRegionStride) {
1532     ShenandoahParallelHeapRegionTask task(blk);
1533     workers()->run_task(&task);
1534   } else {
1535     heap_region_iterate(blk);
1536   }
1537 }
1538 
1539 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1540 private:
1541   ShenandoahMarkingContext* const _ctx;
1542 public:
1543   ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
1544 
1545   void heap_region_do(ShenandoahHeapRegion* r) {
1546     assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
1547     if (r->is_active()) {
1548       // Check if the region needs its TAMS updated. We have updated it already during
1549       // concurrent reset, so it is very likely we do not need another write here.
1550       if (_ctx->top_at_mark_start(r) != r->top()) {
1551         _ctx->capture_top_at_mark_start(r);
1552       }
1553     } else {
1554       assert(_ctx->top_at_mark_start(r) == r->top(),
1555              "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
1556     }
1557   }
1558 
1559   bool is_thread_safe() { return true; }
1560 };
1561 
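// Empty handshake closure: executing it merely forces every Java thread
// through a handshake point, which is all the rendezvous needs.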
1562 class ShenandoahRendezvousClosure : public HandshakeClosure {
1563 public:
1564   inline ShenandoahRendezvousClosure() : HandshakeClosure("ShenandoahRendezvous") {}
1565   inline void do_thread(Thread* thread) {}
1566 };
1567 
1568 void ShenandoahHeap::rendezvous_threads() {
1569   ShenandoahRendezvousClosure cl;
1570   Handshake::execute(&cl);
1571 }
1572 
1573 void ShenandoahHeap::recycle_trash() {
1574   free_set()->recycle_trash();
1575 }
1576 
1577 class ShenandoahResetUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1578 private:
1579   ShenandoahMarkingContext* const _ctx;
1580 public:
1581   ShenandoahResetUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
1582 
1583   void heap_region_do(ShenandoahHeapRegion* r) {
1584     if (r->is_active()) {
1585       // Reset live data and set TAMS optimistically. We would recheck these under the pause
1586       // anyway, to capture any updates that happen in the meantime.
1587       r->clear_live_data();
1588       _ctx->capture_top_at_mark_start(r);
1589     }
1590   }
1591 
1592   bool is_thread_safe() { return true; }
1593 };
1594 
1595 void ShenandoahHeap::prepare_gc() {
1596   reset_mark_bitmap();
1597 
1598   ShenandoahResetUpdateRegionStateClosure cl;
1599   parallel_heap_region_iterate(&cl);
1600 }
1601 
1602 class ShenandoahFinalMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1603 private:
1604   ShenandoahMarkingContext* const _ctx;
1605   ShenandoahHeapLock* const _lock;
1606 
1607 public:
1608   ShenandoahFinalMarkUpdateRegionStateClosure() :
1609     _ctx(ShenandoahHeap::heap()->complete_marking_context()), _lock(ShenandoahHeap::heap()->lock()) {}
1610 
1611   void heap_region_do(ShenandoahHeapRegion* r) {
1612     if (r->is_active()) {
1613       // All allocations past TAMS are implicitly live, adjust the region data.
1614       // Bitmaps/TAMS are swapped at this point, so we need to poll complete bitmap.
1615       HeapWord *tams = _ctx->top_at_mark_start(r);
1616       HeapWord *top = r->top();
1617       if (top > tams) {
1618         r->increase_live_data_alloc_words(pointer_delta(top, tams));
1619       }
1620 
1621       // We are about to select the collection set, so make sure it knows about
1622       // the current pinning status. Also, this allows trashing more regions that
1623       // have just had their pinning status dropped.
1624       if (r->is_pinned()) {
1625         if (r->pin_count() == 0) {
1626           ShenandoahHeapLocker locker(_lock);
1627           r->make_unpinned();
1628         }
1629       } else {
1630         if (r->pin_count() > 0) {
1631           ShenandoahHeapLocker locker(_lock);
1632           r->make_pinned();
1633         }
1634       }
1635 
1636       // Remember the limit for updating refs. It is guaranteed that no from-space
1637       // refs are written from here on.
1638       r->set_update_watermark_at_safepoint(r->top());
1639     } else {
1640       assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
1641       assert(_ctx->top_at_mark_start(r) == r->top(),
1642              "Region " SIZE_FORMAT " should have correct TAMS", r->index());
1643     }
1644   }
1645 
1646   bool is_thread_safe() { return true; }
1647 };
1648 
1649 void ShenandoahHeap::prepare_regions_and_collection_set(bool concurrent) {
1650   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
1651   {
1652     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_update_region_states :
1653                                          ShenandoahPhaseTimings::degen_gc_final_update_region_states);
1654     ShenandoahFinalMarkUpdateRegionStateClosure cl;
1655     parallel_heap_region_iterate(&cl);
1656 
1657     assert_pinned_region_status();
1658   }
1659 
1660   {
1661     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::choose_cset :
1662                                          ShenandoahPhaseTimings::degen_gc_choose_cset);
1663     ShenandoahHeapLocker locker(lock());
1664     _collection_set->clear();
1665     heuristics()->choose_collection_set(_collection_set);
1666   }
1667 
1668   {
1669     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset :
1670                                          ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
1671     ShenandoahHeapLocker locker(lock());
1672     _free_set->rebuild();
1673   }
1674 }
1675 
1676 void ShenandoahHeap::do_class_unloading() {
1677   _unloader.unload();
1678 }
1679 
1680 void ShenandoahHeap::stw_weak_refs(bool full_gc) {
1681   // Weak refs processing
1682   ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
1683                                                 : ShenandoahPhaseTimings::degen_gc_weakrefs;
1684   ShenandoahTimingsTracker t(phase);
1685   ShenandoahGCWorkerPhase worker_phase(phase);
1686   ref_processor()->process_references(phase, workers(), false /* concurrent */);
1687 }
1688 
1689 void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
1690   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
1691 
1692   // Evacuation is over, so no GCLABs are needed anymore. GCLABs are under the update
1693   // watermark (URWM), so we need to make them parsable for the update code to work
1694   // correctly. Plus, we can compute new sizes for future GCLABs here.
1695   if (UseTLAB) {
1696     ShenandoahGCPhase phase(concurrent ?
1697                             ShenandoahPhaseTimings::init_update_refs_manage_gclabs :
1698                             ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
1699     gclabs_retire(ResizeTLAB);
1700   }
1701 
1702   _update_refs_iterator.reset();
1703 }
1704 
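// Propagate the canonical gc state to the thread-local copies of all Java
// threads. This must happen at a safepoint, where threads cannot race with us.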
1705 void ShenandoahHeap::set_gc_state_all_threads() {
1706   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
1707   if (_gc_state_changed) {
1708     _gc_state_changed = false;
1709     char state = gc_state();
1710     for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1711       ShenandoahThreadLocalData::set_gc_state(t, state);
1712     }
1713   }
1714 }
1715 
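// Update the canonical gc state and mark it dirty; per-thread copies are
// refreshed later via set_gc_state_all_threads().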
1716 void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) {
1717   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
1718   _gc_state.set_cond(mask, value);
1719   _gc_state_changed = true;
1720 }
1721 
1722 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1723   assert(!has_forwarded_objects(), "Not expected before/after mark phase");
1724   set_gc_state_mask(MARKING, in_progress);
1725   ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1726 }
1727 
1728 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1729   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
1730   set_gc_state_mask(EVACUATION, in_progress);
1731 }
1732 
1733 void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
1734   if (in_progress) {
1735     _concurrent_strong_root_in_progress.set();
1736   } else {
1737     _concurrent_strong_root_in_progress.unset();
1738   }
1739 }
1740 
1741 void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
1742   set_gc_state_mask(WEAK_ROOTS, cond);
1743 }
1744 
1745 GCTracer* ShenandoahHeap::tracer() {
1746   return shenandoah_policy()->tracer();
1747 }
1748 
1749 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
1750   return _free_set->used();
1751 }
1752 
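// Atomically flip _cancelled_gc from CANCELLABLE to CANCELLED. Only the thread
// that wins the race observes CANCELLABLE and returns true.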
1753 bool ShenandoahHeap::try_cancel_gc() {
1754   jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
1755   return prev == CANCELLABLE;
1756 }
1757 
1758 void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
1759   if (try_cancel_gc()) {
1760     FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
1761     log_info(gc)("%s", msg.buffer());
1762     Events::log(Thread::current(), "%s", msg.buffer());
1763   }
1764 }
1765 
1766 uint ShenandoahHeap::max_workers() {
1767   return _max_workers;
1768 }
1769 
1770 void ShenandoahHeap::stop() {
1771   // The shutdown sequence should be able to terminate even while GC is running.
1772 
1773   // Step 0. Notify policy to disable event recording.
1774   _shenandoah_policy->record_shutdown();
1775 
1776   // Step 1. Notify control thread that we are in shutdown.
1777   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
1778   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
1779   control_thread()->prepare_for_graceful_shutdown();
1780 
1781   // Step 2. Notify GC workers that we are cancelling GC.
1782   cancel_gc(GCCause::_shenandoah_stop_vm);
1783 
1784   // Step 3. Wait until GC worker exits normally.
1785   control_thread()->stop();
1786 }
1787 
1788 void ShenandoahHeap::stw_unload_classes(bool full_gc) {
1789   if (!unload_classes()) return;
1790   // Unload classes and purge SystemDictionary.
1791   {
1792     ShenandoahPhaseTimings::Phase phase = full_gc ?
1793                                           ShenandoahPhaseTimings::full_gc_purge_class_unload :
1794                                           ShenandoahPhaseTimings::degen_gc_purge_class_unload;
1795     ShenandoahGCPhase gc_phase(phase);
1796     ShenandoahGCWorkerPhase worker_phase(phase);
1797     bool purged_class = SystemDictionary::do_unloading(gc_timer());
1798 
1799     ShenandoahIsAliveSelector is_alive;
1800     uint num_workers = _workers->active_workers();
1801     ShenandoahClassUnloadingTask unlink_task(phase, is_alive.is_alive_closure(), num_workers, purged_class);
1802     _workers->run_task(&unlink_task);
1803   }
1804 
1805   {
1806     ShenandoahGCPhase phase(full_gc ?
1807                             ShenandoahPhaseTimings::full_gc_purge_cldg :
1808                             ShenandoahPhaseTimings::degen_gc_purge_cldg);
1809     ClassLoaderDataGraph::purge(/*at_safepoint*/true);
1810   }
1811   // Resize and verify metaspace
1812   MetaspaceGC::compute_new_size();
1813   DEBUG_ONLY(MetaspaceUtils::verify();)
1814 }
1815 
1816 // Weak roots are either pre-evacuated (final mark) or updated (final updaterefs),
1817 // so they should not have forwarded oops.
1818 // However, we do need to "null" dead oops in the roots, if that cannot be done
1819 // in concurrent cycles.
1820 void ShenandoahHeap::stw_process_weak_roots(bool full_gc) {
1821   uint num_workers = _workers->active_workers();
1822   ShenandoahPhaseTimings::Phase timing_phase = full_gc ?
1823                                                ShenandoahPhaseTimings::full_gc_purge_weak_par :
1824                                                ShenandoahPhaseTimings::degen_gc_purge_weak_par;
1825   ShenandoahGCPhase phase(timing_phase);
1826   ShenandoahGCWorkerPhase worker_phase(timing_phase);
1827   // Cleanup weak roots
1828   if (has_forwarded_objects()) {
1829     ShenandoahForwardedIsAliveClosure is_alive;
1830     ShenandoahUpdateRefsClosure keep_alive;
1831     ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveClosure, ShenandoahUpdateRefsClosure>
1832       cleaning_task(timing_phase, &is_alive, &keep_alive, num_workers);
1833     _workers->run_task(&cleaning_task);
1834   } else {
1835     ShenandoahIsAliveClosure is_alive;
1836 #ifdef ASSERT
1837     ShenandoahAssertNotForwardedClosure verify_cl;
1838     ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, ShenandoahAssertNotForwardedClosure>
1839       cleaning_task(timing_phase, &is_alive, &verify_cl, num_workers);
1840 #else
1841     ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, DoNothingClosure>
1842       cleaning_task(timing_phase, &is_alive, &do_nothing_cl, num_workers);
1843 #endif
1844     _workers->run_task(&cleaning_task);
1845   }
1846 }
1847 
1848 void ShenandoahHeap::parallel_cleaning(bool full_gc) {
1849   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
1850   assert(is_stw_gc_in_progress(), "Only for Degenerated and Full GC");
1851   ShenandoahGCPhase phase(full_gc ?
1852                           ShenandoahPhaseTimings::full_gc_purge :
1853                           ShenandoahPhaseTimings::degen_gc_purge);
1854   stw_weak_refs(full_gc);
1855   stw_process_weak_roots(full_gc);
1856   stw_unload_classes(full_gc);
1857 }
1858 
1859 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
1860   set_gc_state_mask(HAS_FORWARDED, cond);
1861 }
1862 
1863 void ShenandoahHeap::set_unload_classes(bool uc) {
1864   _unload_classes.set_cond(uc);
1865 }
1866 
1867 bool ShenandoahHeap::unload_classes() const {
1868   return _unload_classes.is_set();
1869 }
1870 
1871 address ShenandoahHeap::in_cset_fast_test_addr() {
1872   ShenandoahHeap* heap = ShenandoahHeap::heap();
1873   assert(heap->collection_set() != NULL, "Sanity");
1874   return (address) heap->collection_set()->biased_map_address();
1875 }
1876 
1877 address ShenandoahHeap::cancelled_gc_addr() {
1878   return (address) ShenandoahHeap::heap()->_cancelled_gc.addr_of();
1879 }
1880 
1881 size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
1882   return Atomic::load(&_bytes_allocated_since_gc_start);
1883 }
1884 
1885 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
1886   Atomic::store(&_bytes_allocated_since_gc_start, (size_t)0);
1887 }
1888 
1889 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
1890   _degenerated_gc_in_progress.set_cond(in_progress);
1891 }
1892 
1893 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
1894   _full_gc_in_progress.set_cond(in_progress);
1895 }
1896 
1897 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
1898   assert (is_full_gc_in_progress(), "should be");
1899   _full_gc_move_in_progress.set_cond(in_progress);
1900 }
1901 
1902 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
1903   set_gc_state_mask(UPDATEREFS, in_progress);
1904 }
1905 
1906 void ShenandoahHeap::register_nmethod(nmethod* nm) {
1907   ShenandoahCodeRoots::register_nmethod(nm);
1908 }
1909 
1910 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
1911   ShenandoahCodeRoots::unregister_nmethod(nm);
1912 }
1913 
1914 void ShenandoahHeap::flush_nmethod(nmethod* nm) {
1915   ShenandoahCodeRoots::flush_nmethod(nm);
1916 }
1917 
1918 oop ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
1919   heap_region_containing(o)->record_pin();
1920   return o;
1921 }
1922 
1923 void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
1924   ShenandoahHeapRegion* r = heap_region_containing(o);
1925   assert(r != NULL, "Sanity");
1926   assert(r->pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", r->index());
1927   r->record_unpin();
1928 }
1929 
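// Re-sync region pinned status with the current pin counts, under the heap lock.
// The invariant asserted afterwards is: is_pinned() <=> (pin_count() > 0).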
1930 void ShenandoahHeap::sync_pinned_region_status() {
1931   ShenandoahHeapLocker locker(lock());
1932 
1933   for (size_t i = 0; i < num_regions(); i++) {
1934     ShenandoahHeapRegion *r = get_region(i);
1935     if (r->is_active()) {
1936       if (r->is_pinned()) {
1937         if (r->pin_count() == 0) {
1938           r->make_unpinned();
1939         }
1940       } else {
1941         if (r->pin_count() > 0) {
1942           r->make_pinned();
1943         }
1944       }
1945     }
1946   }
1947 
1948   assert_pinned_region_status();
1949 }
1950 
1951 #ifdef ASSERT
1952 void ShenandoahHeap::assert_pinned_region_status() {
1953   for (size_t i = 0; i < num_regions(); i++) {
1954     ShenandoahHeapRegion* r = get_region(i);
1955     assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
1956            "Region " SIZE_FORMAT " pinning status is inconsistent", i);
1957   }
1958 }
1959 #endif
1960 
1961 ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
1962   return _gc_timer;
1963 }
1964 
1965 void ShenandoahHeap::prepare_concurrent_roots() {
1966   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
1967   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
1968   set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
1969   set_concurrent_weak_root_in_progress(true);
1970   if (unload_classes()) {
1971     _unloader.prepare();
1972   }
1973 }
1974 
1975 void ShenandoahHeap::finish_concurrent_roots() {
1976   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
1977   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
1978   if (unload_classes()) {
1979     _unloader.finish();
1980   }
1981 }
1982 
1983 #ifdef ASSERT
1984 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
1985   assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
1986 
1987   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1988     if (UseDynamicNumberOfGCThreads) {
1989       assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");
1990     } else {
1991       // Use ParallelGCThreads inside safepoints
1992       assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints");
1993     }
1994   } else {
1995     if (UseDynamicNumberOfGCThreads) {
1996       assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
1997     } else {
1998       // Use ConcGCThreads outside safepoints
1999       assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
2000     }
2001   }
2002 }
2003 #endif
2004 
2005 ShenandoahVerifier* ShenandoahHeap::verifier() {
2006   guarantee(ShenandoahVerify, "Should be enabled");
2007   assert (_verifier != NULL, "sanity");
2008   return _verifier;
2009 }
2010 
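// Task that walks the regions and updates references in marked objects up to
// each region's update watermark. CONCURRENT selects the closure flavor and
// whether workers join the suspendible thread set.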
2011 template<bool CONCURRENT>
2012 class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
2013 private:
2014   ShenandoahHeap* _heap;
2015   ShenandoahRegionIterator* _regions;
2016 public:
2017   ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions) :
2018     AbstractGangTask("Shenandoah Update References"),
2019     _heap(ShenandoahHeap::heap()),
2020     _regions(regions) {
2021   }
2022 
2023   void work(uint worker_id) {
2024     if (CONCURRENT) {
2025       ShenandoahConcurrentWorkerSession worker_session(worker_id);
2026       ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
2027       do_work<ShenandoahConcUpdateRefsClosure>();
2028     } else {
2029       ShenandoahParallelWorkerSession worker_session(worker_id);
2030       do_work<ShenandoahSTWUpdateRefsClosure>();
2031     }
2032   }
2033 
2034 private:
2035   template<class T>
2036   void do_work() {
2037     T cl;
2038     ShenandoahHeapRegion* r = _regions->next();
2039     ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
2040     while (r != NULL) {
2041       HeapWord* update_watermark = r->get_update_watermark();
2042       assert (update_watermark >= r->bottom(), "sanity");
2043       if (r->is_active() && !r->is_cset()) {
2044         _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2045       }
2046       if (ShenandoahPacing) {
2047         _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
2048       }
2049       if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
2050         return;
2051       }
2052       r = _regions->next();
2053     }
2054   }
2055 };
2056 
2057 void ShenandoahHeap::update_heap_references(bool concurrent) {
2058   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2059 
2060   if (concurrent) {
2061     ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator);
2062     workers()->run_task(&task);
2063   } else {
2064     ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator);
2065     workers()->run_task(&task);
2066   }
2067 }
2068 
2069 
2070 class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
2071 private:
2072   ShenandoahHeapLock* const _lock;
2073 
2074 public:
2075   ShenandoahFinalUpdateRefsUpdateRegionStateClosure() : _lock(ShenandoahHeap::heap()->lock()) {}
2076 
2077   void heap_region_do(ShenandoahHeapRegion* r) {
2078     // Drop the unnecessary "pinned" state from regions that do not have CP marks
2079     // anymore, as this allows trashing them.
2080 
2081     if (r->is_active()) {
2082       if (r->is_pinned()) {
2083         if (r->pin_count() == 0) {
2084           ShenandoahHeapLocker locker(_lock);
2085           r->make_unpinned();
2086         }
2087       } else {
2088         if (r->pin_count() > 0) {
2089           ShenandoahHeapLocker locker(_lock);
2090           r->make_pinned();
2091         }
2092       }
2093     }
2094   }
2095 
2096   bool is_thread_safe() { return true; }
2097 };
2098 
2099 void ShenandoahHeap::update_heap_region_states(bool concurrent) {
2100   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2101   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2102 
2103   {
2104     ShenandoahGCPhase phase(concurrent ?
2105                             ShenandoahPhaseTimings::final_update_refs_update_region_states :
2106                             ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
2107     ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl;
2108     parallel_heap_region_iterate(&cl);
2109 
2110     assert_pinned_region_status();
2111   }
2112 
2113   {
2114     ShenandoahGCPhase phase(concurrent ?
2115                             ShenandoahPhaseTimings::final_update_refs_trash_cset :
2116                             ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
2117     trash_cset_regions();
2118   }
2119 }
2120 
2121 void ShenandoahHeap::rebuild_free_set(bool concurrent) {
2122   {
2123     ShenandoahGCPhase phase(concurrent ?
2124                             ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
2125                             ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
2126     ShenandoahHeapLocker locker(lock());
2127     _free_set->rebuild();
2128   }
2129 }
2130 
2131 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2132   print_on(st);
2133   st->cr();
2134   print_heap_regions_on(st);
2135 }
2136 
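// A single bitmap slice backs several regions. The slice counts as committed
// if any region in the group is committed; skip_self lets the caller ask
// whether any *other* region still needs the slice.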
2137 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2138   size_t slice = r->index() / _bitmap_regions_per_slice;
2139 
2140   size_t regions_from = _bitmap_regions_per_slice * slice;
2141   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2142   for (size_t g = regions_from; g < regions_to; g++) {
2143     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2144     if (skip_self && g == r->index()) continue;
2145     if (get_region(g)->is_committed()) {
2146       return true;
2147     }
2148   }
2149   return false;
2150 }
2151 
2152 bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2153   shenandoah_assert_heaplocked();
2154 
2155   // Bitmaps in special regions do not need commits
2156   if (_bitmap_region_special) {
2157     return true;
2158   }
2159 
2160   if (is_bitmap_slice_committed(r, true)) {
2161     // Some other region from the group is already committed, meaning the bitmap
2162     // slice is already committed, so we exit right away.
2163     return true;
2164   }
2165 
2166   // Commit the bitmap slice:
2167   size_t slice = r->index() / _bitmap_regions_per_slice;
2168   size_t off = _bitmap_bytes_per_slice * slice;
2169   size_t len = _bitmap_bytes_per_slice;
2170   char* start = (char*) _bitmap_region.start() + off;
2171 
2172   if (!os::commit_memory(start, len, false)) {
2173     return false;
2174   }
2175 
2176   if (AlwaysPreTouch) {
2177     os::pretouch_memory(start, start + len, _pretouch_bitmap_page_size);
2178   }
2179 
2180   return true;
2181 }
2182 
2183 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2184   shenandoah_assert_heaplocked();
2185 
2186   // Bitmaps in special regions do not need uncommits
2187   if (_bitmap_region_special) {
2188     return true;
2189   }
2190 
2191   if (is_bitmap_slice_committed(r, true)) {
2192     // Some other region from the group is still committed, meaning the bitmap
2193     // slice should stay committed, so exit right away.
2194     return true;
2195   }
2196 
2197   // Uncommit the bitmap slice:
2198   size_t slice = r->index() / _bitmap_regions_per_slice;
2199   size_t off = _bitmap_bytes_per_slice * slice;
2200   size_t len = _bitmap_bytes_per_slice;
2201   if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
2202     return false;
2203   }
2204   return true;
2205 }
2206 
2207 void ShenandoahHeap::safepoint_synchronize_begin() {
2208   if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
2209     SuspendibleThreadSet::synchronize();
2210   }
2211 }
2212 
2213 void ShenandoahHeap::safepoint_synchronize_end() {
2214   if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
2215     SuspendibleThreadSet::desynchronize();
2216   }
2217 }
2218 
2219 void ShenandoahHeap::entry_uncommit(double shrink_before, size_t shrink_until) {
2220   static const char *msg = "Concurrent uncommit";
2221   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_uncommit, true /* log_heap_usage */);
2222   EventMark em("%s", msg);
2223 
2224   op_uncommit(shrink_before, shrink_until);
2225 }
2226 
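// Diagnostic support: with ShenandoahAllocFailureALot, roughly 5% of calls arm
// an injected allocation failure, then give GC a short moment to react.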
2227 void ShenandoahHeap::try_inject_alloc_failure() {
2228   if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2229     _inject_alloc_failure.set();
2230     os::naked_short_sleep(1);
2231     if (cancelled_gc()) {
2232       log_info(gc)("Allocation failure was successfully injected");
2233     }
2234   }
2235 }
2236 
2237 bool ShenandoahHeap::should_inject_alloc_failure() {
2238   return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2239 }
2240 
2241 void ShenandoahHeap::initialize_serviceability() {
2242   _memory_pool = new ShenandoahMemoryPool(this);
2243   _cycle_memory_manager.add_pool(_memory_pool);
2244   _stw_memory_manager.add_pool(_memory_pool);
2245 }
2246 
2247 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2248   GrowableArray<GCMemoryManager*> memory_managers(2);
2249   memory_managers.append(&_cycle_memory_manager);
2250   memory_managers.append(&_stw_memory_manager);
2251   return memory_managers;
2252 }
2253 
2254 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2255   GrowableArray<MemoryPool*> memory_pools(1);
2256   memory_pools.append(_memory_pool);
2257   return memory_pools;
2258 }
2259 
2260 MemoryUsage ShenandoahHeap::memory_usage() {
2261   return _memory_pool->get_memory_usage();
2262 }
2263 
2264 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2265   _heap(ShenandoahHeap::heap()),
2266   _index(0) {}
2267 
2268 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2269   _heap(heap),
2270   _index(0) {}
2271 
2272 void ShenandoahRegionIterator::reset() {
2273   _index = 0;
2274 }
2275 
2276 bool ShenandoahRegionIterator::has_next() const {
2277   return _index < _heap->num_regions();
2278 }
2279 
2280 char ShenandoahHeap::gc_state() const {
2281   return _gc_state.raw_value();
2282 }
2283 
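// Hand out the per-worker liveness cache. It must be all-zero at this point:
// workers are expected to have flushed it back after previous use.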
2284 ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
2285 #ifdef ASSERT
2286   assert(_liveness_cache != NULL, "sanity");
2287   assert(worker_id < _max_workers, "sanity");
2288   for (uint i = 0; i < num_regions(); i++) {
2289     assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
2290   }
2291 #endif
2292   return _liveness_cache[worker_id];
2293 }
2294 
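// Fold the per-worker liveness cache back into the regions' live data, and
// zero the cache for the next use.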
2295 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
2296   assert(worker_id < _max_workers, "sanity");
2297   assert(_liveness_cache != NULL, "sanity");
2298   ShenandoahLiveData* ld = _liveness_cache[worker_id];
2299   for (uint i = 0; i < num_regions(); i++) {
2300     ShenandoahLiveData live = ld[i];
2301     if (live > 0) {
2302       ShenandoahHeapRegion* r = get_region(i);
2303       r->increase_live_data_gc_words(live);
2304       ld[i] = 0;
2305     }
2306   }
2307 }