/*
 * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/universe.hpp"

#include "gc/shared/gcArguments.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/plab.hpp"
#include "gc/shared/tlab_globals.hpp"

#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahInitLogger.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahMemoryPool.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPacer.inline.hpp"
#include "gc/shenandoah/shenandoahPadding.hpp"
#include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahStringDedup.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/mode/shenandoahIUMode.hpp"
#include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
#include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
#if INCLUDE_JFR
#include "gc/shenandoah/shenandoahJfrSupport.hpp"
#endif

#include "classfile/systemDictionary.hpp"
#include "memory/classLoaderMetaspace.hpp"
#include "memory/metaspaceUtils.hpp"
#include "oops/compressedOops.inline.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/vmThread.hpp"
#include "services/mallocTracker.hpp"
#include "services/memTracker.hpp"
#include "utilities/events.hpp"
#include "utilities/powerOfTwo.hpp"

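// Pre-touch tasks walk the heap regions in parallel and touch the committed
// backing memory (and the matching marking bitmap ranges), so that OS pages
// are actually populated by worker threads before the application runs.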
class ShenandoahPretouchHeapTask : public WorkerTask {
private:
  ShenandoahRegionIterator _regions;
  const size_t _page_size;
public:
  ShenandoahPretouchHeapTask(size_t page_size) :
    WorkerTask("Shenandoah Pretouch Heap"),
    _page_size(page_size) {}

  virtual void work(uint worker_id) {
    ShenandoahHeapRegion* r = _regions.next();
    while (r != NULL) {
      if (r->is_committed()) {
        os::pretouch_memory(r->bottom(), r->end(), _page_size);
      }
      r = _regions.next();
    }
  }
};

class ShenandoahPretouchBitmapTask : public WorkerTask {
private:
  ShenandoahRegionIterator _regions;
  char* _bitmap_base;
  const size_t _bitmap_size;
  const size_t _page_size;
public:
  ShenandoahPretouchBitmapTask(char* bitmap_base, size_t bitmap_size, size_t page_size) :
    WorkerTask("Shenandoah Pretouch Bitmap"),
    _bitmap_base(bitmap_base),
    _bitmap_size(bitmap_size),
    _page_size(page_size) {}

  virtual void work(uint worker_id) {
    ShenandoahHeapRegion* r = _regions.next();
    while (r != NULL) {
      size_t start = r->index()       * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      size_t end   = (r->index() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " < " SIZE_FORMAT, end, _bitmap_size);

      if (r->is_committed()) {
        os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);
      }

      r = _regions.next();
    }
  }
};

jint ShenandoahHeap::initialize() {
  //
  // Figure out heap sizing
  //

  size_t init_byte_size = InitialHeapSize;
  size_t min_byte_size  = MinHeapSize;
  size_t max_byte_size  = MaxHeapSize;
  size_t heap_alignment = HeapAlignment;

  size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();

  Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
  Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");

  _num_regions = ShenandoahHeapRegion::region_count();
  assert(_num_regions == (max_byte_size / reg_size_bytes),
         "Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT,
         _num_regions, max_byte_size, reg_size_bytes);

  // Now we know the number of regions, initialize the heuristics.
  initialize_heuristics();

  size_t num_committed_regions = init_byte_size / reg_size_bytes;
  num_committed_regions = MIN2(num_committed_regions, _num_regions);
  assert(num_committed_regions <= _num_regions, "sanity");
  _initial_size = num_committed_regions * reg_size_bytes;

  size_t num_min_regions = min_byte_size / reg_size_bytes;
  num_min_regions = MIN2(num_min_regions, _num_regions);
  assert(num_min_regions <= _num_regions, "sanity");
  _minimum_size = num_min_regions * reg_size_bytes;

  // Default to max heap size.
  _soft_max_size = _num_regions * reg_size_bytes;

  _committed = _initial_size;

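  // All three mappings follow the same page sizing policy: use large pages
  // when enabled, otherwise fall back to the regular VM page size.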
  size_t heap_page_size   = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
  size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
  size_t region_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();

  //
  // Reserve and commit memory for heap
  //

  ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
  initialize_reserved_region(heap_rs);
  _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
  _heap_region_special = heap_rs.special();

  assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
         "Misaligned heap: " PTR_FORMAT, p2i(base()));

#if SHENANDOAH_OPTIMIZED_MARKTASK
  // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
  // Fail if we ever attempt to address more than we can.
  if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
    FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
                          "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
                          "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
                p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
    vm_exit_during_initialization("Fatal Error", buf);
  }
#endif

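  // Commit only the initial portion of the heap eagerly; the remaining regions
  // are committed on demand, unless the reservation is already special.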
  ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
  if (!_heap_region_special) {
    os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
                              "Cannot commit heap memory");
  }

  //
  // Reserve and commit memory for bitmap(s)
  //

  _bitmap_size = ShenandoahMarkBitMap::compute_size(heap_rs.size());
  _bitmap_size = align_up(_bitmap_size, bitmap_page_size);

  size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();

  guarantee(bitmap_bytes_per_region != 0,
            "Bitmap bytes per region should not be zero");
  guarantee(is_power_of_2(bitmap_bytes_per_region),
            "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);

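  // A bitmap "slice" is the granularity at which bitmap memory is committed
  // and uncommitted. If one page covers the bitmaps of several regions, the
  // slice is a whole page; otherwise it is a single region's worth of bitmap.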
  if (bitmap_page_size > bitmap_bytes_per_region) {
    _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
    _bitmap_bytes_per_slice = bitmap_page_size;
  } else {
    _bitmap_regions_per_slice = 1;
    _bitmap_bytes_per_slice = bitmap_bytes_per_region;
  }

  guarantee(_bitmap_regions_per_slice >= 1,
            "Should have at least one region per slice: " SIZE_FORMAT,
            _bitmap_regions_per_slice);

  guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
            "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
            _bitmap_bytes_per_slice, bitmap_page_size);

  ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
  MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
  _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
  _bitmap_region_special = bitmap.special();

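  // Commit enough bitmap to cover the initially committed regions, rounded up
  // to whole slices and clamped at the total bitmap size.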
  size_t bitmap_init_commit = _bitmap_bytes_per_slice *
                              align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
  bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
  if (!_bitmap_region_special) {
    os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
                              "Cannot commit bitmap memory");
  }

  _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions, _max_workers);

  if (ShenandoahVerify) {
    ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
    if (!verify_bitmap.special()) {
      os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
                                "Cannot commit verification bitmap memory");
    }
    MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
    MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
    _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
    _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
  }

  // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
  ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
  MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
  _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
  _aux_bitmap_region_special = aux_bitmap.special();
  _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);

  //
  // Create regions and region sets
  //
  size_t region_align = align_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE);
  size_t region_storage_size = align_up(region_align * _num_regions, region_page_size);
  region_storage_size = align_up(region_storage_size, os::vm_allocation_granularity());

  ReservedSpace region_storage(region_storage_size, region_page_size);
  MemTracker::record_virtual_memory_type(region_storage.base(), mtGC);
  if (!region_storage.special()) {
    os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false,
                              "Cannot commit region memory");
  }

  // Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks.
  // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there.
  // If not successful, bite the bullet and allocate at whatever address.
  {
    size_t cset_align = MAX2<size_t>(os::vm_page_size(), os::vm_allocation_granularity());
    size_t cset_size = align_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align);

    uintptr_t min = round_up_power_of_2(cset_align);
    uintptr_t max = (1u << 30u);

    for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
      char* req_addr = (char*)addr;
      assert(is_aligned(req_addr, cset_align), "Should be aligned");
      ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size(), req_addr);
      if (cset_rs.is_reserved()) {
        assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
        _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
        break;
      }
    }

    if (_collection_set == NULL) {
      ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size());
      _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
    }
  }

  _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
  _free_set = new ShenandoahFreeSet(this, _num_regions);

  {
    ShenandoahHeapLocker locker(lock());

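    // Carve the reserved heap into fixed-size regions. Each region object
    // lives at a cache-line-aligned slot in region_storage; only the first
    // num_committed_regions are marked committed at startup.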
    for (size_t i = 0; i < _num_regions; i++) {
      HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
      bool is_committed = i < num_committed_regions;
      void* loc = region_storage.base() + i * region_align;

      ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
      assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");

      _marking_context->initialize_top_at_mark_start(r);
      _regions[i] = r;
      assert(!collection_set()->is_in(i), "New region should not be in collection set");
    }

    // Initialize to complete
    _marking_context->mark_complete();

    _free_set->rebuild();
  }

  if (AlwaysPreTouch) {
    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before initialize() below zeroes it with the initializing thread. For any given region,
    // we touch the region and the corresponding bitmaps from the same thread.
    ShenandoahPushWorkerScope scope(workers(), _max_workers, false);

    _pretouch_heap_page_size = heap_page_size;
    _pretouch_bitmap_page_size = bitmap_page_size;

#ifdef LINUX
    // UseTransparentHugePages would madvise that backing memory can be coalesced into huge
    // pages. But the kernel needs to know that every small page is used, in order to coalesce
    // them into a huge one. Therefore, we need to pre-touch with smaller pages.
    if (UseTransparentHugePages) {
      _pretouch_heap_page_size = (size_t)os::vm_page_size();
      _pretouch_bitmap_page_size = (size_t)os::vm_page_size();
    }
#endif

    // OS memory managers may want to coalesce back-to-back pages. Make their jobs
    // simpler by pre-touching contiguous spaces (heap and bitmap) separately.

    ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, _pretouch_bitmap_page_size);
    _workers->run_task(&bcl);

    ShenandoahPretouchHeapTask hcl(_pretouch_heap_page_size);
    _workers->run_task(&hcl);
  }

  //
  // Initialize the rest of GC subsystems
  //

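  // Liveness caches: one array of per-region live data per worker, so marking
  // workers can accumulate liveness without synchronizing on shared counters.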
  _liveness_cache = NEW_C_HEAP_ARRAY(ShenandoahLiveData*, _max_workers, mtGC);
  for (uint worker = 0; worker < _max_workers; worker++) {
    _liveness_cache[worker] = NEW_C_HEAP_ARRAY(ShenandoahLiveData, _num_regions, mtGC);
    Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
  }

  // There should probably be Shenandoah-specific options for these,
  // just as there are G1-specific options.
  {
    ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
    satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
    satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
  }

  _monitoring_support = new ShenandoahMonitoringSupport(this);
  _phase_timings = new ShenandoahPhaseTimings(max_workers());
  ShenandoahCodeRoots::initialize();

  if (ShenandoahPacing) {
    _pacer = new ShenandoahPacer(this);
    _pacer->setup_for_idle();
  } else {
    _pacer = NULL;
  }

  _control_thread = new ShenandoahControlThread();

  ShenandoahInitLogger::print();

  return JNI_OK;
}

void ShenandoahHeap::initialize_mode() {
  if (ShenandoahGCMode != NULL) {
    if (strcmp(ShenandoahGCMode, "satb") == 0) {
      _gc_mode = new ShenandoahSATBMode();
    } else if (strcmp(ShenandoahGCMode, "iu") == 0) {
      _gc_mode = new ShenandoahIUMode();
    } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
      _gc_mode = new ShenandoahPassiveMode();
    } else {
      vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
    }
  } else {
    ShouldNotReachHere();
  }
  _gc_mode->initialize_flags();
  if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
    vm_exit_during_initialization(
            err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                    _gc_mode->name()));
  }
  if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
    vm_exit_during_initialization(
            err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                    _gc_mode->name()));
  }
}

void ShenandoahHeap::initialize_heuristics() {
  assert(_gc_mode != NULL, "Must be initialized");
  _heuristics = _gc_mode->initialize_heuristics();

  if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
    vm_exit_during_initialization(
            err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                    _heuristics->name()));
  }
  if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
    vm_exit_during_initialization(
            err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                    _heuristics->name()));
  }
}

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif

ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  CollectedHeap(),
  _initial_size(0),
  _used(0),
  _committed(0),
  _bytes_allocated_since_gc_start(0),
  _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
  _workers(NULL),
  _safepoint_workers(NULL),
  _heap_region_special(false),
  _num_regions(0),
  _regions(NULL),
  _update_refs_iterator(this),
  _control_thread(NULL),
  _shenandoah_policy(policy),
  _gc_mode(NULL),
  _heuristics(NULL),
  _free_set(NULL),
  _pacer(NULL),
  _verifier(NULL),
  _phase_timings(NULL),
  _monitoring_support(NULL),
  _memory_pool(NULL),
  _stw_memory_manager("Shenandoah Pauses", "end of GC pause"),
  _cycle_memory_manager("Shenandoah Cycles", "end of GC cycle"),
  _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _soft_ref_policy(),
  _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
  _ref_processor(new ShenandoahReferenceProcessor(MAX2(_max_workers, 1U))),
  _marking_context(NULL),
  _bitmap_size(0),
  _bitmap_regions_per_slice(0),
  _bitmap_bytes_per_slice(0),
  _bitmap_region_special(false),
  _aux_bitmap_region_special(false),
  _liveness_cache(NULL),
  _collection_set(NULL)
{
  // Initialize GC mode early, so we can adjust barrier support
  initialize_mode();
  BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));

  _max_workers = MAX2(_max_workers, 1U);
  _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
  if (_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
  }

  if (ParallelGCThreads > 1) {
    _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread",
                                                ParallelGCThreads);
    _safepoint_workers->initialize_workers();
  }
}

#ifdef _MSC_VER
#pragma warning( pop )
#endif

class ShenandoahResetBitmapTask : public WorkerTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahResetBitmapTask() :
    WorkerTask("Shenandoah Reset Bitmap") {}

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahMarkingContext* const ctx = heap->marking_context();
    while (region != NULL) {
      if (heap->is_bitmap_slice_committed(region)) {
        ctx->clear_bitmap(region);
      }
      region = _regions.next();
    }
  }
};

void ShenandoahHeap::reset_mark_bitmap() {
  assert_gc_workers(_workers->active_workers());
  mark_incomplete_marking_context();

  ShenandoahResetBitmapTask task;
  _workers->run_task(&task);
}

void ShenandoahHeap::print_on(outputStream* st) const {
  st->print_cr("Shenandoah Heap");
  st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
               byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
               byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
               byte_size_in_proper_unit(committed()),    proper_unit_for_byte_size(committed()),
               byte_size_in_proper_unit(used()),         proper_unit_for_byte_size(used()));
  st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT "%s regions",
               num_regions(),
               byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
               proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));

  st->print("Status: ");
  if (has_forwarded_objects())                 st->print("has forwarded objects, ");
  if (is_concurrent_mark_in_progress())        st->print("marking, ");
  if (is_evacuation_in_progress())             st->print("evacuating, ");
  if (is_update_refs_in_progress())            st->print("updating refs, ");
  if (is_degenerated_gc_in_progress())         st->print("degenerated gc, ");
  if (is_full_gc_in_progress())                st->print("full gc, ");
  if (is_full_gc_move_in_progress())           st->print("full gc move, ");
  if (is_concurrent_weak_root_in_progress())   st->print("concurrent weak roots, ");
  if (is_concurrent_strong_root_in_progress() &&
      !is_concurrent_weak_root_in_progress())  st->print("concurrent strong roots, ");

  if (cancelled_gc()) {
    st->print("cancelled");
  } else {
    st->print("not cancelled");
  }
  st->cr();

  st->print_cr("Reserved region:");
  st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
               p2i(reserved_region().start()),
               p2i(reserved_region().end()));

  ShenandoahCollectionSet* cset = collection_set();
  st->print_cr("Collection set:");
  if (cset != NULL) {
    st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address()));
    st->print_cr(" - map (biased):  " PTR_FORMAT, p2i(cset->biased_map_address()));
  } else {
    st->print_cr(" (NULL)");
  }

  st->cr();
  MetaspaceUtils::print_on(st);

  if (Verbose) {
    print_heap_regions_on(st);
  }
}

class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    assert(thread != NULL, "Sanity");
    assert(thread->is_Worker_thread(), "Only worker thread expected");
    ShenandoahThreadLocalData::initialize_gclab(thread);
  }
};

void ShenandoahHeap::post_initialize() {
  CollectedHeap::post_initialize();
  MutexLocker ml(Threads_lock);

  ShenandoahInitWorkerGCLABClosure init_gclabs;
  _workers->threads_do(&init_gclabs);

  // GCLABs cannot be initialized early during VM startup, because their max_size
  // cannot be determined yet. Instead, let WorkerThreads initialize the GCLAB
  // whenever a new worker is created.
  _workers->set_initialize_gclab();
  if (_safepoint_workers != NULL) {
    _safepoint_workers->threads_do(&init_gclabs);
    _safepoint_workers->set_initialize_gclab();
  }

  _heuristics->initialize();

  JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers());
}

size_t ShenandoahHeap::used() const {
  return Atomic::load(&_used);
}

size_t ShenandoahHeap::committed() const {
  return Atomic::load(&_committed);
}

void ShenandoahHeap::increase_committed(size_t bytes) {
  shenandoah_assert_heaplocked_or_safepoint();
  _committed += bytes;
}

void ShenandoahHeap::decrease_committed(size_t bytes) {
  shenandoah_assert_heaplocked_or_safepoint();
  _committed -= bytes;
}

void ShenandoahHeap::increase_used(size_t bytes) {
  Atomic::add(&_used, bytes, memory_order_relaxed);
}

void ShenandoahHeap::set_used(size_t bytes) {
  Atomic::store(&_used, bytes);
}

void ShenandoahHeap::decrease_used(size_t bytes) {
  assert(used() >= bytes, "never decrease heap size by more than we've left");
  Atomic::sub(&_used, bytes, memory_order_relaxed);
}

void ShenandoahHeap::increase_allocated(size_t bytes) {
  Atomic::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed);
}

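// Wasted words do not count as used heap, but they still count toward the
// allocation rate that the pacer observes.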
void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
  size_t bytes = words * HeapWordSize;
  if (!waste) {
    increase_used(bytes);
  }
  increase_allocated(bytes);
  if (ShenandoahPacing) {
    control_thread()->pacing_notify_alloc(words);
    if (waste) {
      pacer()->claim_for_alloc(words, true);
    }
  }
}

size_t ShenandoahHeap::capacity() const {
  return committed();
}

size_t ShenandoahHeap::max_capacity() const {
  return _num_regions * ShenandoahHeapRegion::region_size_bytes();
}

size_t ShenandoahHeap::soft_max_capacity() const {
  size_t v = Atomic::load(&_soft_max_size);
  assert(min_capacity() <= v && v <= max_capacity(),
         "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
         min_capacity(), v, max_capacity());
  return v;
}

void ShenandoahHeap::set_soft_max_capacity(size_t v) {
  assert(min_capacity() <= v && v <= max_capacity(),
         "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
         min_capacity(), v, max_capacity());
  Atomic::store(&_soft_max_size, v);
}

size_t ShenandoahHeap::min_capacity() const {
  return _minimum_size;
}

size_t ShenandoahHeap::initial_capacity() const {
  return _initial_size;
}

bool ShenandoahHeap::is_in(const void* p) const {
  HeapWord* heap_base = (HeapWord*) base();
  HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
  return p >= heap_base && p < last_region_end;
}

void ShenandoahHeap::op_uncommit(double shrink_before, size_t shrink_until) {
  assert (ShenandoahUncommit, "should be enabled");

  // The application allocates from the beginning of the heap, and GC allocates
  // at the end of it. It is more efficient to uncommit from the end, so that the
  // application keeps using the committed regions near the beginning. GC
  // allocations are much less frequent, and can therefore absorb the commit costs.

  size_t count = 0;
  for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow
    ShenandoahHeapRegion* r = get_region(i - 1);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      ShenandoahHeapLocker locker(lock());
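      // Re-check under the heap lock: the region may have been allocated into
      // between the unlocked check above and acquiring the lock.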
      if (r->is_empty_committed()) {
        if (committed() < shrink_until + ShenandoahHeapRegion::region_size_bytes()) {
          break;
        }

        r->make_uncommitted();
        count++;
      }
    }
    SpinPause(); // allow allocators to take the lock
  }

  if (count > 0) {
    control_thread()->notify_heap_changed();
  }
}

HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // New object should fit the GCLAB size
  size_t min_size = MAX2(size, PLAB::min_size());

  // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
  size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
  new_size = MIN2(new_size, PLAB::max_size());
  new_size = MAX2(new_size, PLAB::min_size());

  // Record new heuristic value even if we take any shortcut. This captures
  // the case when moderately-sized objects always take a shortcut. At some point,
  // heuristics should catch up with them.
  ShenandoahThreadLocalData::set_gclab_size(thread, new_size);

  if (new_size < size) {
    // New size still does not fit the object. Fall back to shared allocation.
    // This avoids retiring perfectly good GCLABs, when we encounter a large object.
    return NULL;
  }

  // Retire current GCLAB, and allocate a new one.
  PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
  gclab->retire();

  size_t actual_size = 0;
  HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
  if (gclab_buf == NULL) {
    return NULL;
  }

  assert (size <= actual_size, "allocation should fit");

  if (ZeroTLAB) {
    // ..and clear it.
    Copy::zero_to_words(gclab_buf, actual_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  gclab->set_buf(gclab_buf, actual_size);
  return gclab->allocate(size);
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
                                            size_t requested_size,
                                            size_t* actual_size) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
  HeapWord* res = allocate_memory(req);
  if (res != NULL) {
    *actual_size = req.actual_size();
  } else {
    *actual_size = 0;
  }
  return res;
}

HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
                                             size_t word_size,
                                             size_t* actual_size) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
  HeapWord* res = allocate_memory(req);
  if (res != NULL) {
    *actual_size = req.actual_size();
  } else {
    *actual_size = 0;
  }
  return res;
}

HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
  intptr_t pacer_epoch = 0;
  bool in_new_region = false;
  HeapWord* result = NULL;

  if (req.is_mutator_alloc()) {
    if (ShenandoahPacing) {
      pacer()->pace_for_alloc(req.size());
      pacer_epoch = pacer()->epoch();
    }

    if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
      result = allocate_memory_under_lock(req, in_new_region);
    }

    // Allocation failed, block until control thread reacted, then retry allocation.
    //
    // It might happen that one of the threads requesting allocation would unblock
    // way later after GC happened, only to fail the second allocation, because
    // other threads have already depleted the free storage. In this case, a better
    // strategy is to try again, as long as GC makes progress.
    //
    // Then, we need to make sure the allocation was retried after at least one
    // Full GC, which means we want to try more than ShenandoahFullGCThreshold times.

    size_t tries = 0;

    while (result == NULL && _progress_last_gc.is_set()) {
      tries++;
      control_thread()->handle_alloc_failure(req);
      result = allocate_memory_under_lock(req, in_new_region);
    }

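    // Once GC stops making progress, retry a bounded number of times more; this
    // guarantees the request is attempted after at least one Full GC (see the
    // comment above) before the failure is reported to the caller.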
    while (result == NULL && tries <= ShenandoahFullGCThreshold) {
      tries++;
      control_thread()->handle_alloc_failure(req);
      result = allocate_memory_under_lock(req, in_new_region);
    }

  } else {
    assert(req.is_gc_alloc(), "Can only accept GC allocs here");
    result = allocate_memory_under_lock(req, in_new_region);
    // Do not call handle_alloc_failure() here, because we cannot block.
    // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
  }

  if (in_new_region) {
    control_thread()->notify_heap_changed();
  }

  if (result != NULL) {
    size_t requested = req.size();
    size_t actual = req.actual_size();

    assert (req.is_lab_alloc() || (requested == actual),
            "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
            ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);

    if (req.is_mutator_alloc()) {
      notify_mutator_alloc_words(actual, false);

      // If we requested more than we were granted, give the rest back to pacer.
      // This only matters if we are in the same pacing epoch: do not try to unpace
      // over the budget for the other phase.
      if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
        pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
      }
    } else {
      increase_used(actual * HeapWordSize);
    }
  }

  return result;
}

HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
  ShenandoahHeapLocker locker(lock());
  return _free_set->allocate(req, in_new_region);
}

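// Entry point for ordinary (non-TLAB) mutator allocations from the shared heap.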
HeapWord* ShenandoahHeap::mem_allocate(size_t size,
                                        bool*  gc_overhead_limit_was_exceeded) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
  return allocate_memory(req);
}

MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                             size_t size,
                                                             Metaspace::MetadataType mdtype) {
  MetaWord* result;

  // Inform GC heuristics about the metaspace OOM if class unloading is possible.
  if (heuristics()->can_unload_classes()) {
    ShenandoahHeuristics* h = heuristics();
    h->record_metaspace_oom();
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Start full GC
  collect(GCCause::_metadata_GC_clear_soft_refs);

  // Retry allocation
  result = loader_data->metaspace_non_null()->allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Out of memory
  return NULL;
}

class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  Thread* const _thread;
public:
  ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
    _heap(heap), _thread(Thread::current()) {}

  void do_object(oop p) {
    shenandoah_assert_marked(NULL, p);
    if (!p->is_forwarded()) {
      _heap->evacuate_object(p, _thread);
    }
  }
};

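// Parallel evacuation task: workers claim collection set regions one at a time
// and evacuate all live objects in each claimed region.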
class ShenandoahEvacuationTask : public WorkerTask {
private:
  ShenandoahHeap* const _sh;
  ShenandoahCollectionSet* const _cs;
  bool _concurrent;
public:
  ShenandoahEvacuationTask(ShenandoahHeap* sh,
                           ShenandoahCollectionSet* cs,
                           bool concurrent) :
    WorkerTask("Shenandoah Evacuation"),
    _sh(sh),
    _cs(cs),
    _concurrent(concurrent)
  {}

  void work(uint worker_id) {
    if (_concurrent) {
      ShenandoahConcurrentWorkerSession worker_session(worker_id);
      ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
      ShenandoahEvacOOMScope oom_evac_scope;
      do_work();
    } else {
      ShenandoahParallelWorkerSession worker_session(worker_id);
      ShenandoahEvacOOMScope oom_evac_scope;
      do_work();
    }
  }

private:
  void do_work() {
    ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
    ShenandoahHeapRegion* r;
    while ((r = _cs->claim_next()) != NULL) {
      assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
      _sh->marked_object_iterate(r, &cl);

      if (ShenandoahPacing) {
        _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
      }

      if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
        break;
      }
    }
  }
};

void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
  ShenandoahEvacuationTask task(this, _collection_set, concurrent);
  workers()->run_task(&task);
}

void ShenandoahHeap::trash_cset_regions() {
  ShenandoahHeapLocker locker(lock());

  ShenandoahCollectionSet* set = collection_set();
  ShenandoahHeapRegion* r;
  set->clear_current_index();
  while ((r = set->next()) != NULL) {
    r->make_trash();
  }
  collection_set()->clear();
}

void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
  st->print_cr("Heap Regions:");
  st->print_cr("EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HC=humongous continuation, CS=collection set, T=trash, P=pinned");
  st->print_cr("BTE=bottom/top/end, U=used, T=TLAB allocs, G=GCLAB allocs, S=shared allocs, L=live data");
  st->print_cr("R=root, CP=critical pins, TAMS=top-at-mark-start, UWM=update watermark");
  st->print_cr("SN=alloc sequence number");

  for (size_t i = 0; i < num_regions(); i++) {
    get_region(i)->print_on(st);
  }
}

void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
  assert(start->is_humongous_start(), "reclaim regions starting with the first one");

  oop humongous_obj = cast_to_oop(start->bottom());
  size_t size = humongous_obj->size();
  size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
  size_t index = start->index() + required_regions - 1;

  assert(!start->has_live(), "liveness must be zero");

  for (size_t i = 0; i < required_regions; i++) {
    // Reclaim from tail. Otherwise, assertion fails when printing region to trace log,
    // as it expects that every region belongs to a humongous region starting with a humongous start region.
    ShenandoahHeapRegion* region = get_region(index--);

    assert(region->is_humongous(), "expect correct humongous start or continuation");
    assert(!region->is_cset(), "Humongous region should not be in collection set");

    region->make_trash_immediate();
  }
}

class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
public:
  ShenandoahCheckCleanGCLABClosure() {}
  void do_thread(Thread* thread) {
    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
    assert(gclab != NULL, "GCLAB should be initialized for %s", thread->name());
    assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
  }
};

class ShenandoahRetireGCLABClosure : public ThreadClosure {
private:
  bool const _resize;
public:
  ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
  void do_thread(Thread* thread) {
    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
    assert(gclab != NULL, "GCLAB should be initialized for %s", thread->name());
    gclab->retire();
    if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
      ShenandoahThreadLocalData::set_gclab_size(thread, 0);
    }
  }
};

void ShenandoahHeap::labs_make_parsable() {
  assert(UseTLAB, "Only call with UseTLAB");

  ShenandoahRetireGCLABClosure cl(false);

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
    ThreadLocalAllocBuffer& tlab = t->tlab();
    tlab.make_parsable();
    cl.do_thread(t);
  }

  workers()->threads_do(&cl);
}

void ShenandoahHeap::tlabs_retire(bool resize) {
  assert(UseTLAB, "Only call with UseTLAB");
  assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");

  ThreadLocalAllocStats stats;

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
    ThreadLocalAllocBuffer& tlab = t->tlab();
    tlab.retire(&stats);
    if (resize) {
      tlab.resize();
    }
  }

  stats.publish();

#ifdef ASSERT
  ShenandoahCheckCleanGCLABClosure cl;
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
    cl.do_thread(t);
  }
  workers()->threads_do(&cl);
#endif
}

void ShenandoahHeap::gclabs_retire(bool resize) {
  assert(UseTLAB, "Only call with UseTLAB");
  assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");

  ShenandoahRetireGCLABClosure cl(resize);
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
    cl.do_thread(t);
  }
  workers()->threads_do(&cl);

  if (safepoint_workers() != NULL) {
    safepoint_workers()->threads_do(&cl);
  }
}

// Returns size in bytes
size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
  if (ShenandoahElasticTLAB) {
    // With Elastic TLABs, return the max allowed size, and let the allocation path
    // figure out the safe size for current allocation.
    return ShenandoahHeapRegion::max_tlab_size_bytes();
  } else {
    return MIN2(_free_set->unsafe_peek_free(), ShenandoahHeapRegion::max_tlab_size_bytes());
  }
}

size_t ShenandoahHeap::max_tlab_size() const {
  // Returns size in words
  return ShenandoahHeapRegion::max_tlab_size_words();
}

void ShenandoahHeap::collect(GCCause::Cause cause) {
  control_thread()->request_gc(cause);
}

void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
  //assert(false, "Shouldn't need to do full collections");
}

HeapWord* ShenandoahHeap::block_start(const void* addr) const {
  ShenandoahHeapRegion* r = heap_region_containing(addr);
  if (r != NULL) {
    return r->block_start(addr);
  }
  return NULL;
}

bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
  ShenandoahHeapRegion* r = heap_region_containing(addr);
  return r->block_is_obj(addr);
}

bool ShenandoahHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr);
}

void ShenandoahHeap::prepare_for_verify() {
  if (SafepointSynchronize::is_at_safepoint() && UseTLAB) {
    labs_make_parsable();
  }
}

void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
  workers()->threads_do(tcl);
  if (_safepoint_workers != NULL) {
    _safepoint_workers->threads_do(tcl);
  }
  if (ShenandoahStringDedup::is_enabled()) {
    ShenandoahStringDedup::threads_do(tcl);
  }
}

void ShenandoahHeap::print_tracing_info() const {
  LogTarget(Info, gc, stats) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);

    phase_timings()->print_global_on(&ls);

    ls.cr();
    ls.cr();

    shenandoah_policy()->print_gc_stats(&ls);

    ls.cr();
    ls.cr();
  }
}

void ShenandoahHeap::verify(VerifyOption vo) {
  if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
    if (ShenandoahVerify) {
      verifier()->verify_generic(vo);
    } else {
      // TODO: Consider allocating verification bitmaps on demand,
      // and turn this on unconditionally.
    }
  }
}

size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
  return _free_set->capacity();
}

class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
private:
  MarkBitMap* _bitmap;
  ShenandoahScanObjectStack* _oop_stack;
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _marking_context;

  template <class T>
  void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
        // There may be dead oops in weak roots in concurrent root phase, do not touch them.
        return;
      }
      obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);

      assert(oopDesc::is_oop(obj), "must be a valid oop");
      if (!_bitmap->is_marked(obj)) {
        _bitmap->mark(obj);
        _oop_stack->push(obj);
      }
    }
  }
public:
  ObjectIterateScanRootClosure(MarkBitMap* bitmap, ShenandoahScanObjectStack* oop_stack) :
    _bitmap(bitmap), _oop_stack(oop_stack), _heap(ShenandoahHeap::heap()),
    _marking_context(_heap->marking_context()) {}
  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

/*
 * This is public API, used in preparation of object_iterate().
 * Since we don't do a linear scan of the heap in object_iterate() (see comment below),
 * we don't need to make the heap parsable. For Shenandoah-internal linear heap scans
 * that we can control, we call SH::tlabs_retire, SH::gclabs_retire.
 */
void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
  // No-op.
}

/*
 * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
 *
 * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
 * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
 * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
 * scanning therefore depends on having a valid marking bitmap to support it. However, we only
 * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
 * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
 * wiped the bitmap in preparation for next marking).
 *
 * For all those reasons, we implement object iteration as a single marking traversal, reporting
 * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
 * is allowed to report dead objects, but is not required to do so.
 */
void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
  // Reset bitmap
  if (!prepare_aux_bitmap_for_iteration()) {
    return;
  }

  ShenandoahScanObjectStack oop_stack;
  ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
  // Seed the stack with root scan
  scan_roots_for_iteration(&oop_stack, &oops);

  // Work through the oop stack to traverse heap
  while (!oop_stack.is_empty()) {
    oop obj = oop_stack.pop();
    assert(oopDesc::is_oop(obj), "must be a valid oop");
    cl->do_object(obj);
    obj->oop_iterate(&oops);
  }

  assert(oop_stack.is_empty(), "should be empty");
  // Reclaim bitmap
  reclaim_aux_bitmap_for_iteration();
}

bool ShenandoahHeap::prepare_aux_bitmap_for_iteration() {
  assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");

  if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
    log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
    return false;
  }
  // Reset bitmap
  _aux_bit_map.clear();
  return true;
}

void ShenandoahHeap::scan_roots_for_iteration(ShenandoahScanObjectStack* oop_stack, ObjectIterateScanRootClosure* oops) {
  // Process GC roots according to current GC cycle.
  // This populates the work stack with initial objects.
  // It is important to relinquish the associated locks before diving
  // into the heap dumper.
  uint n_workers = safepoint_workers() != NULL ? safepoint_workers()->active_workers() : 1;
  ShenandoahHeapIterationRootScanner rp(n_workers);
  rp.roots_do(oops);
}

void ShenandoahHeap::reclaim_aux_bitmap_for_iteration() {
  if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
    log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
  }
}

// Closure for parallel object iteration.
class ShenandoahObjectIterateParScanClosure : public BasicOopIterateClosure {
private:
  MarkBitMap* _bitmap;
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _marking_context;

  template <class T>
  void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
        // There may be dead oops in weak roots in concurrent root phase, do not touch them.
        return;
      }
      obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);

      assert(oopDesc::is_oop(obj), "Must be a valid oop");
      if (_bitmap->par_mark(obj)) {
        _queue->push(ShenandoahMarkTask(obj));
      }
    }
  }
public:
  ShenandoahObjectIterateParScanClosure(MarkBitMap* bitmap, ShenandoahObjToScanQueue* q) :
    _bitmap(bitmap), _queue(q), _heap(ShenandoahHeap::heap()),
    _marking_context(_heap->marking_context()) {}
  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};
1363 
1364 // Object iterator for parallel heap iteraion.
1365 // The root scanning phase happenes in construction as a preparation of
1366 // parallel marking queues.
1367 // Every worker processes it's own marking queue. work-stealing is used
1368 // to balance workload.
1369 class ShenandoahParallelObjectIterator : public ParallelObjectIterator {
1370 private:
1371   uint                         _num_workers;
1372   bool                         _init_ready;
1373   MarkBitMap*                  _aux_bit_map;
1374   ShenandoahHeap*              _heap;
1375   ShenandoahScanObjectStack    _roots_stack; // global roots stack
1376   ShenandoahObjToScanQueueSet* _task_queues;
1377 public:
1378   ShenandoahParallelObjectIterator(uint num_workers, MarkBitMap* bitmap) :
1379         _num_workers(num_workers),
1380         _init_ready(false),
1381         _aux_bit_map(bitmap),
1382         _heap(ShenandoahHeap::heap()) {
1383     // Initialize bitmap
1384     _init_ready = _heap->prepare_aux_bitmap_for_iteration();
1385     if (!_init_ready) {
1386       return;
1387     }
1388 
1389     ObjectIterateScanRootClosure oops(_aux_bit_map, &_roots_stack);
1390     _heap->scan_roots_for_iteration(&_roots_stack, &oops);
1391 
1392     _init_ready = prepare_worker_queues();
1393   }
1394 
1395   ~ShenandoahParallelObjectIterator() {
1396     // Reclaim bitmap
1397     _heap->reclaim_aux_bitmap_for_iteration();
    // Reclaim the worker queues
    if (_task_queues != NULL) {
1400       for (uint i = 0; i < _num_workers; ++i) {
1401         ShenandoahObjToScanQueue* q = _task_queues->queue(i);
1402         if (q != NULL) {
1403           delete q;
1404           _task_queues->register_queue(i, NULL);
1405         }
1406       }
1407       delete _task_queues;
1408       _task_queues = NULL;
1409     }
1410   }
1411 
1412   virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
1413     if (_init_ready) {
1414       object_iterate_parallel(cl, worker_id, _task_queues);
1415     }
1416   }
1417 
1418 private:
  // Divide the global roots stack into per-worker queues
1420   bool prepare_worker_queues() {
1421     _task_queues = new ShenandoahObjToScanQueueSet((int) _num_workers);
    // Initialize a queue for every worker
1423     for (uint i = 0; i < _num_workers; ++i) {
1424       ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
1425       _task_queues->register_queue(i, task_queue);
1426     }
    // Divide roots among the workers. Assume that the distribution of object
    // references correlates with the root kind, and use round-robin so that every
    // worker has the same chance to process every kind of root.
1430     size_t roots_num = _roots_stack.size();
1431     if (roots_num == 0) {
1432       // No work to do
1433       return false;
1434     }
1435 
1436     for (uint j = 0; j < roots_num; j++) {
1437       uint stack_id = j % _num_workers;
1438       oop obj = _roots_stack.pop();
1439       _task_queues->queue(stack_id)->push(ShenandoahMarkTask(obj));
1440     }
1441     return true;
1442   }
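
  // For illustration of the round-robin split above (numbers are hypothetical):
  // with _num_workers == 3 and seven roots on the stack, successive pops land on
  // queues 0,1,2,0,1,2,0, so each worker queue starts with a mix of root kinds
  // rather than a contiguous run.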
1443 
1444   void object_iterate_parallel(ObjectClosure* cl,
1445                                uint worker_id,
1446                                ShenandoahObjToScanQueueSet* queue_set) {
1447     assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1448     assert(queue_set != NULL, "task queue must not be NULL");
1449 
1450     ShenandoahObjToScanQueue* q = queue_set->queue(worker_id);
1451     assert(q != NULL, "object iterate queue must not be NULL");
1452 
1453     ShenandoahMarkTask t;
1454     ShenandoahObjectIterateParScanClosure oops(_aux_bit_map, q);
1455 
    // Work through the queue to traverse the heap.
    // Steal from other queues when the local queue is empty.
1458     while (q->pop(t) || queue_set->steal(worker_id, t)) {
1459       oop obj = t.obj();
1460       assert(oopDesc::is_oop(obj), "must be a valid oop");
1461       cl->do_object(obj);
1462       obj->oop_iterate(&oops);
1463     }
1464     assert(q->is_empty(), "should be empty");
1465   }
1466 };
1467 
1468 ParallelObjectIterator* ShenandoahHeap::parallel_object_iterator(uint workers) {
1469   return new ShenandoahParallelObjectIterator(workers, &_aux_bit_map);
1470 }
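
// For illustration only (not part of this file): a minimal sketch of how a caller
// could drive the parallel iterator from a worker task. The task name is
// hypothetical; the real callers live in shared runtime code.
//
//   class SketchObjectIterateTask : public WorkerTask {
//     ParallelObjectIterator* const _iter;
//     ObjectClosure* const _cl;
//   public:
//     SketchObjectIterateTask(ParallelObjectIterator* iter, ObjectClosure* cl) :
//       WorkerTask("Sketch Object Iterate"), _iter(iter), _cl(cl) {}
//     void work(uint worker_id) {
//       // Each worker drains its own queue, then steals from the others.
//       _iter->object_iterate(_cl, worker_id);
//     }
//   };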
1471 
// Keep alive an object that was loaded with AS_NO_KEEPALIVE. Enqueueing the object
// into the SATB queues makes it visible to the concurrent marker.
1473 void ShenandoahHeap::keep_alive(oop obj) {
1474   if (is_concurrent_mark_in_progress() && (obj != NULL)) {
1475     ShenandoahBarrierSet::barrier_set()->enqueue(obj);
1476   }
1477 }
1478 
1479 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1480   for (size_t i = 0; i < num_regions(); i++) {
1481     ShenandoahHeapRegion* current = get_region(i);
1482     blk->heap_region_do(current);
1483   }
1484 }
1485 
1486 class ShenandoahParallelHeapRegionTask : public WorkerTask {
1487 private:
1488   ShenandoahHeap* const _heap;
1489   ShenandoahHeapRegionClosure* const _blk;
1490 
1491   shenandoah_padding(0);
1492   volatile size_t _index;
1493   shenandoah_padding(1);
1494 
1495 public:
1496   ShenandoahParallelHeapRegionTask(ShenandoahHeapRegionClosure* blk) :
1497           WorkerTask("Shenandoah Parallel Region Operation"),
1498           _heap(ShenandoahHeap::heap()), _blk(blk), _index(0) {}
1499 
1500   void work(uint worker_id) {
1501     ShenandoahParallelWorkerSession worker_session(worker_id);
1502     size_t stride = ShenandoahParallelRegionStride;
1503 
1504     size_t max = _heap->num_regions();
    while (Atomic::load(&_index) < max) {
      // Claim the next stride of regions; bail out if other workers have
      // already claimed everything up to the end.
      size_t cur = Atomic::fetch_and_add(&_index, stride, memory_order_relaxed);
      if (cur >= max) break;
      size_t end = MIN2(cur + stride, max);

      for (size_t i = cur; i < end; i++) {
1512         ShenandoahHeapRegion* current = _heap->get_region(i);
1513         _blk->heap_region_do(current);
1514       }
1515     }
1516   }
1517 };
1518 
1519 void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1520   assert(blk->is_thread_safe(), "Only thread-safe closures here");
1521   if (num_regions() > ShenandoahParallelRegionStride) {
1522     ShenandoahParallelHeapRegionTask task(blk);
1523     workers()->run_task(&task);
1524   } else {
1525     heap_region_iterate(blk);
1526   }
1527 }
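
// For illustration only (not part of this file): a minimal thread-safe closure for
// parallel_heap_region_iterate(). The counting logic is a made-up example; the
// important parts are the atomic update and the is_thread_safe() override.
//
//   class SketchCountActiveRegionsClosure : public ShenandoahHeapRegionClosure {
//     volatile size_t _count;
//   public:
//     SketchCountActiveRegionsClosure() : _count(0) {}
//     void heap_region_do(ShenandoahHeapRegion* r) {
//       if (r->is_active()) {
//         Atomic::inc(&_count); // may run on several workers concurrently
//       }
//     }
//     bool is_thread_safe() { return true; }
//   };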
1528 
1529 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1530 private:
1531   ShenandoahMarkingContext* const _ctx;
1532 public:
1533   ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
1534 
1535   void heap_region_do(ShenandoahHeapRegion* r) {
1536     assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
1537     if (r->is_active()) {
      // Check if the region needs its TAMS updated. We have already updated it during
      // concurrent reset, so it is very likely we do not need another write here.
1540       if (_ctx->top_at_mark_start(r) != r->top()) {
1541         _ctx->capture_top_at_mark_start(r);
1542       }
1543     } else {
1544       assert(_ctx->top_at_mark_start(r) == r->top(),
1545              "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
1546     }
1547   }
1548 
1549   bool is_thread_safe() { return true; }
1550 };
1551 
1552 class ShenandoahRendezvousClosure : public HandshakeClosure {
1553 public:
1554   inline ShenandoahRendezvousClosure() : HandshakeClosure("ShenandoahRendezvous") {}
1555   inline void do_thread(Thread* thread) {}
1556 };
1557 
1558 void ShenandoahHeap::rendezvous_threads() {
1559   ShenandoahRendezvousClosure cl;
1560   Handshake::execute(&cl);
1561 }
1562 
1563 void ShenandoahHeap::recycle_trash() {
1564   free_set()->recycle_trash();
1565 }
1566 
1567 class ShenandoahResetUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1568 private:
1569   ShenandoahMarkingContext* const _ctx;
1570 public:
1571   ShenandoahResetUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
1572 
1573   void heap_region_do(ShenandoahHeapRegion* r) {
1574     if (r->is_active()) {
      // Reset live data and set TAMS optimistically. We would recheck these under the pause
      // anyway to capture any updates that happen between now and then.
1577       r->clear_live_data();
1578       _ctx->capture_top_at_mark_start(r);
1579     }
1580   }
1581 
1582   bool is_thread_safe() { return true; }
1583 };
1584 
1585 void ShenandoahHeap::prepare_gc() {
1586   reset_mark_bitmap();
1587 
1588   ShenandoahResetUpdateRegionStateClosure cl;
1589   parallel_heap_region_iterate(&cl);
1590 }
1591 
1592 class ShenandoahFinalMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1593 private:
1594   ShenandoahMarkingContext* const _ctx;
1595   ShenandoahHeapLock* const _lock;
1596 
1597 public:
1598   ShenandoahFinalMarkUpdateRegionStateClosure() :
1599     _ctx(ShenandoahHeap::heap()->complete_marking_context()), _lock(ShenandoahHeap::heap()->lock()) {}
1600 
1601   void heap_region_do(ShenandoahHeapRegion* r) {
1602     if (r->is_active()) {
      // All allocations past TAMS are implicitly live, so adjust the region data.
      // Bitmaps/TAMS are swapped at this point, so we need to poll the complete bitmap.
1605       HeapWord *tams = _ctx->top_at_mark_start(r);
1606       HeapWord *top = r->top();
1607       if (top > tams) {
1608         r->increase_live_data_alloc_words(pointer_delta(top, tams));
1609       }
1610 
      // We are about to select the collection set, so make sure it knows about the
      // current pinning status. Also, this allows trashing more regions whose
      // pinning status has since been dropped.
1614       if (r->is_pinned()) {
1615         if (r->pin_count() == 0) {
1616           ShenandoahHeapLocker locker(_lock);
1617           r->make_unpinned();
1618         }
1619       } else {
1620         if (r->pin_count() > 0) {
1621           ShenandoahHeapLocker locker(_lock);
1622           r->make_pinned();
1623         }
1624       }
1625 
      // Remember the limit for updating refs. It is guaranteed that no
      // from-space references are written from here on.
1628       r->set_update_watermark_at_safepoint(r->top());
1629     } else {
1630       assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
1631       assert(_ctx->top_at_mark_start(r) == r->top(),
1632              "Region " SIZE_FORMAT " should have correct TAMS", r->index());
1633     }
1634   }
1635 
1636   bool is_thread_safe() { return true; }
1637 };
1638 
1639 void ShenandoahHeap::prepare_regions_and_collection_set(bool concurrent) {
1640   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
1641   {
1642     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_update_region_states :
1643                                          ShenandoahPhaseTimings::degen_gc_final_update_region_states);
1644     ShenandoahFinalMarkUpdateRegionStateClosure cl;
1645     parallel_heap_region_iterate(&cl);
1646 
1647     assert_pinned_region_status();
1648   }
1649 
1650   {
1651     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::choose_cset :
1652                                          ShenandoahPhaseTimings::degen_gc_choose_cset);
1653     ShenandoahHeapLocker locker(lock());
1654     _collection_set->clear();
1655     heuristics()->choose_collection_set(_collection_set);
1656   }
1657 
1658   {
1659     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset :
1660                                          ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
1661     ShenandoahHeapLocker locker(lock());
1662     _free_set->rebuild();
1663   }
1664 }
1665 
1666 void ShenandoahHeap::do_class_unloading() {
1667   _unloader.unload();
1668 }
1669 
1670 void ShenandoahHeap::stw_weak_refs(bool full_gc) {
1671   // Weak refs processing
1672   ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
1673                                                 : ShenandoahPhaseTimings::degen_gc_weakrefs;
1674   ShenandoahTimingsTracker t(phase);
1675   ShenandoahGCWorkerPhase worker_phase(phase);
1676   ref_processor()->process_references(phase, workers(), false /* concurrent */);
1677 }
1678 
1679 void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
1680   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
1681 
  // Evacuation is over, and no GCLABs are needed anymore. GCLABs are under URWM
  // (the update-refs watermark), so we need to make them parsable for the update
  // code to work correctly. Plus, we can compute new sizes for future GCLABs here.
1685   if (UseTLAB) {
1686     ShenandoahGCPhase phase(concurrent ?
1687                             ShenandoahPhaseTimings::init_update_refs_manage_gclabs :
1688                             ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
1689     gclabs_retire(ResizeTLAB);
1690   }
1691 
1692   _update_refs_iterator.reset();
1693 }
1694 
1695 void ShenandoahHeap::set_gc_state_all_threads(char state) {
1696   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1697     ShenandoahThreadLocalData::set_gc_state(t, state);
1698   }
1699 }
1700 
1701 void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) {
1702   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should really be Shenandoah safepoint");
1703   _gc_state.set_cond(mask, value);
1704   set_gc_state_all_threads(_gc_state.raw_value());
1705 }
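
// Note on the above: flipping a gc-state bit at a safepoint updates the canonical
// _gc_state, then republishes the raw value into every Java thread's thread-local
// copy, which is what the barrier fast paths actually read.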
1706 
1707 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1708   assert(!has_forwarded_objects(), "Not expected before/after mark phase");
1709   set_gc_state_mask(MARKING, in_progress);
1710   ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1711 }
1712 
1713 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1714   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
1715   set_gc_state_mask(EVACUATION, in_progress);
1716 }
1717 
1718 void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
1719   if (in_progress) {
1720     _concurrent_strong_root_in_progress.set();
1721   } else {
1722     _concurrent_strong_root_in_progress.unset();
1723   }
1724 }
1725 
1726 void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
1727   set_gc_state_mask(WEAK_ROOTS, cond);
1728 }
1729 
1730 GCTracer* ShenandoahHeap::tracer() {
1731   return shenandoah_policy()->tracer();
1732 }
1733 
1734 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
1735   return _free_set->used();
1736 }
1737 
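// Cancellation protocol: _cancelled_gc is CAS-ed from CANCELLABLE to CANCELLED.
// The caller that wins the CAS returns true and reports the cancellation; callers
// that observe CANCELLED return false. The transient NOT_CANCELLED state can only
// appear with suspendible workers; in that case we spin, yielding to a pending
// safepoint, until the flag settles.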
1738 bool ShenandoahHeap::try_cancel_gc() {
1739   while (true) {
1740     jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
1741     if (prev == CANCELLABLE) return true;
1742     else if (prev == CANCELLED) return false;
1743     assert(ShenandoahSuspendibleWorkers, "should not get here when not using suspendible workers");
1744     assert(prev == NOT_CANCELLED, "must be NOT_CANCELLED");
1745     Thread* thread = Thread::current();
1746     if (thread->is_Java_thread()) {
      // We need to provide a safepoint here, otherwise we might
      // spin forever if a safepoint is pending.
1749       ThreadBlockInVM sp(JavaThread::cast(thread));
1750       SpinPause();
1751     }
1752   }
1753 }
1754 
1755 void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
1756   if (try_cancel_gc()) {
1757     FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
1758     log_info(gc)("%s", msg.buffer());
1759     Events::log(Thread::current(), "%s", msg.buffer());
1760   }
1761 }
1762 
1763 uint ShenandoahHeap::max_workers() {
1764   return _max_workers;
1765 }
1766 
1767 void ShenandoahHeap::stop() {
  // The shutdown sequence should be able to terminate even while a GC cycle is running.
1769 
1770   // Step 0. Notify policy to disable event recording.
1771   _shenandoah_policy->record_shutdown();
1772 
1773   // Step 1. Notify control thread that we are in shutdown.
1774   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
1775   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
1776   control_thread()->prepare_for_graceful_shutdown();
1777 
1778   // Step 2. Notify GC workers that we are cancelling GC.
1779   cancel_gc(GCCause::_shenandoah_stop_vm);
1780 
  // Step 3. Wait until the GC control thread exits normally.
1782   control_thread()->stop();
1783 }
1784 
1785 void ShenandoahHeap::stw_unload_classes(bool full_gc) {
1786   if (!unload_classes()) return;
1787   // Unload classes and purge SystemDictionary.
1788   {
1789     ShenandoahPhaseTimings::Phase phase = full_gc ?
1790                                           ShenandoahPhaseTimings::full_gc_purge_class_unload :
1791                                           ShenandoahPhaseTimings::degen_gc_purge_class_unload;
1792     ShenandoahGCPhase gc_phase(phase);
1793     ShenandoahGCWorkerPhase worker_phase(phase);
1794     bool purged_class = SystemDictionary::do_unloading(gc_timer());
1795 
1796     ShenandoahIsAliveSelector is_alive;
1797     uint num_workers = _workers->active_workers();
1798     ShenandoahClassUnloadingTask unlink_task(phase, is_alive.is_alive_closure(), num_workers, purged_class);
1799     _workers->run_task(&unlink_task);
1800   }
1801 
1802   {
1803     ShenandoahGCPhase phase(full_gc ?
1804                             ShenandoahPhaseTimings::full_gc_purge_cldg :
1805                             ShenandoahPhaseTimings::degen_gc_purge_cldg);
1806     ClassLoaderDataGraph::purge(/*at_safepoint*/true);
1807   }
1808   // Resize and verify metaspace
1809   MetaspaceGC::compute_new_size();
1810   DEBUG_ONLY(MetaspaceUtils::verify();)
1811 }
1812 
// Weak roots are either pre-evacuated (final mark) or updated (final update-refs),
// so they should not have forwarded oops.
// However, we do need to "null" dead oops in the roots, if that cannot be done
// in concurrent cycles.
1817 void ShenandoahHeap::stw_process_weak_roots(bool full_gc) {
1818   uint num_workers = _workers->active_workers();
1819   ShenandoahPhaseTimings::Phase timing_phase = full_gc ?
1820                                                ShenandoahPhaseTimings::full_gc_purge_weak_par :
1821                                                ShenandoahPhaseTimings::degen_gc_purge_weak_par;
1822   ShenandoahGCPhase phase(timing_phase);
1823   ShenandoahGCWorkerPhase worker_phase(timing_phase);
1824   // Cleanup weak roots
1825   if (has_forwarded_objects()) {
1826     ShenandoahForwardedIsAliveClosure is_alive;
1827     ShenandoahUpdateRefsClosure keep_alive;
1828     ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveClosure, ShenandoahUpdateRefsClosure>
1829       cleaning_task(timing_phase, &is_alive, &keep_alive, num_workers);
1830     _workers->run_task(&cleaning_task);
1831   } else {
1832     ShenandoahIsAliveClosure is_alive;
1833 #ifdef ASSERT
1834     ShenandoahAssertNotForwardedClosure verify_cl;
1835     ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, ShenandoahAssertNotForwardedClosure>
1836       cleaning_task(timing_phase, &is_alive, &verify_cl, num_workers);
1837 #else
1838     ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, DoNothingClosure>
1839       cleaning_task(timing_phase, &is_alive, &do_nothing_cl, num_workers);
1840 #endif
1841     _workers->run_task(&cleaning_task);
1842   }
1843 }
1844 
1845 void ShenandoahHeap::parallel_cleaning(bool full_gc) {
1846   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
1847   assert(is_stw_gc_in_progress(), "Only for Degenerated and Full GC");
1848   ShenandoahGCPhase phase(full_gc ?
1849                           ShenandoahPhaseTimings::full_gc_purge :
1850                           ShenandoahPhaseTimings::degen_gc_purge);
1851   stw_weak_refs(full_gc);
1852   stw_process_weak_roots(full_gc);
1853   stw_unload_classes(full_gc);
1854 }
1855 
1856 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
1857   set_gc_state_mask(HAS_FORWARDED, cond);
1858 }
1859 
1860 void ShenandoahHeap::set_unload_classes(bool uc) {
1861   _unload_classes.set_cond(uc);
1862 }
1863 
1864 bool ShenandoahHeap::unload_classes() const {
1865   return _unload_classes.is_set();
1866 }
1867 
1868 address ShenandoahHeap::in_cset_fast_test_addr() {
1869   ShenandoahHeap* heap = ShenandoahHeap::heap();
1870   assert(heap->collection_set() != NULL, "Sanity");
1871   return (address) heap->collection_set()->biased_map_address();
1872 }
1873 
1874 address ShenandoahHeap::cancelled_gc_addr() {
1875   return (address) ShenandoahHeap::heap()->_cancelled_gc.addr_of();
1876 }
1877 
1878 address ShenandoahHeap::gc_state_addr() {
1879   return (address) ShenandoahHeap::heap()->_gc_state.addr_of();
1880 }
1881 
1882 size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
1883   return Atomic::load(&_bytes_allocated_since_gc_start);
1884 }
1885 
1886 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
1887   Atomic::store(&_bytes_allocated_since_gc_start, (size_t)0);
1888 }
1889 
1890 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
1891   _degenerated_gc_in_progress.set_cond(in_progress);
1892 }
1893 
1894 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
1895   _full_gc_in_progress.set_cond(in_progress);
1896 }
1897 
1898 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
1899   assert (is_full_gc_in_progress(), "should be");
1900   _full_gc_move_in_progress.set_cond(in_progress);
1901 }
1902 
1903 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
1904   set_gc_state_mask(UPDATEREFS, in_progress);
1905 }
1906 
1907 void ShenandoahHeap::register_nmethod(nmethod* nm) {
1908   ShenandoahCodeRoots::register_nmethod(nm);
1909 }
1910 
1911 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
1912   ShenandoahCodeRoots::unregister_nmethod(nm);
1913 }
1914 
1915 void ShenandoahHeap::flush_nmethod(nmethod* nm) {
1916   ShenandoahCodeRoots::flush_nmethod(nm);
1917 }
1918 
1919 oop ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
1920   heap_region_containing(o)->record_pin();
1921   return o;
1922 }
1923 
1924 void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
1925   ShenandoahHeapRegion* r = heap_region_containing(o);
1926   assert(r != NULL, "Sanity");
1927   assert(r->pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", r->index());
1928   r->record_unpin();
1929 }
1930 
1931 void ShenandoahHeap::sync_pinned_region_status() {
1932   ShenandoahHeapLocker locker(lock());
1933 
1934   for (size_t i = 0; i < num_regions(); i++) {
1935     ShenandoahHeapRegion *r = get_region(i);
1936     if (r->is_active()) {
1937       if (r->is_pinned()) {
1938         if (r->pin_count() == 0) {
1939           r->make_unpinned();
1940         }
1941       } else {
1942         if (r->pin_count() > 0) {
1943           r->make_pinned();
1944         }
1945       }
1946     }
1947   }
1948 
1949   assert_pinned_region_status();
1950 }
1951 
1952 #ifdef ASSERT
1953 void ShenandoahHeap::assert_pinned_region_status() {
1954   for (size_t i = 0; i < num_regions(); i++) {
1955     ShenandoahHeapRegion* r = get_region(i);
1956     assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
1957            "Region " SIZE_FORMAT " pinning status is inconsistent", i);
1958   }
1959 }
1960 #endif
1961 
1962 ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
1963   return _gc_timer;
1964 }
1965 
1966 void ShenandoahHeap::prepare_concurrent_roots() {
1967   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
1968   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
1969   set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
1970   set_concurrent_weak_root_in_progress(true);
1971   if (unload_classes()) {
1972     _unloader.prepare();
1973   }
1974 }
1975 
1976 void ShenandoahHeap::finish_concurrent_roots() {
1977   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
1978   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
1979   if (unload_classes()) {
1980     _unloader.finish();
1981   }
1982 }
1983 
1984 #ifdef ASSERT
1985 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
1986   assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
1987 
1988   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1989     if (UseDynamicNumberOfGCThreads) {
1990       assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");
1991     } else {
1992       // Use ParallelGCThreads inside safepoints
1993       assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints");
1994     }
1995   } else {
1996     if (UseDynamicNumberOfGCThreads) {
1997       assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
1998     } else {
1999       // Use ConcGCThreads outside safepoints
2000       assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
2001     }
2002   }
2003 }
2004 #endif
2005 
2006 ShenandoahVerifier* ShenandoahHeap::verifier() {
2007   guarantee(ShenandoahVerify, "Should be enabled");
2008   assert (_verifier != NULL, "sanity");
2009   return _verifier;
2010 }
2011 
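// CONCURRENT is a compile-time flag: the concurrent instantiation joins the
// suspendible thread set and uses the concurrent update-refs closure, while the
// stop-the-world instantiation uses the STW closure. This keeps the per-region
// loop free of runtime mode checks.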
2012 template<bool CONCURRENT>
2013 class ShenandoahUpdateHeapRefsTask : public WorkerTask {
2014 private:
2015   ShenandoahHeap* _heap;
2016   ShenandoahRegionIterator* _regions;
2017 public:
2018   ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions) :
2019     WorkerTask("Shenandoah Update References"),
2020     _heap(ShenandoahHeap::heap()),
2021     _regions(regions) {
2022   }
2023 
2024   void work(uint worker_id) {
2025     if (CONCURRENT) {
2026       ShenandoahConcurrentWorkerSession worker_session(worker_id);
2027       ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
2028       do_work<ShenandoahConcUpdateRefsClosure>();
2029     } else {
2030       ShenandoahParallelWorkerSession worker_session(worker_id);
2031       do_work<ShenandoahSTWUpdateRefsClosure>();
2032     }
2033   }
2034 
2035 private:
2036   template<class T>
2037   void do_work() {
2038     T cl;
2039     ShenandoahHeapRegion* r = _regions->next();
2040     ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
2041     while (r != NULL) {
2042       HeapWord* update_watermark = r->get_update_watermark();
2043       assert (update_watermark >= r->bottom(), "sanity");
2044       if (r->is_active() && !r->is_cset()) {
2045         _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2046       }
2047       if (ShenandoahPacing) {
2048         _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
2049       }
2050       if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
2051         return;
2052       }
2053       r = _regions->next();
2054     }
2055   }
2056 };
2057 
2058 void ShenandoahHeap::update_heap_references(bool concurrent) {
2059   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2060 
2061   if (concurrent) {
2062     ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator);
2063     workers()->run_task(&task);
2064   } else {
2065     ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator);
2066     workers()->run_task(&task);
2067   }
2068 }
2069 
2070 
2071 class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
2072 private:
2073   ShenandoahHeapLock* const _lock;
2074 
2075 public:
2076   ShenandoahFinalUpdateRefsUpdateRegionStateClosure() : _lock(ShenandoahHeap::heap()->lock()) {}
2077 
2078   void heap_region_do(ShenandoahHeapRegion* r) {
    // Drop the unnecessary "pinned" state from regions that no longer have CP
    // (critical pin) marks, as this allows trashing them.
2081 
2082     if (r->is_active()) {
2083       if (r->is_pinned()) {
2084         if (r->pin_count() == 0) {
2085           ShenandoahHeapLocker locker(_lock);
2086           r->make_unpinned();
2087         }
2088       } else {
2089         if (r->pin_count() > 0) {
2090           ShenandoahHeapLocker locker(_lock);
2091           r->make_pinned();
2092         }
2093       }
2094     }
2095   }
2096 
2097   bool is_thread_safe() { return true; }
2098 };
2099 
2100 void ShenandoahHeap::update_heap_region_states(bool concurrent) {
2101   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2102   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2103 
2104   {
2105     ShenandoahGCPhase phase(concurrent ?
2106                             ShenandoahPhaseTimings::final_update_refs_update_region_states :
2107                             ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
2108     ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl;
2109     parallel_heap_region_iterate(&cl);
2110 
2111     assert_pinned_region_status();
2112   }
2113 
2114   {
2115     ShenandoahGCPhase phase(concurrent ?
2116                             ShenandoahPhaseTimings::final_update_refs_trash_cset :
2117                             ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
2118     trash_cset_regions();
2119   }
2120 }
2121 
2122 void ShenandoahHeap::rebuild_free_set(bool concurrent) {
2123   {
2124     ShenandoahGCPhase phase(concurrent ?
2125                             ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
2126                             ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
2127     ShenandoahHeapLocker locker(lock());
2128     _free_set->rebuild();
2129   }
2130 }
2131 
2132 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2133   print_on(st);
2134   print_heap_regions_on(st);
2135 }
2136 
2137 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2138   size_t slice = r->index() / _bitmap_regions_per_slice;
2139 
2140   size_t regions_from = _bitmap_regions_per_slice * slice;
2141   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2142   for (size_t g = regions_from; g < regions_to; g++) {
2143     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2144     if (skip_self && g == r->index()) continue;
2145     if (get_region(g)->is_committed()) {
2146       return true;
2147     }
2148   }
2149   return false;
2150 }
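
// As a worked example (numbers are hypothetical): with _bitmap_regions_per_slice == 8,
// region 19 maps to slice 19 / 8 == 2, which covers regions 16..23. A slice may be
// uncommitted only when none of those regions is committed.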
2151 
2152 bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2153   shenandoah_assert_heaplocked();
2154 
2155   // Bitmaps in special regions do not need commits
2156   if (_bitmap_region_special) {
2157     return true;
2158   }
2159 
2160   if (is_bitmap_slice_committed(r, true)) {
    // Some other region from the group is already committed, meaning the bitmap
    // slice is already committed, so we exit right away.
2163     return true;
2164   }
2165 
2166   // Commit the bitmap slice:
2167   size_t slice = r->index() / _bitmap_regions_per_slice;
2168   size_t off = _bitmap_bytes_per_slice * slice;
2169   size_t len = _bitmap_bytes_per_slice;
2170   char* start = (char*) _bitmap_region.start() + off;
2171 
2172   if (!os::commit_memory(start, len, false)) {
2173     return false;
2174   }
2175 
2176   if (AlwaysPreTouch) {
2177     os::pretouch_memory(start, start + len, _pretouch_bitmap_page_size);
2178   }
2179 
2180   return true;
2181 }
2182 
2183 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2184   shenandoah_assert_heaplocked();
2185 
2186   // Bitmaps in special regions do not need uncommits
2187   if (_bitmap_region_special) {
2188     return true;
2189   }
2190 
2191   if (is_bitmap_slice_committed(r, true)) {
    // Some other region from the group is still committed, meaning the bitmap
    // slice should stay committed, so we exit right away.
2194     return true;
2195   }
2196 
2197   // Uncommit the bitmap slice:
2198   size_t slice = r->index() / _bitmap_regions_per_slice;
2199   size_t off = _bitmap_bytes_per_slice * slice;
2200   size_t len = _bitmap_bytes_per_slice;
2201   if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
2202     return false;
2203   }
2204   return true;
2205 }
2206 
2207 void ShenandoahHeap::safepoint_synchronize_begin() {
2208   if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
2209     SuspendibleThreadSet::synchronize();
2210   }
2211 }
2212 
2213 void ShenandoahHeap::safepoint_synchronize_end() {
2214   if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
2215     SuspendibleThreadSet::desynchronize();
2216   }
2217 }
2218 
2219 void ShenandoahHeap::entry_uncommit(double shrink_before, size_t shrink_until) {
2220   static const char *msg = "Concurrent uncommit";
2221   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_uncommit, true /* log_heap_usage */);
2222   EventMark em("%s", msg);
2223 
2224   op_uncommit(shrink_before, shrink_until);
2225 }
2226 
2227 void ShenandoahHeap::try_inject_alloc_failure() {
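  // (os::random() % 1000) > 950 holds for 49 of the 1000 possible values, so the
  // failure is injected on roughly 5% of the calls that reach this point.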
2228   if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2229     _inject_alloc_failure.set();
2230     os::naked_short_sleep(1);
2231     if (cancelled_gc()) {
2232       log_info(gc)("Allocation failure was successfully injected");
2233     }
2234   }
2235 }
2236 
2237 bool ShenandoahHeap::should_inject_alloc_failure() {
2238   return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2239 }
2240 
2241 void ShenandoahHeap::initialize_serviceability() {
2242   _memory_pool = new ShenandoahMemoryPool(this);
2243   _cycle_memory_manager.add_pool(_memory_pool);
2244   _stw_memory_manager.add_pool(_memory_pool);
2245 }
2246 
2247 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2248   GrowableArray<GCMemoryManager*> memory_managers(2);
2249   memory_managers.append(&_cycle_memory_manager);
2250   memory_managers.append(&_stw_memory_manager);
2251   return memory_managers;
2252 }
2253 
2254 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2255   GrowableArray<MemoryPool*> memory_pools(1);
2256   memory_pools.append(_memory_pool);
2257   return memory_pools;
2258 }
2259 
2260 MemoryUsage ShenandoahHeap::memory_usage() {
2261   return _memory_pool->get_memory_usage();
2262 }
2263 
2264 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2265   _heap(ShenandoahHeap::heap()),
2266   _index(0) {}
2267 
2268 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2269   _heap(heap),
2270   _index(0) {}
2271 
2272 void ShenandoahRegionIterator::reset() {
2273   _index = 0;
2274 }
2275 
2276 bool ShenandoahRegionIterator::has_next() const {
2277   return _index < _heap->num_regions();
2278 }
2279 
2280 char ShenandoahHeap::gc_state() const {
2281   return _gc_state.raw_value();
2282 }
2283 
2284 ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
2285 #ifdef ASSERT
2286   assert(_liveness_cache != NULL, "sanity");
2287   assert(worker_id < _max_workers, "sanity");
2288   for (uint i = 0; i < num_regions(); i++) {
2289     assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
2290   }
2291 #endif
2292   return _liveness_cache[worker_id];
2293 }
2294 
2295 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
2296   assert(worker_id < _max_workers, "sanity");
2297   assert(_liveness_cache != NULL, "sanity");
2298   ShenandoahLiveData* ld = _liveness_cache[worker_id];
2299   for (uint i = 0; i < num_regions(); i++) {
2300     ShenandoahLiveData live = ld[i];
2301     if (live > 0) {
2302       ShenandoahHeapRegion* r = get_region(i);
2303       r->increase_live_data_gc_words(live);
2304       ld[i] = 0;
2305     }
2306   }
2307 }