1 /*
   2  * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "memory/allocation.hpp"
  26 
  27 #include "gc/shared/gcTimer.hpp"
  28 #include "gc/shared/gcTraceTime.inline.hpp"
  29 #include "gc/shared/memAllocator.hpp"
  30 #include "gc/shared/plab.hpp"
  31 
  32 #include "gc/shenandoah/parallelCleaning.hpp"
  33 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  34 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  35 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  36 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  37 #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
  38 #include "gc/shenandoah/shenandoahControlThread.hpp"
  39 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  40 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  41 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  42 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  43 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  44 #include "gc/shenandoah/shenandoahMarkCompact.hpp"
  45 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  46 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
  47 #include "gc/shenandoah/shenandoahMetrics.hpp"
  48 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  49 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  50 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
  51 #include "gc/shenandoah/shenandoahPadding.hpp"
  52 #include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
  53 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  54 #include "gc/shenandoah/shenandoahStringDedup.hpp"
  55 #include "gc/shenandoah/shenandoahTaskqueue.hpp"
  56 #include "gc/shenandoah/shenandoahUtils.hpp"
  57 #include "gc/shenandoah/shenandoahVerifier.hpp"
  58 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  59 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  60 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  61 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  62 #include "gc/shenandoah/mode/shenandoahIUMode.hpp"
  63 #include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
  64 #include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
  65 #if INCLUDE_JFR
  66 #include "gc/shenandoah/shenandoahJfrSupport.hpp"
  67 #endif
  68 
  69 #include "memory/metaspace.hpp"
  70 #include "runtime/vmThread.hpp"
  71 #include "services/mallocTracker.hpp"
  72 
  73 ShenandoahHeap* ShenandoahHeap::_heap = NULL;
  74 
  75 #ifdef ASSERT
  76 template <class T>
  77 void ShenandoahAssertToSpaceClosure::do_oop_work(T* p) {
  78   T o = RawAccess<>::oop_load(p);
  79   if (! CompressedOops::is_null(o)) {
  80     oop obj = CompressedOops::decode_not_null(o);
  81     shenandoah_assert_not_forwarded(p, obj);
  82   }
  83 }
  84 
  85 void ShenandoahAssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_work(p); }
  86 void ShenandoahAssertToSpaceClosure::do_oop(oop* p)       { do_oop_work(p); }
  87 #endif
  88 
  89 class ShenandoahPretouchHeapTask : public AbstractGangTask {
  90 private:
  91   ShenandoahRegionIterator _regions;
  92   const size_t _page_size;
  93 public:
  94   ShenandoahPretouchHeapTask(size_t page_size) :
  95     AbstractGangTask("Shenandoah Pretouch Heap"),
  96     _page_size(page_size) {}
  97 
  98   virtual void work(uint worker_id) {
  99     ShenandoahHeapRegion* r = _regions.next();
 100     while (r != NULL) {
 101       if (r->is_committed()) {
 102         os::pretouch_memory(r->bottom(), r->end(), _page_size);
 103       }
 104       r = _regions.next();
 105     }
 106   }
 107 };
 108 
 109 class ShenandoahPretouchBitmapTask : public AbstractGangTask {
 110 private:
 111   ShenandoahRegionIterator _regions;
 112   char* _bitmap_base;
 113   const size_t _bitmap_size;
 114   const size_t _page_size;
 115 public:
 116   ShenandoahPretouchBitmapTask(char* bitmap_base, size_t bitmap_size, size_t page_size) :
 117     AbstractGangTask("Shenandoah Pretouch Bitmap"),
 118     _bitmap_base(bitmap_base),
 119     _bitmap_size(bitmap_size),
 120     _page_size(page_size) {}
 121 
 122   virtual void work(uint worker_id) {
 123     ShenandoahHeapRegion* r = _regions.next();
 124     while (r != NULL) {
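           // heap_map_factor() is the number of heap bytes covered by one byte of bitmap,
           // so [start, end) is the byte range of the mark bitmap that belongs to this region.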
 125       size_t start = r->index()       * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 126       size_t end   = (r->index() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 127       assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " < " SIZE_FORMAT, end, _bitmap_size);
 128 
 129       if (r->is_committed()) {
 130         os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);
 131       }
 132 
 133       r = _regions.next();
 134     }
 135   }
 136 };
 137 
 138 jint ShenandoahHeap::initialize() {
 139   //
 140   // Figure out heap sizing
 141   //
 142 
 143   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
 144   size_t min_byte_size  = collector_policy()->min_heap_byte_size();
 145   size_t max_byte_size  = collector_policy()->max_heap_byte_size();
 146   size_t heap_alignment = collector_policy()->heap_alignment();
 147 
 148   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 149 
 150   Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
 151   Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
 152 
 153   _num_regions = ShenandoahHeapRegion::region_count();
 154 
 155   // Now we know the number of regions, initialize the heuristics.
 156   initialize_heuristics();
 157 
 158   size_t num_committed_regions = init_byte_size / reg_size_bytes;
 159   num_committed_regions = MIN2(num_committed_regions, _num_regions);
 160   assert(num_committed_regions <= _num_regions, "sanity");
 161   _initial_size = num_committed_regions * reg_size_bytes;
 162 
 163   size_t num_min_regions = min_byte_size / reg_size_bytes;
 164   num_min_regions = MIN2(num_min_regions, _num_regions);
 165   assert(num_min_regions <= _num_regions, "sanity");
 166   _minimum_size = num_min_regions * reg_size_bytes;
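       // For example (hypothetical sizing): with 4 MB regions, -Xms256m commits 64 regions
       // up front, while -Xmx1g yields 256 regions total; actual region size is computed elsewhere.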
 167 
 168   _committed = _initial_size;
 169 
 170   size_t heap_page_size   = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 171   size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 172   size_t region_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 173 
 174   //
 175   // Reserve and commit memory for heap
 176   //
 177 
 178   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
 179   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));
 180   _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
 181   _heap_region_special = heap_rs.special();
 182 
 183   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
 184          "Misaligned heap: " PTR_FORMAT, p2i(base()));
 185 
 186 #if SHENANDOAH_OPTIMIZED_OBJTASK
 187   // The optimized ObjArrayChunkedTask takes some bits away from the full object bits.
 188   // Fail if we ever attempt to address more than we can.
 189   if ((uintptr_t)heap_rs.end() >= ObjArrayChunkedTask::max_addressable()) {
 190     FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
 191                           "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
 192                           "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
 193                 p2i(heap_rs.base()), p2i(heap_rs.end()), ObjArrayChunkedTask::max_addressable());
 194     vm_exit_during_initialization("Fatal Error", buf);
 195   }
 196 #endif
 197 
 198   ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
 199   if (!_heap_region_special) {
 200     os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
 201                               "Cannot commit heap memory");
 202   }
 203 
 204   //
 205   // Reserve and commit memory for bitmap(s)
 206   //
 207 
 208   _bitmap_size = MarkBitMap::compute_size(heap_rs.size());
 209   _bitmap_size = align_up(_bitmap_size, bitmap_page_size);
 210 
 211   size_t bitmap_bytes_per_region = reg_size_bytes / MarkBitMap::heap_map_factor();
 212 
 213   guarantee(bitmap_bytes_per_region != 0,
 214             "Bitmap bytes per region should not be zero");
 215   guarantee(is_power_of_2(bitmap_bytes_per_region),
 216             "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
 217 
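       // A bitmap "slice" is the granule at which bitmap memory is committed and uncommitted:
       // either one page covering several regions' bitmaps, or one region's worth of bitmap bytes.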
 218   if (bitmap_page_size > bitmap_bytes_per_region) {
 219     _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
 220     _bitmap_bytes_per_slice = bitmap_page_size;
 221   } else {
 222     _bitmap_regions_per_slice = 1;
 223     _bitmap_bytes_per_slice = bitmap_bytes_per_region;
 224   }
 225 
 226   guarantee(_bitmap_regions_per_slice >= 1,
 227             "Should have at least one region per slice: " SIZE_FORMAT,
 228             _bitmap_regions_per_slice);
 229 
 230   guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
 231             "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
 232             _bitmap_bytes_per_slice, bitmap_page_size);
 233 
 234   ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
 235   MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
 236   _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
 237   _bitmap_region_special = bitmap.special();
 238 
 239   size_t bitmap_init_commit = _bitmap_bytes_per_slice *
 240                               align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
 241   bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
 242   if (!_bitmap_region_special) {
 243     os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
 244                               "Cannot commit bitmap memory");
 245   }
 246 
 247   _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);
 248 
 249   if (ShenandoahVerify) {
 250     ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
 251     if (!verify_bitmap.special()) {
 252       os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
 253                                 "Cannot commit verification bitmap memory");
 254     }
 255     MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
 256     MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
 257     _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
 258     _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
 259   }
 260 
 261   // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
 262   ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
 263   MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
 264   _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
 265   _aux_bitmap_region_special = aux_bitmap.special();
 266   _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
 267 
 268   //
 269   // Create regions and region sets
 270   //
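       // Region metadata objects live in a dedicated, cache-line-aligned native array,
       // placement-new'ed below, rather than in the Java heap itself.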
 271   size_t region_align = align_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE);
 272   size_t region_storage_size = align_up(region_align * _num_regions, region_page_size);
 273   region_storage_size = align_up(region_storage_size, os::vm_allocation_granularity());
 274 
 275   ReservedSpace region_storage(region_storage_size, region_page_size);
 276   MemTracker::record_virtual_memory_type(region_storage.base(), mtGC);
 277   if (!region_storage.special()) {
 278     os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false,
 279                               "Cannot commit region memory");
 280   }
 281 
 282   // Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks.
 283   // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there.
 284   // If not successful, bite the bullet and allocate at whatever address we get.
 285   {
 286     size_t cset_align = MAX2<size_t>(os::vm_page_size(), os::vm_allocation_granularity());
 287     size_t cset_size = align_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align);
 288 
 289     uintptr_t min = ShenandoahUtils::round_up_power_of_2(cset_align);
 290     uintptr_t max = (1u << 30u);
 291 
 292     for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
 293       char* req_addr = (char*)addr;
 294       assert(is_aligned(req_addr, cset_align), "Should be aligned");
 295       ReservedSpace cset_rs(cset_size, cset_align, false, req_addr);
 296       if (cset_rs.is_reserved()) {
 297         assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
 298         _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 299         break;
 300       }
 301     }
 302 
 303     if (_collection_set == NULL) {
 304       ReservedSpace cset_rs(cset_size, cset_align, false);
 305       _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 306     }
 307   }
 308 
 309   _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
 310   _free_set = new ShenandoahFreeSet(this, _num_regions);
 311 
 312   {
 313     ShenandoahHeapLocker locker(lock());
 314 
 315     for (size_t i = 0; i < _num_regions; i++) {
 316       HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
 317       bool is_committed = i < num_committed_regions;
 318       void* loc = region_storage.base() + i * region_align;
 319 
 320       ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
 321       assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");
 322 
 323       _marking_context->initialize_top_at_mark_start(r);
 324       _regions[i] = r;
 325       assert(!collection_set()->is_in(i), "New region should not be in collection set");
 326     }
 327 
 328     // Initialize to complete
 329     _marking_context->mark_complete();
 330 
 331     _free_set->rebuild();
 332   }
 333 
 334   if (AlwaysPreTouch) {
 335     // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
 336     // before initialize() below zeroes it with the initializing thread. For any given region,
 337     // we touch the region and the corresponding bitmaps from the same thread.
 338     ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
 339 
 340     _pretouch_heap_page_size = heap_page_size;
 341     _pretouch_bitmap_page_size = bitmap_page_size;
 342 
 343 #ifdef LINUX
 344     // UseTransparentHugePages would madvise that backing memory can be coalesced into huge
 345     // pages. But the kernel needs to know that every small page is used, in order to coalesce
 346     // them into a huge one. Therefore, we need to pre-touch with smaller pages.
 347     if (UseTransparentHugePages) {
 348       _pretouch_heap_page_size = (size_t)os::vm_page_size();
 349       _pretouch_bitmap_page_size = (size_t)os::vm_page_size();
 350     }
 351 #endif
 352 
 353     // OS memory managers may want to coalesce back-to-back pages. Make their jobs
 354     // simpler by pre-touching contiguous spaces (heap and bitmap) separately.
 355 
 356     ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, _pretouch_bitmap_page_size);
 357     _workers->run_task(&bcl);
 358 
 359     ShenandoahPretouchHeapTask hcl(_pretouch_heap_page_size);
 360     _workers->run_task(&hcl);
 361   }
 362 
 363   //
 364   // Initialize the rest of GC subsystems
 365   //
 366 
 367   BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));
 368 
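       // Worker-local liveness caches: each worker accumulates per-region live data during
       // marking, and the results are flushed back into the regions afterwards.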
 369   _liveness_cache = NEW_C_HEAP_ARRAY(ShenandoahLiveData*, _max_workers, mtGC);
 370   for (uint worker = 0; worker < _max_workers; worker++) {
 371     _liveness_cache[worker] = NEW_C_HEAP_ARRAY(ShenandoahLiveData, _num_regions, mtGC);
 372     Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
 373   }
 374 
 375   // The call below uses machinery (the SATB* things) that lives in G1, but probably
 376   // belongs in a shared location.
 377   ShenandoahBarrierSet::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
 378                                                SATB_Q_FL_lock,
 379                                                20 /*G1SATBProcessCompletedThreshold */,
 380                                                Shared_SATB_Q_lock);
 381 
 382   _monitoring_support = new ShenandoahMonitoringSupport(this);
 383   _phase_timings = new ShenandoahPhaseTimings(max_workers());
 384   ShenandoahStringDedup::initialize();
 385   ShenandoahCodeRoots::initialize();
 386 
 387   if (ShenandoahPacing) {
 388     _pacer = new ShenandoahPacer(this);
 389     _pacer->setup_for_idle();
 390   } else {
 391     _pacer = NULL;
 392   }
 393 
 394   _control_thread = new ShenandoahControlThread();
 395 
 396   log_info(gc, init)("Initialize Shenandoah heap: " SIZE_FORMAT "%s initial, " SIZE_FORMAT "%s min, " SIZE_FORMAT "%s max",
 397                      byte_size_in_proper_unit(_initial_size),  proper_unit_for_byte_size(_initial_size),
 398                      byte_size_in_proper_unit(_minimum_size),  proper_unit_for_byte_size(_minimum_size),
 399                      byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity())
 400   );
 401 
 402   log_info(gc, init)("Safepointing mechanism: %s",
 403                      SafepointMechanism::uses_thread_local_poll() ? "thread-local poll" :
 404                      (SafepointMechanism::uses_global_page_poll() ? "global-page poll" : "unknown"));
 405 
 406   return JNI_OK;
 407 }
 408 
 409 void ShenandoahHeap::initialize_heuristics() {
 410   if (ShenandoahGCMode != NULL) {
 411     if (strcmp(ShenandoahGCMode, "satb") == 0) {
 412       _gc_mode = new ShenandoahSATBMode();
 413     } else if (strcmp(ShenandoahGCMode, "iu") == 0) {
 414       _gc_mode = new ShenandoahIUMode();
 415     } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
 416       _gc_mode = new ShenandoahPassiveMode();
 417     } else {
 418       vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
 419     }
 420   } else {
 421     ShouldNotReachHere();
 422   }
 423   _gc_mode->initialize_flags();
 424   if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 425     vm_exit_during_initialization(
 426             err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 427                     _gc_mode->name()));
 428   }
 429   if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
 430     vm_exit_during_initialization(
 431             err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 432                     _gc_mode->name()));
 433   }
 434   log_info(gc, init)("Shenandoah GC mode: %s",
 435                      _gc_mode->name());
 436 
 437   _heuristics = _gc_mode->initialize_heuristics();
 438 
 439   if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 440     vm_exit_during_initialization(
 441             err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 442                     _heuristics->name()));
 443   }
 444   if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
 445     vm_exit_during_initialization(
 446             err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 447                     _heuristics->name()));
 448   }
 449   log_info(gc, init)("Shenandoah heuristics: %s",
 450                      _heuristics->name());
 451 }
 452 
 453 #ifdef _MSC_VER
 454 #pragma warning( push )
 455 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 456 #endif
 457 
 458 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 459   CollectedHeap(),
 460   _initial_size(0),
 461   _used(0),
 462   _committed(0),
 463   _bytes_allocated_since_gc_start(0),
 464   _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
 465   _workers(NULL),
 466   _safepoint_workers(NULL),
 467   _heap_region_special(false),
 468   _num_regions(0),
 469   _regions(NULL),
 470   _update_refs_iterator(this),
 471   _control_thread(NULL),
 472   _shenandoah_policy(policy),
 473   _heuristics(NULL),
 474   _free_set(NULL),
 475   _scm(new ShenandoahConcurrentMark()),
 476   _full_gc(new ShenandoahMarkCompact()),
 477   _pacer(NULL),
 478   _verifier(NULL),
 479   _phase_timings(NULL),
 480   _monitoring_support(NULL),
 481   _memory_pool(NULL),
 482   _stw_memory_manager("Shenandoah Pauses", "end of GC pause"),
 483   _cycle_memory_manager("Shenandoah Cycles", "end of GC cycle"),
 484   _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 485   _soft_ref_policy(),
 486   _ref_processor(NULL),
 487   _marking_context(NULL),
 488   _bitmap_size(0),
 489   _bitmap_regions_per_slice(0),
 490   _bitmap_bytes_per_slice(0),
 491   _bitmap_region_special(false),
 492   _aux_bitmap_region_special(false),
 493   _liveness_cache(NULL),
 494   _collection_set(NULL)
 495 {
 496   _heap = this;
 497 
 498   log_info(gc, init)("GC threads: " UINT32_FORMAT " parallel, " UINT32_FORMAT " concurrent", ParallelGCThreads, ConcGCThreads);
 499 
 500   _max_workers = MAX2(_max_workers, 1U);
 501   _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
 502                             /* are_GC_task_threads */ true,
 503                             /* are_ConcurrentGC_threads */ true);
 504   if (_workers == NULL) {
 505     vm_exit_during_initialization("Failed necessary allocation.");
 506   } else {
 507     _workers->initialize_workers();
 508   }
 509 
 510   if (ParallelGCThreads > 1) {
 511     _safepoint_workers = new ShenandoahWorkGang("Safepoint Cleanup Thread",
 512                                                 ParallelGCThreads,
 513                       /* are_GC_task_threads */ false,
 514                  /* are_ConcurrentGC_threads */ false);
 515     _safepoint_workers->initialize_workers();
 516   }
 517 }
 518 
 519 #ifdef _MSC_VER
 520 #pragma warning( pop )
 521 #endif
 522 
 523 class ShenandoahResetBitmapTask : public AbstractGangTask {
 524 private:
 525   ShenandoahRegionIterator _regions;
 526 
 527 public:
 528   ShenandoahResetBitmapTask() :
 529     AbstractGangTask("Parallel Reset Bitmap Task") {}
 530 
 531   void work(uint worker_id) {
 532     ShenandoahHeapRegion* region = _regions.next();
 533     ShenandoahHeap* heap = ShenandoahHeap::heap();
 534     ShenandoahMarkingContext* const ctx = heap->marking_context();
 535     while (region != NULL) {
 536       if (heap->is_bitmap_slice_committed(region)) {
 537         ctx->clear_bitmap(region);
 538       }
 539       region = _regions.next();
 540     }
 541   }
 542 };
 543 
 544 void ShenandoahHeap::reset_mark_bitmap() {
 545   assert_gc_workers(_workers->active_workers());
 546   mark_incomplete_marking_context();
 547 
 548   ShenandoahResetBitmapTask task;
 549   _workers->run_task(&task);
 550 }
 551 
 552 void ShenandoahHeap::print_on(outputStream* st) const {
 553   st->print_cr("Shenandoah Heap");
 554   st->print_cr(" " SIZE_FORMAT "%s total, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
 555                byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
 556                byte_size_in_proper_unit(committed()),    proper_unit_for_byte_size(committed()),
 557                byte_size_in_proper_unit(used()),         proper_unit_for_byte_size(used()));
 558   st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions",
 559                num_regions(),
 560                byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
 561                proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));
 562 
 563   st->print("Status: ");
 564   if (has_forwarded_objects())               st->print("has forwarded objects, ");
 565   if (is_concurrent_mark_in_progress())      st->print("marking, ");
 566   if (is_evacuation_in_progress())           st->print("evacuating, ");
 567   if (is_update_refs_in_progress())          st->print("updating refs, ");
 568   if (is_degenerated_gc_in_progress())       st->print("degenerated gc, ");
 569   if (is_full_gc_in_progress())              st->print("full gc, ");
 570   if (is_full_gc_move_in_progress())         st->print("full gc move, ");
 571 
 572   if (cancelled_gc()) {
 573     st->print("cancelled");
 574   } else {
 575     st->print("not cancelled");
 576   }
 577   st->cr();
 578 
 579   st->print_cr("Reserved region:");
 580   st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
 581                p2i(reserved_region().start()),
 582                p2i(reserved_region().end()));
 583 
 584   ShenandoahCollectionSet* cset = collection_set();
 585   st->print_cr("Collection set:");
 586   if (cset != NULL) {
 587     st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address()));
 588     st->print_cr(" - map (biased):  " PTR_FORMAT, p2i(cset->biased_map_address()));
 589   } else {
 590     st->print_cr(" (NULL)");
 591   }
 592 
 593   st->cr();
 594   MetaspaceUtils::print_on(st);
 595 
 596   if (Verbose) {
 597     print_heap_regions_on(st);
 598   }
 599 }
 600 
 601 class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
 602 public:
 603   void do_thread(Thread* thread) {
 604     assert(thread != NULL, "Sanity");
 605     assert(thread->is_Worker_thread(), "Only worker thread expected");
 606     ShenandoahThreadLocalData::initialize_gclab(thread);
 607   }
 608 };
 609 
 610 void ShenandoahHeap::post_initialize() {
 611   CollectedHeap::post_initialize();
 612   MutexLocker ml(Threads_lock);
 613 
 614   ShenandoahInitWorkerGCLABClosure init_gclabs;
 615   _workers->threads_do(&init_gclabs);
 616 
 617   // The gclab can not be initialized early during VM startup, as it can not determine its max_size yet.
 618   // Instead, we let the WorkGang initialize the gclab when a new worker is created.
 619   _workers->set_initialize_gclab();
 620 
 621   _scm->initialize(_max_workers);
 622   _full_gc->initialize(_gc_timer);
 623 
 624   ref_processing_init();
 625 
 626   _heuristics->initialize();
 627 
 628   JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers());
 629 }
 630 
 631 size_t ShenandoahHeap::used() const {
 632   return OrderAccess::load_acquire(&_used);
 633 }
 634 
 635 size_t ShenandoahHeap::committed() const {
 636   OrderAccess::acquire();
 637   return _committed;
 638 }
 639 
 640 void ShenandoahHeap::increase_committed(size_t bytes) {
 641   shenandoah_assert_heaplocked_or_safepoint();
 642   _committed += bytes;
 643 }
 644 
 645 void ShenandoahHeap::decrease_committed(size_t bytes) {
 646   shenandoah_assert_heaplocked_or_safepoint();
 647   _committed -= bytes;
 648 }
 649 
 650 void ShenandoahHeap::increase_used(size_t bytes) {
 651   Atomic::add(bytes, &_used);
 652 }
 653 
 654 void ShenandoahHeap::set_used(size_t bytes) {
 655   OrderAccess::release_store_fence(&_used, bytes);
 656 }
 657 
 658 void ShenandoahHeap::decrease_used(size_t bytes) {
 659   assert(used() >= bytes, "never decrease heap size by more than we've left");
 660   Atomic::sub(bytes, &_used);
 661 }
 662 
 663 void ShenandoahHeap::increase_allocated(size_t bytes) {
 664   Atomic::add(bytes, &_bytes_allocated_since_gc_start);
 665 }
 666 
 667 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
 668   size_t bytes = words * HeapWordSize;
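       // Wasted space (e.g., retired LAB remainders) is not accounted as used, but it still
       // contributes to the allocation rate observed by the pacer.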
 669   if (!waste) {
 670     increase_used(bytes);
 671   }
 672   increase_allocated(bytes);
 673   if (ShenandoahPacing) {
 674     control_thread()->pacing_notify_alloc(words);
 675     if (waste) {
 676       pacer()->claim_for_alloc(words, true);
 677     }
 678   }
 679 }
 680 
 681 size_t ShenandoahHeap::capacity() const {
 682   return committed();
 683 }
 684 
 685 size_t ShenandoahHeap::max_capacity() const {
 686   return _num_regions * ShenandoahHeapRegion::region_size_bytes();
 687 }
 688 
 689 size_t ShenandoahHeap::min_capacity() const {
 690   return _minimum_size;
 691 }
 692 
 693 size_t ShenandoahHeap::initial_capacity() const {
 694   return _initial_size;
 695 }
 696 
 697 bool ShenandoahHeap::is_in(const void* p) const {
 698   HeapWord* heap_base = (HeapWord*) base();
 699   HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
 700   return p >= heap_base && p < last_region_end;
 701 }
 702 
 703 void ShenandoahHeap::op_uncommit(double shrink_before) {
 704   assert (ShenandoahUncommit, "should be enabled");
 705 
 706   // The application allocates from the beginning of the heap, while the GC allocates at
 707   // the end of it. It is more efficient to uncommit from the end, so that the application
 708   // can keep enjoying the nearby committed regions. GC allocations are much less frequent,
 709   // and can therefore tolerate the cost of committing regions again.
 710 
 711   size_t count = 0;
 712   for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow
 713     ShenandoahHeapRegion* r = get_region(i - 1);
 714     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
 715       ShenandoahHeapLocker locker(lock());
 716       if (r->is_empty_committed()) {
 717         // Do not uncommit below minimal capacity
 718         if (committed() < min_capacity() + ShenandoahHeapRegion::region_size_bytes()) {
 719           break;
 720         }
 721 
 722         r->make_uncommitted();
 723         count++;
 724       }
 725     }
 726     SpinPause(); // allow allocators to take the lock
 727   }
 728 
 729   if (count > 0) {
 730     control_thread()->notify_heap_changed();
 731   }
 732 }
 733 
 734 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
 735   // New object should fit the GCLAB size
 736   size_t min_size = MAX2(size, PLAB::min_size());
 737 
 738   // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
 739   size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
 740   new_size = MIN2(new_size, PLAB::max_size());
 741   new_size = MAX2(new_size, PLAB::min_size());
 742 
 743   // Record the new heuristic value even if we take a shortcut below. This captures
 744   // the case when moderately-sized objects always take the shortcut. At some point,
 745   // the heuristics should catch up with them.
 746   ShenandoahThreadLocalData::set_gclab_size(thread, new_size);
 747 
 748   if (new_size < size) {
 749     // New size still does not fit the object. Fall back to shared allocation.
 750     // This avoids retiring perfectly good GCLABs, when we encounter a large object.
 751     return NULL;
 752   }
 753 
 754   // Retire current GCLAB, and allocate a new one.
 755   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
 756   gclab->retire();
 757 
 758   size_t actual_size = 0;
 759   HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
 760   if (gclab_buf == NULL) {
 761     return NULL;
 762   }
 763 
 764   assert (size <= actual_size, "allocation should fit");
 765 
 766   if (ZeroTLAB) {
 767     // ..and clear it.
 768     Copy::zero_to_words(gclab_buf, actual_size);
 769   } else {
 770     // ...and zap the just-allocated object.
 771 #ifdef ASSERT
 772     // Skip mangling the space corresponding to the object header to
 773     // ensure that the returned space is not considered parsable by
 774     // any concurrent GC thread.
 775     size_t hdr_size = oopDesc::header_size();
 776     Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
 777 #endif // ASSERT
 778   }
 779   gclab->set_buf(gclab_buf, actual_size);
 780   return gclab->allocate(size);
 781 }
 782 
 783 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
 784                                             size_t requested_size,
 785                                             size_t* actual_size) {
 786   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
 787   HeapWord* res = allocate_memory(req);
 788   if (res != NULL) {
 789     *actual_size = req.actual_size();
 790   } else {
 791     *actual_size = 0;
 792   }
 793   return res;
 794 }
 795 
 796 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
 797                                              size_t word_size,
 798                                              size_t* actual_size) {
 799   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
 800   HeapWord* res = allocate_memory(req);
 801   if (res != NULL) {
 802     *actual_size = req.actual_size();
 803   } else {
 804     *actual_size = 0;
 805   }
 806   return res;
 807 }
 808 
 809 HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
 810   intptr_t pacer_epoch = 0;
 811   bool in_new_region = false;
 812   HeapWord* result = NULL;
 813 
 814   if (req.is_mutator_alloc()) {
 815     if (ShenandoahPacing) {
 816       pacer()->pace_for_alloc(req.size());
 817       pacer_epoch = pacer()->epoch();
 818     }
 819 
 820     if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
 821       result = allocate_memory_under_lock(req, in_new_region);
 822     }
 823 
 824     // Allocation failed; block until the control thread has reacted, then retry the allocation.
 825     //
 826     // It might happen that one of the threads requesting allocation unblocks
 827     // way later, after the GC happened, only to fail its second allocation, because
 828     // other threads have already depleted the free storage. In this case, a better
 829     // strategy is to try again, as long as GC makes progress.
 830     //
 831     // Additionally, we need to make sure the allocation was retried after at least one
 832     // Full GC, which means we want to try more than ShenandoahFullGCThreshold times.
 833 
 834     size_t tries = 0;
 835 
 836     while (result == NULL && _progress_last_gc.is_set()) {
 837       tries++;
 838       control_thread()->handle_alloc_failure(req);
 839       result = allocate_memory_under_lock(req, in_new_region);
 840     }
 841 
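         // Even without observable GC progress, retry until we have tried more than
         // ShenandoahFullGCThreshold times; this guarantees at least one Full GC had a chance
         // to run before we give up.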
 842     while (result == NULL && tries <= ShenandoahFullGCThreshold) {
 843       tries++;
 844       control_thread()->handle_alloc_failure(req);
 845       result = allocate_memory_under_lock(req, in_new_region);
 846     }
 847 
 848   } else {
 849     assert(req.is_gc_alloc(), "Can only accept GC allocs here");
 850     result = allocate_memory_under_lock(req, in_new_region);
 851     // Do not call handle_alloc_failure() here, because we cannot block.
 852     // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
 853   }
 854 
 855   if (in_new_region) {
 856     control_thread()->notify_heap_changed();
 857   }
 858 
 859   if (result != NULL) {
 860     size_t requested = req.size();
 861     size_t actual = req.actual_size();
 862 
 863     assert (req.is_lab_alloc() || (requested == actual),
 864             "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
 865             ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);
 866 
 867     if (req.is_mutator_alloc()) {
 868       notify_mutator_alloc_words(actual, false);
 869 
 870       // If we requested more than we were granted, give the rest back to pacer.
 871       // This only matters if we are in the same pacing epoch: do not try to unpace
 872       // over the budget for the other phase.
 873       if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
 874         pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
 875       }
 876     } else {
 877       increase_used(actual*HeapWordSize);
 878     }
 879   }
 880 
 881   return result;
 882 }
 883 
 884 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
 885   ShenandoahHeapLocker locker(lock());
 886   return _free_set->allocate(req, in_new_region);
 887 }
 888 
 889 HeapWord* ShenandoahHeap::mem_allocate(size_t size,
 890                                         bool*  gc_overhead_limit_was_exceeded) {
 891   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
 892   return allocate_memory(req);
 893 }
 894 
 895 MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
 896                                                              size_t size,
 897                                                              Metaspace::MetadataType mdtype) {
 898   MetaWord* result;
 899 
 900   // Notify GC heuristics of the metaspace OOM, if class unloading is possible.
 901   if (heuristics()->can_unload_classes()) {
 902     ShenandoahHeuristics* h = heuristics();
 903     h->record_metaspace_oom();
 904   }
 905 
 906   // Expand and retry allocation
 907   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
 908   if (result != NULL) {
 909     return result;
 910   }
 911 
 912   // Start full GC
 913   collect(GCCause::_metadata_GC_clear_soft_refs);
 914 
 915   // Retry allocation
 916   result = loader_data->metaspace_non_null()->allocate(size, mdtype);
 917   if (result != NULL) {
 918     return result;
 919   }
 920 
 921   // Expand and retry allocation
 922   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
 923   if (result != NULL) {
 924     return result;
 925   }
 926 
 927   // Out of memory
 928   return NULL;
 929 }
 930 
 931 class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
 932 private:
 933   ShenandoahHeap* const _heap;
 934   Thread* const _thread;
 935 public:
 936   ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
 937     _heap(heap), _thread(Thread::current()) {}
 938 
 939   void do_object(oop p) {
 940     shenandoah_assert_marked(NULL, p);
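         // Copy only objects that are not forwarded yet; races between workers copying
         // the same object are resolved inside evacuate_object().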
 941     if (!p->is_forwarded()) {
 942       _heap->evacuate_object(p, _thread);
 943     }
 944   }
 945 };
 946 
 947 class ShenandoahEvacuationTask : public AbstractGangTask {
 948 private:
 949   ShenandoahHeap* const _sh;
 950   ShenandoahCollectionSet* const _cs;
 951   bool _concurrent;
 952 public:
 953   ShenandoahEvacuationTask(ShenandoahHeap* sh,
 954                            ShenandoahCollectionSet* cs,
 955                            bool concurrent) :
 956     AbstractGangTask("Parallel Evacuation Task"),
 957     _sh(sh),
 958     _cs(cs),
 959     _concurrent(concurrent)
 960   {}
 961 
 962   void work(uint worker_id) {
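         // Concurrent evacuation joins the suspendible thread set, so workers can yield to
         // safepoints; the stop-the-world path below does not need to.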
 963     if (_concurrent) {
 964       ShenandoahConcurrentWorkerSession worker_session(worker_id);
 965       ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
 966       ShenandoahEvacOOMScope oom_evac_scope;
 967       do_work();
 968     } else {
 969       ShenandoahParallelWorkerSession worker_session(worker_id);
 970       ShenandoahEvacOOMScope oom_evac_scope;
 971       do_work();
 972     }
 973   }
 974 
 975 private:
 976   void do_work() {
 977     ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
 978     ShenandoahHeapRegion* r;
 979     while ((r =_cs->claim_next()) != NULL) {
 980       assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
 981       _sh->marked_object_iterate(r, &cl);
 982 
 983       if (ShenandoahPacing) {
 984         _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
 985       }
 986 
 987       if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
 988         break;
 989       }
 990     }
 991   }
 992 };
 993 
 994 void ShenandoahHeap::trash_cset_regions() {
 995   ShenandoahHeapLocker locker(lock());
 996 
 997   ShenandoahCollectionSet* set = collection_set();
 998   ShenandoahHeapRegion* r;
 999   set->clear_current_index();
1000   while ((r = set->next()) != NULL) {
1001     r->make_trash();
1002   }
1003   collection_set()->clear();
1004 }
1005 
1006 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
1007   st->print_cr("Heap Regions:");
1008   st->print_cr("EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HC=humongous continuation, CS=collection set, T=trash, P=pinned");
1009   st->print_cr("BTE=bottom/top/end, U=used, T=TLAB allocs, G=GCLAB allocs, S=shared allocs, L=live data");
1010   st->print_cr("R=root, CP=critical pins, TAMS=top-at-mark-start, UWM=update watermark");
1011   st->print_cr("SN=alloc sequence number");
1012 
1013   for (size_t i = 0; i < num_regions(); i++) {
1014     get_region(i)->print_on(st);
1015   }
1016 }
1017 
1018 void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
1019   assert(start->is_humongous_start(), "reclaim regions starting with the first one");
1020 
1021   oop humongous_obj = oop(start->bottom());
1022   size_t size = humongous_obj->size();
1023   size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
1024   size_t index = start->index() + required_regions - 1;
1025 
1026   assert(!start->has_live(), "liveness must be zero");
1027 
1028   for(size_t i = 0; i < required_regions; i++) {
1029     // Reclaim from the tail. Otherwise, an assertion fails when printing the region to the trace log,
1030     // as it expects that every humongous region belongs to an object that starts with a humongous start region.
1031     ShenandoahHeapRegion* region = get_region(index --);
1032 
1033     assert(region->is_humongous(), "expect correct humongous start or continuation");
1034     assert(!region->is_cset(), "Humongous region should not be in collection set");
1035 
1036     region->make_trash_immediate();
1037   }
1038 }
1039 
1040 class ShenandoahRetireGCLABClosure : public ThreadClosure {
1041 public:
1042   void do_thread(Thread* thread) {
1043     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1044     assert(gclab != NULL, "GCLAB should be initialized for %s", thread->name());
1045     gclab->retire();
1046   }
1047 };
1048 
1049 void ShenandoahHeap::make_parsable(bool retire_tlabs) {
1050   if (UseTLAB) {
1051     CollectedHeap::ensure_parsability(retire_tlabs);
1052   }
1053   ShenandoahRetireGCLABClosure cl;
1054   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1055     cl.do_thread(t);
1056   }
1057   workers()->threads_do(&cl);
1058 }
1059 
1060 void ShenandoahHeap::resize_tlabs() {
1061   CollectedHeap::resize_all_tlabs();
1062 }
1063 
1064 void ShenandoahHeap::accumulate_statistics_tlabs() {
1065   CollectedHeap::accumulate_statistics_all_tlabs();
1066 }
1067 
1068 class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
1069 private:
1070   ShenandoahRootEvacuator* _rp;
1071 
1072 public:
1073   ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
1074     AbstractGangTask("Shenandoah evacuate and update roots"),
1075     _rp(rp) {}
1076 
1077   void work(uint worker_id) {
1078     ShenandoahParallelWorkerSession worker_session(worker_id);
1079     ShenandoahEvacOOMScope oom_evac_scope;
1080     ShenandoahEvacuateUpdateRootsClosure cl;
1081     MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
1082     _rp->roots_do(worker_id, &cl);
1083   }
1084 };
1085 
1086 void ShenandoahHeap::evacuate_and_update_roots() {
1087 #if COMPILER2_OR_JVMCI
1088   DerivedPointerTable::clear();
1089 #endif
1090   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");
1091 
1092   {
1093     ShenandoahRootEvacuator rp(workers()->active_workers(), ShenandoahPhaseTimings::init_evac);
1094     ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
1095     workers()->run_task(&roots_task);
1096   }
1097 
1098 #if COMPILER2_OR_JVMCI
1099   DerivedPointerTable::update_pointers();
1100 #endif
1101 }
1102 
1103 // Returns size in bytes
1104 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1105   if (ShenandoahElasticTLAB) {
1106     // With Elastic TLABs, return the max allowed size, and let the allocation path
1107     // figure out the safe size for the current allocation.
1108     return ShenandoahHeapRegion::max_tlab_size_bytes();
1109   } else {
1110     return MIN2(_free_set->unsafe_peek_free(), ShenandoahHeapRegion::max_tlab_size_bytes());
1111   }
1112 }
1113 
1114 size_t ShenandoahHeap::max_tlab_size() const {
1115   // Returns size in words
1116   return ShenandoahHeapRegion::max_tlab_size_words();
1117 }
1118 
1119 class ShenandoahRetireAndResetGCLABClosure : public ThreadClosure {
1120 public:
1121   void do_thread(Thread* thread) {
1122     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1123     gclab->retire();
1124     if (ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1125       ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1126     }
1127   }
1128 };
1129 
1130 void ShenandoahHeap::retire_and_reset_gclabs() {
1131   ShenandoahRetireAndResetGCLABClosure cl;
1132   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1133     cl.do_thread(t);
1134   }
1135   workers()->threads_do(&cl);
1136 }
1137 
1138 void ShenandoahHeap::collect(GCCause::Cause cause) {
1139   control_thread()->request_gc(cause);
1140 }
1141 
1142 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
1143   //assert(false, "Shouldn't need to do full collections");
1144 }
1145 
1146 CollectorPolicy* ShenandoahHeap::collector_policy() const {
1147   return _shenandoah_policy;
1148 }
1149 
1150 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1151   ShenandoahHeapRegion* r = heap_region_containing(addr);
1152   if (r != NULL) {
1153     return r->block_start(addr);
1154   }
1155   return NULL;
1156 }
1157 
1158 size_t ShenandoahHeap::block_size(const HeapWord* addr) const {
1159   ShenandoahHeapRegion* r = heap_region_containing(addr);
1160   return r->block_size(addr);
1161 }
1162 
1163 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1164   ShenandoahHeapRegion* r = heap_region_containing(addr);
1165   return r->block_is_obj(addr);
1166 }
1167 
1168 jlong ShenandoahHeap::millis_since_last_gc() {
1169   double v = heuristics()->time_since_last_gc() * 1000;
1170   assert(0 <= v && v <= max_jlong, "value should fit: %f", v);
1171   return (jlong)v;
1172 }
1173 
1174 void ShenandoahHeap::prepare_for_verify() {
1175   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
1176     make_parsable(false);
1177   }
1178 }
1179 
1180 void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
1181   workers()->print_worker_threads_on(st);
1182   if (ShenandoahStringDedup::is_enabled()) {
1183     ShenandoahStringDedup::print_worker_threads_on(st);
1184   }
1185 }
1186 
1187 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1188   workers()->threads_do(tcl);
1189   if (_safepoint_workers != NULL) {
1190     _safepoint_workers->threads_do(tcl);
1191   }
1192   if (ShenandoahStringDedup::is_enabled()) {
1193     ShenandoahStringDedup::threads_do(tcl);
1194   }
1195 }
1196 
1197 void ShenandoahHeap::print_tracing_info() const {
1198   LogTarget(Info, gc, stats) lt;
1199   if (lt.is_enabled()) {
1200     ResourceMark rm;
1201     LogStream ls(lt);
1202 
1203     phase_timings()->print_global_on(&ls);
1204 
1205     ls.cr();
1206     ls.cr();
1207 
1208     shenandoah_policy()->print_gc_stats(&ls);
1209 
1210     ls.cr();
1211     ls.cr();
1212   }
1213 }
1214 
1215 void ShenandoahHeap::verify(VerifyOption vo) {
1216   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1217     if (ShenandoahVerify) {
1218       verifier()->verify_generic(vo);
1219     } else {
1220       // TODO: Consider allocating verification bitmaps on demand,
1221       // and turn this on unconditionally.
1222     }
1223   }
1224 }
1225 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1226   return _free_set->capacity();
1227 }
1228 
1229 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1230 private:
1231   MarkBitMap* _bitmap;
1232   Stack<oop,mtGC>* _oop_stack;
1233   ShenandoahHeap* const _heap;
1234   ShenandoahMarkingContext* const _marking_context;
1235 
1236   template <class T>
1237   void do_oop_work(T* p) {
1238     T o = RawAccess<>::oop_load(p);
1239     if (!CompressedOops::is_null(o)) {
1240       oop obj = CompressedOops::decode_not_null(o);
1241       obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
1242 
1243       assert(oopDesc::is_oop(obj), "must be a valid oop");
1244       if (!_bitmap->isMarked((HeapWord*) obj)) {
1245         _bitmap->mark((HeapWord*) obj);
1246         _oop_stack->push(obj);
1247       }
1248     }
1249   }
1250 public:
1251   ObjectIterateScanRootClosure(MarkBitMap* bitmap, Stack<oop,mtGC>* oop_stack) :
1252     _bitmap(bitmap), _oop_stack(oop_stack), _heap(ShenandoahHeap::heap()),
1253     _marking_context(_heap->marking_context()) {}
1254   void do_oop(oop* p)       { do_oop_work(p); }
1255   void do_oop(narrowOop* p) { do_oop_work(p); }
1256 };
1257 
1258 /*
1259  * This is public API, used in preparation of object_iterate().
1260  * Since we don't do a linear scan of the heap in object_iterate() (see comment below), we don't
1261  * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can
1262  * control, we call ShenandoahHeap::make_parsable().
1263  */
1264 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
1265   // No-op.
1266 }
1267 
1268 /*
1269  * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
1270  *
1271  * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
1272  * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
1273  * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
1274  * scanning therefore depends on having a valid marking bitmap to support it. However, we only
1275  * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
1276  * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
1277  * wiped the bitmap in preparation for next marking).
1278  *
1279  * For all those reasons, we implement object iteration as a single marking traversal, reporting
1280  * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1281  * is allowed to report dead objects, but is not required to do so.
1282  */
1283 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1284   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1285   if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1286     log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1287     return;
1288   }
1289 
1290   // Reset bitmap
1291   _aux_bit_map.clear();
1292 
1293   Stack<oop,mtGC> oop_stack;
1294 
1295   ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1296 
1297   {
1298     // First, we process GC roots according to the current GC cycle.
1299     // This populates the work stack with the initial objects.
1300     // It is important to relinquish the associated locks before diving
1301     // into the heap dumper.
1302     ShenandoahHeapIterationRootScanner rp;
1303     rp.roots_do(&oops);
1304   }
1305 
1306   // Work through the oop stack to traverse heap.
1307   while (! oop_stack.is_empty()) {
1308     oop obj = oop_stack.pop();
1309     assert(oopDesc::is_oop(obj), "must be a valid oop");
1310     cl->do_object(obj);
1311     obj->oop_iterate(&oops);
1312   }
1313 
1314   assert(oop_stack.is_empty(), "should be empty");
1315 
1316   if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
1317     log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
1318   }
1319 }
1320 
1321 void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
1322   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1323   object_iterate(cl);
1324 }
1325 
1326 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1327   for (size_t i = 0; i < num_regions(); i++) {
1328     ShenandoahHeapRegion* current = get_region(i);
1329     blk->heap_region_do(current);
1330   }
1331 }
1332 
1333 class ShenandoahParallelHeapRegionTask : public AbstractGangTask {
1334 private:
1335   ShenandoahHeap* const _heap;
1336   ShenandoahHeapRegionClosure* const _blk;
1337 
1338   shenandoah_padding(0);
1339   volatile size_t _index;
1340   shenandoah_padding(1);
1341 
1342 public:
1343   ShenandoahParallelHeapRegionTask(ShenandoahHeapRegionClosure* blk) :
1344           AbstractGangTask("Parallel Region Task"),
1345           _heap(ShenandoahHeap::heap()), _blk(blk), _index(0) {}
1346 
1347   void work(uint worker_id) {
1348     size_t stride = ShenandoahParallelRegionStride;
1349 
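         // Workers claim disjoint strides of regions by atomically bumping the shared index.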
1350     size_t max = _heap->num_regions();
1351     while (_index < max) {
1352       size_t cur = Atomic::add(stride, &_index) - stride;
1353       size_t start = cur;
1354       size_t end = MIN2(cur + stride, max);
1355       if (start >= max) break;
1356 
1357       for (size_t i = cur; i < end; i++) {
1358         ShenandoahHeapRegion* current = _heap->get_region(i);
1359         _blk->heap_region_do(current);
1360       }
1361     }
1362   }
1363 };
1364 
1365 void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1366   assert(blk->is_thread_safe(), "Only thread-safe closures here");
1367   if (num_regions() > ShenandoahParallelRegionStride) {
1368     ShenandoahParallelHeapRegionTask task(blk);
1369     workers()->run_task(&task);
1370   } else {
1371     heap_region_iterate(blk);
1372   }
1373 }
1374 
1375 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1376 private:
1377   ShenandoahMarkingContext* const _ctx;
1378 public:
1379   ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
1380 
1381   void heap_region_do(ShenandoahHeapRegion* r) {
1382     assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
1383     if (r->is_active()) {
1384       // Check if the region needs its TAMS updated. We have already updated it during concurrent
1385       // reset, so it is very likely we don't need to do another write here.
1386       if (_ctx->top_at_mark_start(r) != r->top()) {
1387         _ctx->capture_top_at_mark_start(r);
1388       }
1389     } else {
1390       assert(_ctx->top_at_mark_start(r) == r->top(),
1391              "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
1392     }
1393   }
1394 
1395   bool is_thread_safe() { return true; }
1396 };
1397 
1398 void ShenandoahHeap::op_init_mark() {
1399   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1400   assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
1401 
1402   assert(marking_context()->is_bitmap_clear(), "need clear marking bitmap");
1403   assert(!marking_context()->is_complete(), "should not be complete");
1404   assert(!has_forwarded_objects(), "No forwarded objects on this path");
1405 
1406   if (ShenandoahVerify) {
1407     verifier()->verify_before_concmark();
1408   }
1409 
1410   {
1411     ShenandoahGCPhase phase(ShenandoahPhaseTimings::accumulate_stats);
1412     accumulate_statistics_tlabs();
1413   }
1414 
1415   if (VerifyBeforeGC) {
1416     Universe::verify();
1417   }
1418 
1419   set_concurrent_mark_in_progress(true);
1420   // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
1421   {
1422     ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::make_parsable);
1423     make_parsable(true);
1424   }
1425 
1426   {
1427     ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::init_update_region_states);
1428     ShenandoahInitMarkUpdateRegionStateClosure cl;
1429     parallel_heap_region_iterate(&cl);
1430   }
1431 
1432   // Make above changes visible to worker threads
1433   OrderAccess::fence();
1434 
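       // Scan the root set under the pause: objects directly reachable from roots are
       // marked and queued as the starting points for concurrent marking.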
1435   concurrent_mark()->mark_roots(ShenandoahPhaseTimings::scan_roots);
1436 
1437   if (UseTLAB) {
1438     ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::resize_tlabs);
1439     resize_tlabs();
1440   }
1441 
1442   if (ShenandoahPacing) {
1443     pacer()->setup_for_mark();
1444   }
1445 }
1446 
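     // Concurrent marking proper: drain the queues seeded at init mark and trace the
     // object graph, relying on the SATB barrier to catch concurrent mutations.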
1447 void ShenandoahHeap::op_mark() {
1448   concurrent_mark()->mark_from_roots();
1449 }
1450 
1451 class ShenandoahFinalMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1452 private:
1453   ShenandoahMarkingContext* const _ctx;
1454   ShenandoahHeapLock* const _lock;
1455 
1456 public:
1457   ShenandoahFinalMarkUpdateRegionStateClosure() :
1458     _ctx(ShenandoahHeap::heap()->complete_marking_context()), _lock(ShenandoahHeap::heap()->lock()) {}
1459 
1460   void heap_region_do(ShenandoahHeapRegion* r) {
1461     if (r->is_active()) {
1462       // All allocations past TAMS are implicitly live, adjust the region data.
1463       // Bitmaps/TAMS are swapped at this point, so we need to poll complete bitmap.
1464       HeapWord *tams = _ctx->top_at_mark_start(r);
1465       HeapWord *top = r->top();
1466       if (top > tams) {
1467         r->increase_live_data_alloc_words(pointer_delta(top, tams));
1468       }
1469 
1470       // We are about to select the collection set, make sure it knows about
1471       // current pinning status. Also, this allows trashing more regions whose
1472       // pinning status has just been dropped.
1473       if (r->is_pinned()) {
1474         if (r->pin_count() == 0) {
1475           ShenandoahHeapLocker locker(_lock);
1476           r->make_unpinned();
1477         }
1478       } else {
1479         if (r->pin_count() > 0) {
1480           ShenandoahHeapLocker locker(_lock);
1481           r->make_pinned();
1482         }
1483       }
1484 
1485       // Remember the limit for updating refs. It is guaranteed that no from-space
1486       // references are written past this watermark from here on.
1487       r->set_update_watermark_at_safepoint(r->top());
1488     } else {
1489       assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
1490       assert(_ctx->top_at_mark_start(r) == r->top(),
1491              "Region " SIZE_FORMAT " should have correct TAMS", r->index());
1492     }
1493   }
1494 
1495   bool is_thread_safe() { return true; }
1496 };
1497 
1498 void ShenandoahHeap::op_final_mark() {
1499   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1500   assert(!has_forwarded_objects(), "No forwarded objects on this path");
1501 
1502   // It is critical that we evacuate roots right after finishing marking,
1503   // so that we don't get unmarked objects in the roots.
1505 
1506   if (!cancelled_gc()) {
1507     concurrent_mark()->finish_mark_from_roots(/* full_gc = */ false);
1508 
1509     // Marking is completed, deactivate SATB barrier
1510     set_concurrent_mark_in_progress(false);
1511     mark_complete_marking_context();
1512 
1513     parallel_cleaning(false /* full gc*/);
1514 
1515     if (ShenandoahVerify) {
1516       verifier()->verify_roots_no_forwarded();
1517     }
1518 
1519     {
1520       ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::final_update_region_states);
1521       ShenandoahFinalMarkUpdateRegionStateClosure cl;
1522       parallel_heap_region_iterate(&cl);
1523 
1524       assert_pinned_region_status();
1525     }
1526 
1527     // Retire the TLABs, which will force threads to reacquire their TLABs after the pause.
1528     // This is needed for two reasons. Strong one: new allocations would go to the new freeset,
1529     // which would be outside the collection set, so no cset writes would happen there.
1530     // Weaker one: new allocations would happen past the update watermark, and so less work would
1531     // be needed for reference updates (they would update the large filler instead).
1532     {
1533       ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::retire_tlabs);
1534       make_parsable(true);
1535     }
1536 
1537     {
1538       ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::choose_cset);
1539       ShenandoahHeapLocker locker(lock());
1540       _collection_set->clear();
1541       heuristics()->choose_collection_set(_collection_set);
1542     }
1543 
1544     {
1545       ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::final_rebuild_freeset);
1546       ShenandoahHeapLocker locker(lock());
1547       _free_set->rebuild();
1548     }
1549 
1550     // If collection set has candidates, start evacuation.
1551     // Otherwise, bypass the rest of the cycle.
1552     if (!collection_set()->is_empty()) {
1553       ShenandoahGCSubPhase init_evac(ShenandoahPhaseTimings::init_evac);
1554 
1555       if (ShenandoahVerify) {
1556         verifier()->verify_before_evacuation();
1557       }
1558 
1559       set_evacuation_in_progress(true);
1560       // From here on, we need to update references.
1561       set_has_forwarded_objects(true);
1562 
1563       if (!is_degenerated_gc_in_progress()) {
1564         evacuate_and_update_roots();
1565       }
1566 
1567       if (ShenandoahPacing) {
1568         pacer()->setup_for_evac();
1569       }
1570 
1571       if (ShenandoahVerify) {
1572         verifier()->verify_roots_no_forwarded();
1573         verifier()->verify_during_evacuation();
1574       }
1575     } else {
1576       if (ShenandoahVerify) {
1577         verifier()->verify_after_concmark();
1578       }
1579 
1580       if (VerifyAfterGC) {
1581         Universe::verify();
1582       }
1583     }
1584 
1585   } else {
1586     // If this cycle was updating references, we need to keep the has_forwarded_objects
1587     // flag on, for subsequent phases to deal with it.
1588     concurrent_mark()->cancel();
1589     set_concurrent_mark_in_progress(false);
1590 
1591     if (process_references()) {
1592       // Abandon reference processing right away: pre-cleaning must have failed.
1593       ReferenceProcessor *rp = ref_processor();
1594       rp->disable_discovery();
1595       rp->abandon_partial_discovery();
1596       rp->verify_no_references_recorded();
1597     }
1598   }
1599 }
1600 
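     // Evacuation entry points. The last constructor argument selects concurrent (true)
     // versus stop-the-world (false) operation of the evacuation task.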
1601 void ShenandoahHeap::op_conc_evac() {
1602   ShenandoahEvacuationTask task(this, _collection_set, true);
1603   workers()->run_task(&task);
1604 }
1605 
1606 void ShenandoahHeap::op_stw_evac() {
1607   ShenandoahEvacuationTask task(this, _collection_set, false);
1608   workers()->run_task(&task);
1609 }
1610 
1611 void ShenandoahHeap::op_updaterefs() {
1612   update_heap_references(true);
1613 }
1614 
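     // Both cleanup phases recycle trashed regions back into the free set: immediate
     // garbage after final mark, and the collection set after final update-refs.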
1615 void ShenandoahHeap::op_cleanup_early() {
1616   free_set()->recycle_trash();
1617 }
1618 
1619 void ShenandoahHeap::op_cleanup_complete() {
1620   free_set()->recycle_trash();
1621 }
1622 
1623 class ShenandoahResetUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1624 private:
1625   ShenandoahMarkingContext* const _ctx;
1626 public:
1627   ShenandoahResetUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
1628 
1629   void heap_region_do(ShenandoahHeapRegion* r) {
1630     if (r->is_active()) {
1631       // Reset live data and set TAMS optimistically. We would recheck these under the pause
1632       // anyway to capture any updates that happen between now and the pause.
1633       r->clear_live_data();
1634       _ctx->capture_top_at_mark_start(r);
1635     }
1636   }
1637 
1638   bool is_thread_safe() { return true; }
1639 };
1640 
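     // Concurrent reset prepares for the next cycle: clear the mark bitmap, then reset
     // per-region live data and TAMS via the closure above.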
1641 void ShenandoahHeap::op_reset() {
1642   if (ShenandoahPacing) {
1643     pacer()->setup_for_reset();
1644   }
1645   reset_mark_bitmap();
1646 
1647   ShenandoahResetUpdateRegionStateClosure cl;
1648   parallel_heap_region_iterate(&cl);
1649 }
1650 
1651 void ShenandoahHeap::op_preclean() {
1652   if (ShenandoahPacing) {
1653     pacer()->setup_for_preclean();
1654   }
1655   concurrent_mark()->preclean_weak_refs();
1656 }
1657 
1658 void ShenandoahHeap::op_full(GCCause::Cause cause) {
1659   ShenandoahMetricsSnapshot metrics;
1660   metrics.snap_before();
1661 
1662   full_gc()->do_it(cause);
1663   if (UseTLAB) {
1664     ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::full_gc_resize_tlabs);
1665     resize_all_tlabs();
1666   }
1667 
1668   metrics.snap_after();
1669 
1670   if (metrics.is_good_progress()) {
1671     _progress_last_gc.set();
1672   } else {
1673     // Nothing to do. Tell the allocation path that we have failed to make
1674     // progress, and it can finally fail.
1675     _progress_last_gc.unset();
1676   }
1677 }
1678 
1679 void ShenandoahHeap::op_degenerated(ShenandoahDegenPoint point) {
1680   // Degenerated GC is STW, but it can also fail. The current mechanics communicate
1681   // GC failure via the cancelled_gc() flag. So, if we detect the failure after
1682   // some phase, we have to upgrade the Degenerated GC to Full GC.
1683 
1684   clear_cancelled_gc();
1685 
1686   ShenandoahMetricsSnapshot metrics;
1687   metrics.snap_before();
1688 
1689   switch (point) {
1690     // The cases below form a Duff's-device-like fall-through: they describe the actual
1691     // GC cycle, but enter it at different points, depending on which concurrent phase
1692     // had degenerated.
1693 
1694     case _degenerated_outside_cycle:
1695       // We have degenerated from outside the cycle, which means something is bad with
1696       // the heap, most probably heavy humongous fragmentation, or we are very low on free
1697       // space. It makes little sense to wait for Full GC to reclaim as much as it can, when
1698       // we can do the most aggressive degen cycle, which includes processing references and
1699       // class unloading, unless those features are explicitly disabled.
1700       //
1701       // Note that we can only do this for "outside-cycle" degens, otherwise we would risk
1702       // changing the cycle parameters mid-cycle during concurrent -> degenerated handover.
1703       set_process_references(heuristics()->can_process_references());
1704       set_unload_classes(heuristics()->can_unload_classes());
1705 
1706       op_reset();
1707 
1708       op_init_mark();
1709       if (cancelled_gc()) {
1710         op_degenerated_fail();
1711         return;
1712       }
1713 
1714     case _degenerated_mark:
1715       op_final_mark();
1716       if (cancelled_gc()) {
1717         op_degenerated_fail();
1718         return;
1719       }
1720 
1721       op_cleanup_early();
1722 
1723     case _degenerated_evac:
1724       // If the heuristics think we should do the cycle, this flag would be set,
1725       // and we can do evacuation. Otherwise, it would be the shortcut cycle.
1726       if (is_evacuation_in_progress()) {
1727 
1728         // Degeneration under oom-evac protocol might have left some objects in
1729         // collection set un-evacuated. Restart evacuation from the beginning to
1730         // capture all objects. For all the objects that are already evacuated,
1731         // it would be a simple check, which is supposed to be fast. This is also
1732         // safe to do even without degeneration, as CSet iterator is at beginning
1733         // in preparation for evacuation anyway.
1734         //
1735         // Before doing that, we need to make sure we never had any cset-pinned
1736         // regions. This may happen if allocation failure happened when evacuating
1737         // the about-to-be-pinned object, oom-evac protocol left the object in
1738         // the collection set, and then the pin reached the cset region. If we continue
1739         // the cycle here, we would trash the cset and the live objects in it. To avoid
1740         // it, we fail degeneration right away and slide into Full GC to recover.
1741 
1742         {
1743           sync_pinned_region_status();
1744           collection_set()->clear_current_index();
1745 
1746           ShenandoahHeapRegion* r;
1747           while ((r = collection_set()->next()) != NULL) {
1748             if (r->is_pinned()) {
1749               cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
1750               op_degenerated_fail();
1751               return;
1752             }
1753           }
1754 
1755           collection_set()->clear_current_index();
1756         }
1757 
1758         op_stw_evac();
1759         if (cancelled_gc()) {
1760           op_degenerated_fail();
1761           return;
1762         }
1763       }
1764 
1765       // If the heuristics think we should do the cycle, this flag would be set,
1766       // and we need to do update-refs. Otherwise, it would be the shortcut cycle.
1767       if (has_forwarded_objects()) {
1768         op_init_updaterefs();
1769         if (cancelled_gc()) {
1770           op_degenerated_fail();
1771           return;
1772         }
1773       }
1774 
1775     case _degenerated_updaterefs:
1776       if (has_forwarded_objects()) {
1777         op_final_updaterefs();
1778         if (cancelled_gc()) {
1779           op_degenerated_fail();
1780           return;
1781         }
1782       }
1783 
1784       op_cleanup_complete();
1785       break;
1786 
1787     default:
1788       ShouldNotReachHere();
1789   }
1790 
1791   if (ShenandoahVerify) {
1792     verifier()->verify_after_degenerated();
1793   }
1794 
1795   if (VerifyAfterGC) {
1796     Universe::verify();
1797   }
1798 
1799   metrics.snap_after();
1800 
1801   // Check for futility and fail. There is no reason to do several back-to-back Degenerated cycles,
1802   // because that probably means the heap is overloaded and/or fragmented.
1803   if (!metrics.is_good_progress()) {
1804     _progress_last_gc.unset();
1805     cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
1806     op_degenerated_futile();
1807   } else {
1808     _progress_last_gc.set();
1809   }
1810 }
1811 
1812 void ShenandoahHeap::op_degenerated_fail() {
1813   log_info(gc)("Cannot finish degeneration, upgrading to Full GC");
1814   shenandoah_policy()->record_degenerated_upgrade_to_full();
1815   op_full(GCCause::_shenandoah_upgrade_to_full_gc);
1816 }
1817 
1818 void ShenandoahHeap::op_degenerated_futile() {
1819   shenandoah_policy()->record_degenerated_upgrade_to_full();
1820   op_full(GCCause::_shenandoah_upgrade_to_full_gc);
1821 }
1822 
1823 void ShenandoahHeap::force_satb_flush_all_threads() {
1824   if (!is_concurrent_mark_in_progress()) {
1825     // No need to flush SATBs
1826     return;
1827   }
1828 
1829   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1830     ShenandoahThreadLocalData::set_force_satb_flush(t, true);
1831   }
1832   // The threads are not "acquiring" their thread-local data, but it does not
1833   // hurt to "release" the updates here anyway.
1834   OrderAccess::fence();
1835 }
1836 
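     // Publish the packed gc-state byte into each Java thread's thread-local data,
     // where the barrier fast paths read it.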
1837 void ShenandoahHeap::set_gc_state_all_threads(char state) {
1838   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1839     ShenandoahThreadLocalData::set_gc_state(t, state);
1840   }
1841 }
1842 
1843 void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) {
1844   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should really be Shenandoah safepoint");
1845   _gc_state.set_cond(mask, value);
1846   set_gc_state_all_threads(_gc_state.raw_value());
1847 }
1848 
1849 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1850   if (has_forwarded_objects()) {
1851     set_gc_state_mask(MARKING | UPDATEREFS, in_progress);
1852   } else {
1853     set_gc_state_mask(MARKING, in_progress);
1854   }
1855   ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1856 }
1857 
1858 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1859   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
1860   set_gc_state_mask(EVACUATION, in_progress);
1861 }
1862 
1863 void ShenandoahHeap::ref_processing_init() {
1864   assert(_max_workers > 0, "Sanity");
1865 
1866   bool mt_processing = ParallelRefProcEnabled && (ParallelGCThreads > 1);
1867   bool mt_discovery = _max_workers > 1;
1868 
1869   _ref_processor =
1870     new ReferenceProcessor(&_subject_to_discovery,  // is_subject_to_discovery
1871                            mt_processing,           // MT processing
1872                            _max_workers,            // Degree of MT processing
1873                            mt_discovery,            // MT discovery
1874                            _max_workers,            // Degree of MT discovery
1875                            false,                   // Reference discovery is not atomic
1876                            NULL,                    // No closure, should be installed before use
1877                            true);                   // Scale worker threads
1878 
1879   log_info(gc, init)("Reference processing: %s discovery, %s processing",
1880           mt_discovery ? "parallel" : "serial",
1881           mt_processing ? "parallel" : "serial");
1882 
1883   shenandoah_assert_rp_isalive_not_installed();
1884 }
1885 
1886 GCTracer* ShenandoahHeap::tracer() {
1887   return shenandoah_policy()->tracer();
1888 }
1889 
1890 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
1891   return _free_set->used();
1892 }
1893 
1894 void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
1895   if (try_cancel_gc()) {
1896     FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
1897     log_info(gc)("%s", msg.buffer());
1898     Events::log(Thread::current(), "%s", msg.buffer());
1899   }
1900 }
1901 
1902 uint ShenandoahHeap::max_workers() {
1903   return _max_workers;
1904 }
1905 
1906 void ShenandoahHeap::stop() {
1907   // The shutdown sequence should be able to terminate when GC is running.
1908 
1909   // Step 0. Notify policy to disable event recording.
1910   _shenandoah_policy->record_shutdown();
1911 
1912   // Step 1. Notify control thread that we are in shutdown.
1913   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
1914   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
1915   control_thread()->prepare_for_graceful_shutdown();
1916 
1917   // Step 2. Notify GC workers that we are cancelling GC.
1918   cancel_gc(GCCause::_shenandoah_stop_vm);
1919 
1920   // Step 3. Wait until GC worker exits normally.
1921   control_thread()->stop();
1922 
1923   // Step 4. Stop String Dedup thread if it is active
1924   if (ShenandoahStringDedup::is_enabled()) {
1925     ShenandoahStringDedup::stop();
1926   }
1927 }
1928 
1929 void ShenandoahHeap::stw_unload_classes(bool full_gc) {
1930   if (!unload_classes()) return;
1931 
1932   // Unload classes and purge SystemDictionary.
1933   {
1934     ShenandoahGCSubPhase phase(full_gc ?
1935                                ShenandoahPhaseTimings::full_gc_purge_class_unload :
1936                                ShenandoahPhaseTimings::purge_class_unload);
1937     bool purged_class = SystemDictionary::do_unloading(gc_timer());
1938 
1939     ShenandoahIsAliveSelector is_alive;
1940     uint num_workers = _workers->active_workers();
1941     ParallelCleaningTask unlink_task(is_alive.is_alive_closure(), true, true, num_workers, purged_class);
1942     _workers->run_task(&unlink_task);
1943   }
1944 
1945   {
1946     ShenandoahGCSubPhase phase(full_gc ?
1947                                ShenandoahPhaseTimings::full_gc_purge_cldg :
1948                                ShenandoahPhaseTimings::purge_cldg);
1949     ClassLoaderDataGraph::purge();
1950   }
1951   // Resize and verify metaspace
1952   MetaspaceGC::compute_new_size();
1953   MetaspaceUtils::verify_metrics();
1954 }
1955 
1956 // Process leftover weak oops: update them if needed, or assert that they do not
1957 // need updating otherwise.
1958 // The weak processor API requires us to visit the oops, even if we are not doing
1959 // anything to them.
1960 void ShenandoahHeap::stw_process_weak_roots(bool full_gc) {
1961   ShenandoahGCSubPhase root_phase(full_gc ?
1962                                   ShenandoahPhaseTimings::full_gc_purge :
1963                                   ShenandoahPhaseTimings::purge);
1964   uint num_workers = _workers->active_workers();
1965   ShenandoahPhaseTimings::Phase timing_phase = full_gc ?
1966                                                ShenandoahPhaseTimings::full_gc_purge_weak_par :
1967                                                ShenandoahPhaseTimings::purge_weak_par;
1968   ShenandoahGCSubPhase phase(timing_phase);
1969 
1970   // Cleanup weak roots
1971   if (has_forwarded_objects()) {
1972     ShenandoahForwardedIsAliveClosure is_alive;
1973     ShenandoahUpdateRefsClosure keep_alive;
1974     ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveClosure, ShenandoahUpdateRefsClosure>
1975       cleaning_task(timing_phase, &is_alive, &keep_alive, num_workers);
1976     _workers->run_task(&cleaning_task);
1977   } else {
1978     ShenandoahIsAliveClosure is_alive;
1979 #ifdef ASSERT
1980     ShenandoahAssertNotForwardedClosure verify_cl;
1981     ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, ShenandoahAssertNotForwardedClosure>
1982       cleaning_task(timing_phase, &is_alive, &verify_cl, num_workers);
1983 #else
1984     ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, DoNothingClosure>
1985       cleaning_task(timing_phase, &is_alive, &do_nothing_cl, num_workers);
1986 #endif
1987     _workers->run_task(&cleaning_task);
1988   }
1989 }
1990 
1991 void ShenandoahHeap::parallel_cleaning(bool full_gc) {
1992   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
1993   stw_process_weak_roots(full_gc);
1994   stw_unload_classes(full_gc);
1995 }
1996 
1997 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
1998   set_gc_state_mask(HAS_FORWARDED, cond);
1999 }
2000 
2001 void ShenandoahHeap::set_process_references(bool pr) {
2002   _process_references.set_cond(pr);
2003 }
2004 
2005 void ShenandoahHeap::set_unload_classes(bool uc) {
2006   _unload_classes.set_cond(uc);
2007 }
2008 
2009 bool ShenandoahHeap::process_references() const {
2010   return _process_references.is_set();
2011 }
2012 
2013 bool ShenandoahHeap::unload_classes() const {
2014   return _unload_classes.is_set();
2015 }
2016 
2017 address ShenandoahHeap::in_cset_fast_test_addr() {
2018   ShenandoahHeap* heap = ShenandoahHeap::heap();
2019   assert(heap->collection_set() != NULL, "Sanity");
2020   return (address) heap->collection_set()->biased_map_address();
2021 }
2022 
2023 address ShenandoahHeap::cancelled_gc_addr() {
2024   return (address) ShenandoahHeap::heap()->_cancelled_gc.addr_of();
2025 }
2026 
2027 address ShenandoahHeap::gc_state_addr() {
2028   return (address) ShenandoahHeap::heap()->_gc_state.addr_of();
2029 }
2030 
2031 size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
2032   return OrderAccess::load_acquire(&_bytes_allocated_since_gc_start);
2033 }
2034 
2035 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
2036   OrderAccess::release_store_fence(&_bytes_allocated_since_gc_start, (size_t)0);
2037 }
2038 
2039 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
2040   _degenerated_gc_in_progress.set_cond(in_progress);
2041 }
2042 
2043 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2044   _full_gc_in_progress.set_cond(in_progress);
2045 }
2046 
2047 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2048   assert (is_full_gc_in_progress(), "should be");
2049   _full_gc_move_in_progress.set_cond(in_progress);
2050 }
2051 
2052 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2053   set_gc_state_mask(UPDATEREFS, in_progress);
2054 }
2055 
2056 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2057   ShenandoahCodeRoots::add_nmethod(nm);
2058 }
2059 
2060 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
2061   ShenandoahCodeRoots::remove_nmethod(nm);
2062 }
2063 
2064 oop ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
2065   heap_region_containing(o)->record_pin();
2066   return o;
2067 }
2068 
2069 void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
2070   heap_region_containing(o)->record_unpin();
2071 }
2072 
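     // Reconcile the region pinned/unpinned states with the pin counts maintained by
     // pin_object()/unpin_object(), under the heap lock.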
2073 void ShenandoahHeap::sync_pinned_region_status() {
2074   ShenandoahHeapLocker locker(lock());
2075 
2076   for (size_t i = 0; i < num_regions(); i++) {
2077     ShenandoahHeapRegion *r = get_region(i);
2078     if (r->is_active()) {
2079       if (r->is_pinned()) {
2080         if (r->pin_count() == 0) {
2081           r->make_unpinned();
2082         }
2083       } else {
2084         if (r->pin_count() > 0) {
2085           r->make_pinned();
2086         }
2087       }
2088     }
2089   }
2090 
2091   assert_pinned_region_status();
2092 }
2093 
2094 #ifdef ASSERT
2095 void ShenandoahHeap::assert_pinned_region_status() {
2096   for (size_t i = 0; i < num_regions(); i++) {
2097     ShenandoahHeapRegion* r = get_region(i);
2098     assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
2099            "Region " SIZE_FORMAT " pinning status is inconsistent", i);
2100   }
2101 }
2102 #endif
2103 
2104 ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
2105   return _gc_timer;
2106 }
2107 
2108 #ifdef ASSERT
2109 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
2110   assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
2111 
2112   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
2113     if (UseDynamicNumberOfGCThreads ||
2114         (FLAG_IS_DEFAULT(ParallelGCThreads) && ForceDynamicNumberOfGCThreads)) {
2115       assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");
2116     } else {
2117       // Use ParallelGCThreads inside safepoints
2118       assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints");
2119     }
2120   } else {
2121     if (UseDynamicNumberOfGCThreads ||
2122         (FLAG_IS_DEFAULT(ConcGCThreads) && ForceDynamicNumberOfGCThreads)) {
2123       assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
2124     } else {
2125       // Use ConcGCThreads outside safepoints
2126       assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
2127     }
2128   }
2129 }
2130 #endif
2131 
2132 ShenandoahVerifier* ShenandoahHeap::verifier() {
2133   guarantee(ShenandoahVerify, "Should be enabled");
2134   assert (_verifier != NULL, "sanity");
2135   return _verifier;
2136 }
2137 
2138 template<class T>
2139 class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
2140 private:
2141   T cl;
2142   ShenandoahHeap* _heap;
2143   ShenandoahRegionIterator* _regions;
2144   bool _concurrent;
2145 public:
2146   ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions, bool concurrent) :
2147     AbstractGangTask("Concurrent Update References Task"),
2148     cl(T()),
2149     _heap(ShenandoahHeap::heap()),
2150     _regions(regions),
2151     _concurrent(concurrent) {
2152   }
2153 
2154   void work(uint worker_id) {
2155     if (_concurrent) {
2156       ShenandoahConcurrentWorkerSession worker_session(worker_id);
2157       ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
2158       do_work();
2159     } else {
2160       ShenandoahParallelWorkerSession worker_session(worker_id);
2161       do_work();
2162     }
2163   }
2164 
2165 private:
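       // Workers pull regions from the shared iterator; for each active non-cset region,
       // references in marked objects are updated up to the region's update watermark.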
2166   void do_work() {
2167     ShenandoahHeapRegion* r = _regions->next();
2168     ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
2169     while (r != NULL) {
2170       HeapWord* update_watermark = r->get_update_watermark();
2171       assert (update_watermark >= r->bottom(), "sanity");
2172       if (r->is_active() && !r->is_cset()) {
2173         _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2174       }
2175       if (ShenandoahPacing) {
2176         _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
2177       }
2178       if (_heap->check_cancelled_gc_and_yield(_concurrent)) {
2179         return;
2180       }
2181       r = _regions->next();
2182     }
2183   }
2184 };
2185 
2186 void ShenandoahHeap::update_heap_references(bool concurrent) {
2187   ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsClosure> task(&_update_refs_iterator, concurrent);
2188   workers()->run_task(&task);
2189 }
2190 
2191 void ShenandoahHeap::op_init_updaterefs() {
2192   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2193 
2194   set_evacuation_in_progress(false);
2195 
2196   {
2197     ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::init_update_refs_retire_gclabs);
2198     retire_and_reset_gclabs();
2199   }
2200 
2201   if (ShenandoahVerify) {
2202     if (!is_degenerated_gc_in_progress()) {
2203       verifier()->verify_roots_no_forwarded_except(ShenandoahRootVerifier::ThreadRoots);
2204     }
2205     verifier()->verify_before_updaterefs();
2206   }
2207 
2208   set_update_refs_in_progress(true);
2209 
2210   _update_refs_iterator.reset();
2211 
2212   if (ShenandoahPacing) {
2213     pacer()->setup_for_updaterefs();
2214   }
2215 }
2216 
2217 class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
2218 private:
2219   ShenandoahHeapLock* const _lock;
2220 
2221 public:
2222   ShenandoahFinalUpdateRefsUpdateRegionStateClosure() : _lock(ShenandoahHeap::heap()->lock()) {}
2223 
2224   void heap_region_do(ShenandoahHeapRegion* r) {
2225     // Drop unnecessary "pinned" state from regions that do not have CP marks
2226     // anymore, as this allows trashing them.
2227 
2228     if (r->is_active()) {
2229       if (r->is_pinned()) {
2230         if (r->pin_count() == 0) {
2231           ShenandoahHeapLocker locker(_lock);
2232           r->make_unpinned();
2233         }
2234       } else {
2235         if (r->pin_count() > 0) {
2236           ShenandoahHeapLocker locker(_lock);
2237           r->make_pinned();
2238         }
2239       }
2240     }
2241   }
2242 
2243   bool is_thread_safe() { return true; }
2244 };
2245 
2246 void ShenandoahHeap::op_final_updaterefs() {
2247   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2248 
2249   // Check if there is left-over work, and finish it
2250   if (_update_refs_iterator.has_next()) {
2251     ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::final_update_refs_finish_work);
2252 
2253     // Finish updating references where we left off.
2254     clear_cancelled_gc();
2255     update_heap_references(false);
2256   }
2257 
2258   // Clear cancelled GC, if set. On cancellation path, the block before would handle
2259   // everything. On degenerated paths, cancelled gc would not be set anyway.
2260   if (cancelled_gc()) {
2261     clear_cancelled_gc();
2262   }
2263   assert(!cancelled_gc(), "Should have been done right before");
2264 
2265   if (ShenandoahVerify && !is_degenerated_gc_in_progress()) {
2266     verifier()->verify_roots_no_forwarded_except(ShenandoahRootVerifier::ThreadRoots);
2267   }
2268 
2269   if (is_degenerated_gc_in_progress()) {
2270     concurrent_mark()->update_roots(ShenandoahPhaseTimings::degen_gc_update_roots);
2271   } else {
2272     concurrent_mark()->update_thread_roots(ShenandoahPhaseTimings::final_update_refs_roots);
2273   }
2274 
2275   // Has to be done before the cset is cleared
2276   if (ShenandoahVerify) {
2277     verifier()->verify_roots_in_to_space();
2278   }
2279 
2280   {
2281     ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::final_update_refs_update_region_states);
2282     ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl;
2283     parallel_heap_region_iterate(&cl);
2284 
2285     assert_pinned_region_status();
2286   }
2287 
2288   {
2289     ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::final_update_refs_trash_cset);
2290     trash_cset_regions();
2291   }
2292 
2293   set_has_forwarded_objects(false);
2294   set_update_refs_in_progress(false);
2295 
2296   if (ShenandoahVerify) {
2297     verifier()->verify_after_updaterefs();
2298   }
2299 
2300   if (VerifyAfterGC) {
2301     Universe::verify();
2302   }
2303 
2304   {
2305     ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::final_update_refs_rebuild_freeset);
2306     ShenandoahHeapLocker locker(lock());
2307     _free_set->rebuild();
2308   }
2309 }
2310 
2311 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2312   print_on(st);
2313   print_heap_regions_on(st);
2314 }
2315 
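     // A bitmap "slice" backs the marking bitmap of a group of regions, so its commit
     // state is shared: it may only be uncommitted when no region in the group needs it.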
2316 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2317   size_t slice = r->index() / _bitmap_regions_per_slice;
2318 
2319   size_t regions_from = _bitmap_regions_per_slice * slice;
2320   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2321   for (size_t g = regions_from; g < regions_to; g++) {
2322     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2323     if (skip_self && g == r->index()) continue;
2324     if (get_region(g)->is_committed()) {
2325       return true;
2326     }
2327   }
2328   return false;
2329 }
2330 
2331 bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2332   shenandoah_assert_heaplocked();
2333 
2334   // Bitmaps in special regions do not need commits
2335   if (_bitmap_region_special) {
2336     return true;
2337   }
2338 
2339   if (is_bitmap_slice_committed(r, true)) {
2340     // Some other region from the group is already committed, meaning the bitmap
2341     // slice is already committed; exit right away.
2342     return true;
2343   }
2344 
2345   // Commit the bitmap slice:
2346   size_t slice = r->index() / _bitmap_regions_per_slice;
2347   size_t off = _bitmap_bytes_per_slice * slice;
2348   size_t len = _bitmap_bytes_per_slice;
2349   char* start = (char*) _bitmap_region.start() + off;
2350 
2351   if (!os::commit_memory(start, len, false)) {
2352     return false;
2353   }
2354 
2355   if (AlwaysPreTouch) {
2356     os::pretouch_memory(start, start + len, _pretouch_bitmap_page_size);
2357   }
2358 
2359   return true;
2360 }
2361 
2362 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2363   shenandoah_assert_heaplocked();
2364 
2365   // Bitmaps in special regions do not need uncommits
2366   if (_bitmap_region_special) {
2367     return true;
2368   }
2369 
2370   if (is_bitmap_slice_committed(r, true)) {
2371     // Some other region from the group is still committed, meaning the bitmap
2372     // slice should stay committed; exit right away.
2373     return true;
2374   }
2375 
2376   // Uncommit the bitmap slice:
2377   size_t slice = r->index() / _bitmap_regions_per_slice;
2378   size_t off = _bitmap_bytes_per_slice * slice;
2379   size_t len = _bitmap_bytes_per_slice;
2380   if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
2381     return false;
2382   }
2383   return true;
2384 }
2385 
2386 void ShenandoahHeap::safepoint_synchronize_begin() {
2387   if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
2388     SuspendibleThreadSet::synchronize();
2389   }
2390 }
2391 
2392 void ShenandoahHeap::safepoint_synchronize_end() {
2393   if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
2394     SuspendibleThreadSet::desynchronize();
2395   }
2396 }
2397 
2398 void ShenandoahHeap::vmop_entry_init_mark() {
2399   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2400   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark_gross);
2401 
2402   try_inject_alloc_failure();
2403   VM_ShenandoahInitMark op;
2404   VMThread::execute(&op); // jump to entry_init_mark() under safepoint
2405 }
2406 
2407 void ShenandoahHeap::vmop_entry_final_mark() {
2408   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2409   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark_gross);
2410 
2411   try_inject_alloc_failure();
2412   VM_ShenandoahFinalMarkStartEvac op;
2413   VMThread::execute(&op); // jump to entry_final_mark under safepoint
2414 }
2415 
2416 void ShenandoahHeap::vmop_entry_init_updaterefs() {
2417   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2418   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_gross);
2419 
2420   try_inject_alloc_failure();
2421   VM_ShenandoahInitUpdateRefs op;
2422   VMThread::execute(&op);
2423 }
2424 
2425 void ShenandoahHeap::vmop_entry_final_updaterefs() {
2426   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2427   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_gross);
2428 
2429   try_inject_alloc_failure();
2430   VM_ShenandoahFinalUpdateRefs op;
2431   VMThread::execute(&op);
2432 }
2433 
2434 void ShenandoahHeap::vmop_entry_full(GCCause::Cause cause) {
2435   TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
2436   ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_gross);
2437 
2438   try_inject_alloc_failure();
2439   VM_ShenandoahFullGC op(cause);
2440   VMThread::execute(&op);
2441 }
2442 
2443 void ShenandoahHeap::vmop_degenerated(ShenandoahDegenPoint point) {
2444   TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
2445   ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_gross);
2446 
2447   VM_ShenandoahDegeneratedGC degenerated_gc((int)point);
2448   VMThread::execute(&degenerated_gc);
2449 }
2450 
2451 void ShenandoahHeap::entry_init_mark() {
2452   const char* msg = init_mark_event_message();
2453   ShenandoahPausePhase gc_phase(msg);
2454   EventMark em("%s", msg);
2455 
2456   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark);
2457 
2458   ShenandoahWorkerScope scope(workers(),
2459                               ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
2460                               "init marking");
2461 
2462   op_init_mark();
2463 }
2464 
2465 void ShenandoahHeap::entry_final_mark() {
2466   const char* msg = final_mark_event_message();
2467   ShenandoahPausePhase gc_phase(msg);
2468   EventMark em("%s", msg);
2469 
2470   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark);
2471 
2472   ShenandoahWorkerScope scope(workers(),
2473                               ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
2474                               "final marking");
2475 
2476   op_final_mark();
2477 }
2478 
2479 void ShenandoahHeap::entry_init_updaterefs() {
2480   static const char* msg = "Pause Init Update Refs";
2481   ShenandoahPausePhase gc_phase(msg);
2482   EventMark em("%s", msg);
2483 
2484   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs);
2485 
2486   // No workers used in this phase, no setup required
2487 
2488   op_init_updaterefs();
2489 }
2490 
2491 void ShenandoahHeap::entry_final_updaterefs() {
2492   static const char* msg = "Pause Final Update Refs";
2493   ShenandoahPausePhase gc_phase(msg);
2494   EventMark em("%s", msg);
2495 
2496   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs);
2497 
2498   ShenandoahWorkerScope scope(workers(),
2499                               ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
2500                               "final reference update");
2501 
2502   op_final_updaterefs();
2503 }
2504 
2505 void ShenandoahHeap::entry_full(GCCause::Cause cause) {
2506   static const char* msg = "Pause Full";
2507   ShenandoahPausePhase gc_phase(msg, true /* log_heap_usage */);
2508   EventMark em("%s", msg);
2509 
2510   ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc);
2511 
2512   ShenandoahWorkerScope scope(workers(),
2513                               ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
2514                               "full gc");
2515 
2516   op_full(cause);
2517 }
2518 
2519 void ShenandoahHeap::entry_degenerated(int point) {
2520   ShenandoahDegenPoint dpoint = (ShenandoahDegenPoint)point;
2521   const char* msg = degen_event_message(dpoint);
2522   ShenandoahPausePhase gc_phase(msg, true /* log_heap_usage */);
2523   EventMark em("%s", msg);
2524 
2525   ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc);
2526 
2527   ShenandoahWorkerScope scope(workers(),
2528                               ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated(),
2529                               "stw degenerated gc");
2530 
2531   set_degenerated_gc_in_progress(true);
2532   op_degenerated(dpoint);
2533   set_degenerated_gc_in_progress(false);
2534 }
2535 
2536 void ShenandoahHeap::entry_mark() {
2537   TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
2538 
2539   const char* msg = conc_mark_event_message();
2540   ShenandoahConcurrentPhase gc_phase(msg);
2541   EventMark em("%s", msg);
2542 
2543   ShenandoahGCPhase conc_mark_phase(ShenandoahPhaseTimings::conc_mark);
2544 
2545   ShenandoahWorkerScope scope(workers(),
2546                               ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
2547                               "concurrent marking");
2548 
2549   try_inject_alloc_failure();
2550   op_mark();
2551 }
2552 
2553 void ShenandoahHeap::entry_evac() {
2554   TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
2555 
2556   static const char* msg = "Concurrent evacuation";
2557   ShenandoahConcurrentPhase gc_phase(msg);
2558   EventMark em("%s", msg);
2559 
2560   ShenandoahGCPhase conc_evac_phase(ShenandoahPhaseTimings::conc_evac);
2561 
2562   ShenandoahWorkerScope scope(workers(),
2563                               ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
2564                               "concurrent evacuation");
2565 
2566   try_inject_alloc_failure();
2567   op_conc_evac();
2568 }
2569 
2570 void ShenandoahHeap::entry_updaterefs() {
2571   static const char* msg = "Concurrent update references";
2572   ShenandoahConcurrentPhase gc_phase(msg);
2573   EventMark em("%s", msg);
2574 
2575   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_update_refs);
2576 
2577   ShenandoahWorkerScope scope(workers(),
2578                               ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
2579                               "concurrent reference update");
2580 
2581   try_inject_alloc_failure();
2582   op_updaterefs();
2583 }
2584 
2585 void ShenandoahHeap::entry_cleanup_early() {
2586   static const char* msg = "Concurrent cleanup";
2587   ShenandoahConcurrentPhase gc_phase(msg,  true /* log_heap_usage */);
2588   EventMark em("%s", msg);
2589 
2590   ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::conc_cleanup_early);
2591 
2592   // This phase does not use workers, no need for setup
2593 
2594   try_inject_alloc_failure();
2595   op_cleanup_early();
2596 }
2597 
2598 void ShenandoahHeap::entry_cleanup_complete() {
2599   static const char* msg = "Concurrent cleanup";
2600   ShenandoahConcurrentPhase gc_phase(msg,  true /* log_heap_usage */);
2601   EventMark em("%s", msg);
2602 
2603   ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::conc_cleanup_complete);
2604 
2605   // This phase does not use workers, no need for setup
2606 
2607   try_inject_alloc_failure();
2608   op_cleanup_complete();
2609 }
2610 
2611 void ShenandoahHeap::entry_reset() {
2612   static const char* msg = "Concurrent reset";
2613   ShenandoahConcurrentPhase gc_phase(msg);
2614   EventMark em("%s", msg);
2615 
2616   ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::conc_reset);
2617 
2618   ShenandoahWorkerScope scope(workers(),
2619                               ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
2620                               "concurrent reset");
2621 
2622   try_inject_alloc_failure();
2623   op_reset();
2624 }
2625 
2626 void ShenandoahHeap::entry_preclean() {
2627   if (ShenandoahPreclean && process_references()) {
2628     static const char* msg = "Concurrent precleaning";
2629     ShenandoahConcurrentPhase gc_phase(msg);
2630     EventMark em("%s", msg);
2631 
2632     ShenandoahGCSubPhase conc_preclean(ShenandoahPhaseTimings::conc_preclean);
2633 
2634     ShenandoahWorkerScope scope(workers(),
2635                                 ShenandoahWorkerPolicy::calc_workers_for_conc_preclean(),
2636                                 "concurrent preclean",
2637                                 /* check_workers = */ false);
2638 
2639     try_inject_alloc_failure();
2640     op_preclean();
2641   }
2642 }
2643 
2644 void ShenandoahHeap::entry_uncommit(double shrink_before) {
2645   static const char *msg = "Concurrent uncommit";
2646   ShenandoahConcurrentPhase gc_phase(msg, true /* log_heap_usage */);
2647   EventMark em("%s", msg);
2648 
2649   ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::conc_uncommit);
2650 
2651   op_uncommit(shrink_before);
2652 }
2653 
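     // With ShenandoahAllocFailureALot, roughly 5% of calls set the injection flag, then
     // sleep briefly and check whether the injected failure actually cancelled the GC.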
2654 void ShenandoahHeap::try_inject_alloc_failure() {
2655   if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2656     _inject_alloc_failure.set();
2657     os::naked_short_sleep(1);
2658     if (cancelled_gc()) {
2659       log_info(gc)("Allocation failure was successfully injected");
2660     }
2661   }
2662 }
2663 
2664 bool ShenandoahHeap::should_inject_alloc_failure() {
2665   return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2666 }
2667 
2668 void ShenandoahHeap::initialize_serviceability() {
2669   _memory_pool = new ShenandoahMemoryPool(this);
2670   _cycle_memory_manager.add_pool(_memory_pool);
2671   _stw_memory_manager.add_pool(_memory_pool);
2672 }
2673 
2674 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2675   GrowableArray<GCMemoryManager*> memory_managers(2);
2676   memory_managers.append(&_cycle_memory_manager);
2677   memory_managers.append(&_stw_memory_manager);
2678   return memory_managers;
2679 }
2680 
2681 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2682   GrowableArray<MemoryPool*> memory_pools(1);
2683   memory_pools.append(_memory_pool);
2684   return memory_pools;
2685 }
2686 
2687 MemoryUsage ShenandoahHeap::memory_usage() {
2688   return _memory_pool->get_memory_usage();
2689 }
2690 
2691 void ShenandoahHeap::enter_evacuation() {
2692   _oom_evac_handler.enter_evacuation();
2693 }
2694 
2695 void ShenandoahHeap::leave_evacuation() {
2696   _oom_evac_handler.leave_evacuation();
2697 }
2698 
2699 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2700   _heap(ShenandoahHeap::heap()),
2701   _index(0) {}
2702 
2703 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2704   _heap(heap),
2705   _index(0) {}
2706 
2707 void ShenandoahRegionIterator::reset() {
2708   _index = 0;
2709 }
2710 
2711 bool ShenandoahRegionIterator::has_next() const {
2712   return _index < _heap->num_regions();
2713 }
2714 
2715 char ShenandoahHeap::gc_state() const {
2716   return _gc_state.raw_value();
2717 }
2718 
2719 void ShenandoahHeap::deduplicate_string(oop str) {
2720   assert(java_lang_String::is_instance(str), "invariant");
2721 
2722   if (ShenandoahStringDedup::is_enabled()) {
2723     ShenandoahStringDedup::deduplicate(str);
2724   }
2725 }
2726 
2727 const char* ShenandoahHeap::init_mark_event_message() const {
2728   assert(!has_forwarded_objects(), "Should not have forwarded objects here");
2729 
2730   bool proc_refs = process_references();
2731   bool unload_cls = unload_classes();
2732 
2733   if (proc_refs && unload_cls) {
2734     return "Pause Init Mark (process weakrefs) (unload classes)";
2735   } else if (proc_refs) {
2736     return "Pause Init Mark (process weakrefs)";
2737   } else if (unload_cls) {
2738     return "Pause Init Mark (unload classes)";
2739   } else {
2740     return "Pause Init Mark";
2741   }
2742 }
2743 
2744 const char* ShenandoahHeap::final_mark_event_message() const {
2745   assert(!has_forwarded_objects(), "Should not have forwarded objects here");
2746 
2747   bool proc_refs = process_references();
2748   bool unload_cls = unload_classes();
2749 
2750   if (proc_refs && unload_cls) {
2751     return "Pause Final Mark (process weakrefs) (unload classes)";
2752   } else if (proc_refs) {
2753     return "Pause Final Mark (process weakrefs)";
2754   } else if (unload_cls) {
2755     return "Pause Final Mark (unload classes)";
2756   } else {
2757     return "Pause Final Mark";
2758   }
2759 }
2760 
2761 const char* ShenandoahHeap::conc_mark_event_message() const {
2762   assert(!has_forwarded_objects(), "Should not have forwarded objects here");
2763 
2764   bool proc_refs = process_references();
2765   bool unload_cls = unload_classes();
2766 
2767   if (proc_refs && unload_cls) {
2768     return "Concurrent marking (process weakrefs) (unload classes)";
2769   } else if (proc_refs) {
2770     return "Concurrent marking (process weakrefs)";
2771   } else if (unload_cls) {
2772     return "Concurrent marking (unload classes)";
2773   } else {
2774     return "Concurrent marking";
2775   }
2776 }
2777 
2778 const char* ShenandoahHeap::degen_event_message(ShenandoahDegenPoint point) const {
2779   switch (point) {
2780     case _degenerated_unset:
2781       return "Pause Degenerated GC (<UNSET>)";
2782     case _degenerated_outside_cycle:
2783       return "Pause Degenerated GC (Outside of Cycle)";
2784     case _degenerated_mark:
2785       return "Pause Degenerated GC (Mark)";
2786     case _degenerated_evac:
2787       return "Pause Degenerated GC (Evacuation)";
2788     case _degenerated_updaterefs:
2789       return "Pause Degenerated GC (Update Refs)";
2790     default:
2791       ShouldNotReachHere();
2792       return "ERROR";
2793   }
2794 }
2795 
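     // Per-worker liveness caches accumulate live data during marking; they are drained
     // into the owning regions by flush_liveness_cache().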
2796 ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
2797 #ifdef ASSERT
2798   assert(_liveness_cache != NULL, "sanity");
2799   assert(worker_id < _max_workers, "sanity");
2800   for (uint i = 0; i < num_regions(); i++) {
2801     assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
2802   }
2803 #endif
2804   return _liveness_cache[worker_id];
2805 }
2806 
2807 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
2808   assert(worker_id < _max_workers, "sanity");
2809   assert(_liveness_cache != NULL, "sanity");
2810   ShenandoahLiveData* ld = _liveness_cache[worker_id];
2811   for (uint i = 0; i < num_regions(); i++) {
2812     ShenandoahLiveData live = ld[i];
2813     if (live > 0) {
2814       ShenandoahHeapRegion* r = get_region(i);
2815       r->increase_live_data_gc_words(live);
2816       ld[i] = 0;
2817     }
2818   }
2819 }