1 /*
   2  * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "memory/allocation.hpp"
  26 
  27 #include "gc_implementation/shared/gcTimer.hpp"
  28 #include "gc_implementation/shenandoah/shenandoahGCTraceTime.hpp"
  29 
  30 #include "gc_implementation/shenandoah/shenandoahBarrierSet.hpp"
  31 #include "gc_implementation/shenandoah/shenandoahClosures.inline.hpp"
  32 #include "gc_implementation/shenandoah/shenandoahCollectionSet.hpp"
  33 #include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp"
  34 #include "gc_implementation/shenandoah/shenandoahConcurrentMark.inline.hpp"
  35 #include "gc_implementation/shenandoah/shenandoahControlThread.hpp"
  36 #include "gc_implementation/shenandoah/shenandoahFreeSet.hpp"
  37 #include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp"
  38 #include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
  39 #include "gc_implementation/shenandoah/shenandoahHeapRegion.inline.hpp"
  40 #include "gc_implementation/shenandoah/shenandoahHeapRegionSet.hpp"
  41 #include "gc_implementation/shenandoah/shenandoahMarkCompact.hpp"
  42 #include "gc_implementation/shenandoah/shenandoahMarkingContext.inline.hpp"
  43 #include "gc_implementation/shenandoah/shenandoahMonitoringSupport.hpp"
  44 #include "gc_implementation/shenandoah/shenandoahMetrics.hpp"
  45 #include "gc_implementation/shenandoah/shenandoahOopClosures.inline.hpp"
  46 #include "gc_implementation/shenandoah/shenandoahPacer.inline.hpp"
  47 #include "gc_implementation/shenandoah/shenandoahPadding.hpp"
  48 #include "gc_implementation/shenandoah/shenandoahParallelCleaning.hpp"
  49 #include "gc_implementation/shenandoah/shenandoahRootProcessor.inline.hpp"
  50 #include "gc_implementation/shenandoah/shenandoahTaskqueue.hpp"
  51 #include "gc_implementation/shenandoah/shenandoahUtils.hpp"
  52 #include "gc_implementation/shenandoah/shenandoahVerifier.hpp"
  53 #include "gc_implementation/shenandoah/shenandoahCodeRoots.hpp"
  54 #include "gc_implementation/shenandoah/shenandoahVMOperations.hpp"
  55 #include "gc_implementation/shenandoah/shenandoahWorkGroup.hpp"
  56 #include "gc_implementation/shenandoah/shenandoahWorkerPolicy.hpp"
  57 #include "gc_implementation/shenandoah/heuristics/shenandoahHeuristics.hpp"
  58 #include "gc_implementation/shenandoah/mode/shenandoahIUMode.hpp"
  59 #include "gc_implementation/shenandoah/mode/shenandoahPassiveMode.hpp"
  60 #include "gc_implementation/shenandoah/mode/shenandoahSATBMode.hpp"
  61 #if INCLUDE_JFR
  62 #include "gc_implementation/shenandoah/shenandoahJfrSupport.hpp"
  63 #endif
  64 
  65 #include "memory/metaspace.hpp"
  66 #include "runtime/vmThread.hpp"
  67 #include "services/mallocTracker.hpp"
  68 
  69 ShenandoahHeap* ShenandoahHeap::_heap = NULL;
  70 
  71 class ShenandoahPretouchHeapTask : public AbstractGangTask {
  72 private:
  73   ShenandoahRegionIterator _regions;
  74   const size_t _page_size;
  75 public:
  76   ShenandoahPretouchHeapTask(size_t page_size) :
  77     AbstractGangTask("Shenandoah Pretouch Heap"),
  78     _page_size(page_size) {}
  79 
  80   virtual void work(uint worker_id) {
  81     ShenandoahHeapRegion* r = _regions.next();
  82     while (r != NULL) {
  83       if (r->is_committed()) {
  84         os::pretouch_memory((char *) r->bottom(), (char *) r->end());
  85       }
  86       r = _regions.next();
  87     }
  88   }
  89 };
  90 
  91 class ShenandoahPretouchBitmapTask : public AbstractGangTask {
  92 private:
  93   ShenandoahRegionIterator _regions;
  94   char* _bitmap_base;
  95   const size_t _bitmap_size;
  96   const size_t _page_size;
  97 public:
  98   ShenandoahPretouchBitmapTask(char* bitmap_base, size_t bitmap_size, size_t page_size) :
  99     AbstractGangTask("Shenandoah Pretouch Bitmap"),
 100     _bitmap_base(bitmap_base),
 101     _bitmap_size(bitmap_size),
 102     _page_size(page_size) {}
 103 
 104   virtual void work(uint worker_id) {
 105     ShenandoahHeapRegion* r = _regions.next();
 106     while (r != NULL) {
 107       size_t start = r->index()       * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 108       size_t end   = (r->index() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 109       assert (end <= _bitmap_size, err_msg("end is sane: " SIZE_FORMAT " < " SIZE_FORMAT, end, _bitmap_size));
 110 
 111       if (r->is_committed()) {
 112         os::pretouch_memory(_bitmap_base + start, _bitmap_base + end);
 113       }
 114 
 115       r = _regions.next();
 116     }
 117   }
 118 };
 119 
 120 jint ShenandoahHeap::initialize() {
 121   CollectedHeap::pre_initialize();
 122 
 123   //
 124   // Figure out heap sizing
 125   //
 126 
 127   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
 128   size_t min_byte_size  = collector_policy()->min_heap_byte_size();
 129   size_t max_byte_size  = collector_policy()->max_heap_byte_size();
 130   size_t heap_alignment = collector_policy()->heap_alignment();
 131 
 132   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 133 
 134   Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
 135   Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
 136 
 137   _num_regions = ShenandoahHeapRegion::region_count();
 138   assert(_num_regions == (max_byte_size / reg_size_bytes),
 139          err_msg("Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT,
 140                  _num_regions, max_byte_size, reg_size_bytes));
 141   // Now we know the number of regions, initialize the heuristics.
 142   initialize_heuristics();
 143 
 144   size_t num_committed_regions = init_byte_size / reg_size_bytes;
 145   num_committed_regions = MIN2(num_committed_regions, _num_regions);
 146   assert(num_committed_regions <= _num_regions, "sanity");
 147   _initial_size = num_committed_regions * reg_size_bytes;
 148 
 149   size_t num_min_regions = min_byte_size / reg_size_bytes;
 150   num_min_regions = MIN2(num_min_regions, _num_regions);
 151   assert(num_min_regions <= _num_regions, "sanity");
 152   _minimum_size = num_min_regions * reg_size_bytes;
 153 
 154   // Default to max heap size.
 155   _soft_max_size = _num_regions * reg_size_bytes;
 156 
 157   _committed = _initial_size;
 158 
 159   size_t heap_page_size   = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 160   size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 161   size_t region_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 162 
 163   //
 164   // Reserve and commit memory for heap
 165   //
 166 
 167   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
 168   _reserved.set_word_size(0);
 169   _reserved.set_start((HeapWord*)heap_rs.base());
 170   _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
 171   _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
 172   _heap_region_special = heap_rs.special();
 173 
 174   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
 175          err_msg("Misaligned heap: " PTR_FORMAT, p2i(base())));
 176 
 177 #if SHENANDOAH_OPTIMIZED_MARKTASK
 178   // The optimized ObjArrayChunkedTask takes some bits away from the full object bits.
 179   // Fail if we ever attempt to address more than we can.
 180   if ((uintptr_t)(heap_rs.base() + heap_rs.size()) >= ShenandoahMarkTask::max_addressable()) {
 181     FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
 182                           "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
 183                           "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
 184                 p2i(heap_rs.base()), p2i(heap_rs.base() + heap_rs.size()), ShenandoahMarkTask::max_addressable());
 185     vm_exit_during_initialization("Fatal Error", buf);
 186   }
 187 #endif
 188 
 189   ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
 190   if (!_heap_region_special) {
 191     os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
 192                               "Cannot commit heap memory");
 193   }
 194 
 195   //
 196   // Reserve and commit memory for bitmap(s)
 197   //
 198 
 199   _bitmap_size = MarkBitMap::compute_size(heap_rs.size());
 200   _bitmap_size = align_size_up(_bitmap_size, bitmap_page_size);
 201 
 202   size_t bitmap_bytes_per_region = reg_size_bytes / MarkBitMap::heap_map_factor();
 203 
 204   guarantee(bitmap_bytes_per_region != 0,
 205             err_msg("Bitmap bytes per region should not be zero"));
 206   guarantee(is_power_of_2(bitmap_bytes_per_region),
 207             err_msg("Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region));
 208 
 209   if (bitmap_page_size > bitmap_bytes_per_region) {
 210     _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
 211     _bitmap_bytes_per_slice = bitmap_page_size;
 212   } else {
 213     _bitmap_regions_per_slice = 1;
 214     _bitmap_bytes_per_slice = bitmap_bytes_per_region;
 215   }
 216 
 217   guarantee(_bitmap_regions_per_slice >= 1,
 218             err_msg("Should have at least one region per slice: " SIZE_FORMAT,
 219                     _bitmap_regions_per_slice));
 220 
 221   guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
 222             err_msg("Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
 223                     _bitmap_bytes_per_slice, bitmap_page_size));
 224 
 225   ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
 226   MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
 227   _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
 228   _bitmap_region_special = bitmap.special();
 229 
 230   size_t bitmap_init_commit = _bitmap_bytes_per_slice *
 231                               align_size_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
 232   bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
 233   if (!_bitmap_region_special) {
 234     os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
 235                               "Cannot commit bitmap memory");
 236   }
 237 
 238   _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);
 239 
 240   if (ShenandoahVerify) {
 241     ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
 242     if (!verify_bitmap.special()) {
 243       os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
 244                                 "Cannot commit verification bitmap memory");
 245     }
 246     MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
 247     MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
 248     _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
 249     _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
 250   }
 251 
 252   // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
 253   ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
 254   MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
 255   _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
 256   _aux_bitmap_region_special = aux_bitmap.special();
 257   _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
 258 
 259   //
 260   // Create regions and region sets
 261   //
 262   size_t region_align = align_size_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE);
 263   size_t region_storage_size = align_size_up(region_align * _num_regions, region_page_size);
 264   region_storage_size = align_size_up(region_storage_size, os::vm_allocation_granularity());
 265 
 266   ReservedSpace region_storage(region_storage_size, region_page_size);
 267   MemTracker::record_virtual_memory_type(region_storage.base(), mtGC);
 268   if (!region_storage.special()) {
 269     os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false,
 270                               "Cannot commit region memory");
 271   }
 272 
 273   // Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks.
 274   // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there.
 275   // If not successful, bite a bullet and allocate at whatever address.
 276   {
 277     size_t cset_align = MAX2<size_t>(os::vm_page_size(), os::vm_allocation_granularity());
 278     size_t cset_size = align_size_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align);
 279 
 280     uintptr_t min = ShenandoahUtils::round_up_power_of_2(cset_align);
 281     uintptr_t max = (1u << 30u);
 282 
 283     for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
 284       char* req_addr = (char*)addr;
 285       assert(is_ptr_aligned(req_addr, cset_align), "Should be aligned");
 286       ReservedSpace cset_rs(cset_size, cset_align, false, req_addr);
 287       if (cset_rs.is_reserved()) {
 288         assert(cset_rs.base() == req_addr, err_msg("Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr));
 289         _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 290         break;
 291       }
 292     }
 293 
 294     if (_collection_set == NULL) {
 295       ReservedSpace cset_rs(cset_size, cset_align, false);
 296       _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 297     }
 298   }
 299 
 300   _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
 301   _free_set = new ShenandoahFreeSet(this, _num_regions);
 302 
 303   {
 304     ShenandoahHeapLocker locker(lock());
 305 
 306     for (size_t i = 0; i < _num_regions; i++) {
 307       HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
 308       bool is_committed = i < num_committed_regions;
 309       void* loc = region_storage.base() + i * region_align;
 310 
 311       ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
 312       assert(is_ptr_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");
 313 
 314       _marking_context->initialize_top_at_mark_start(r);
 315       _regions[i] = r;
 316       assert(!collection_set()->is_in(i), "New region should not be in collection set");
 317     }
 318 
 319     // Initialize to complete
 320     _marking_context->mark_complete();
 321 
 322     _free_set->rebuild();
 323   }
 324 
 325   if (AlwaysPreTouch) {
 326     // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
 327     // before initialize() below zeroes it with initializing thread. For any given region,
 328     // we touch the region and the corresponding bitmaps from the same thread.
 329     ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
 330 
 331     _pretouch_heap_page_size = heap_page_size;
 332     _pretouch_bitmap_page_size = bitmap_page_size;
 333 
 334 #ifdef LINUX
 335     // UseTransparentHugePages would madvise that backing memory can be coalesced into huge
 336     // pages. But, the kernel needs to know that every small page is used, in order to coalesce
 337     // them into huge one. Therefore, we need to pretouch with smaller pages.
 338     if (UseTransparentHugePages) {
 339       _pretouch_heap_page_size = (size_t)os::vm_page_size();
 340       _pretouch_bitmap_page_size = (size_t)os::vm_page_size();
 341     }
 342 #endif
 343 
 344     // OS memory managers may want to coalesce back-to-back pages. Make their jobs
 345     // simpler by pre-touching continuous spaces (heap and bitmap) separately.
 346 
 347     ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, _pretouch_bitmap_page_size);
 348     _workers->run_task(&bcl);
 349 
 350     ShenandoahPretouchHeapTask hcl(_pretouch_heap_page_size);
 351     _workers->run_task(&hcl);
 352   }
 353 
 354   //
 355   // Initialize the rest of GC subsystems
 356   //
 357 
 358   set_barrier_set(new ShenandoahBarrierSet(this));
 359 
 360   _liveness_cache = NEW_C_HEAP_ARRAY(ShenandoahLiveData*, _max_workers, mtGC);
 361   for (uint worker = 0; worker < _max_workers; worker++) {
 362     _liveness_cache[worker] = NEW_C_HEAP_ARRAY(ShenandoahLiveData, _num_regions, mtGC);
 363     Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
 364   }
 365 
 366   // The call below uses stuff (the SATB* things) that are in G1, but probably
 367   // belong into a shared location.
 368   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
 369                                                SATB_Q_FL_lock,
 370                                                20 /*G1SATBProcessCompletedThreshold */,
 371                                                Shared_SATB_Q_lock);
 372 
 373   _monitoring_support = new ShenandoahMonitoringSupport(this);
 374   _phase_timings = new ShenandoahPhaseTimings(max_workers());
 375   ShenandoahStringDedup::initialize();
 376   ShenandoahCodeRoots::initialize();
 377 
 378   if (ShenandoahPacing) {
 379     _pacer = new ShenandoahPacer(this);
 380     _pacer->setup_for_idle();
 381   } else {
 382     _pacer = NULL;
 383   }
 384 
 385   _control_thread = new ShenandoahControlThread();
 386 
 387   log_info(gc, init)("Initialize Shenandoah heap: " SIZE_FORMAT "%s initial, " SIZE_FORMAT "%s min, " SIZE_FORMAT "%s max",
 388                      byte_size_in_proper_unit(_initial_size),  proper_unit_for_byte_size(_initial_size),
 389                      byte_size_in_proper_unit(_minimum_size),  proper_unit_for_byte_size(_minimum_size),
 390                      byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity())
 391   );
 392 
 393   return JNI_OK;
 394 }
 395 
 396 #ifdef _MSC_VER
 397 #pragma warning( push )
 398 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 399 #endif
 400 
 401 void ShenandoahHeap::initialize_heuristics() {
 402   if (ShenandoahGCMode != NULL) {
 403     if (strcmp(ShenandoahGCMode, "satb") == 0) {
 404       _gc_mode = new ShenandoahSATBMode();
 405     } else if (strcmp(ShenandoahGCMode, "iu") == 0) {
 406       _gc_mode = new ShenandoahIUMode();
 407     } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
 408       _gc_mode = new ShenandoahPassiveMode();
 409     } else {
 410       vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
 411     }
 412   } else {
 413     ShouldNotReachHere();
 414   }
 415   _gc_mode->initialize_flags();
 416   if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 417     vm_exit_during_initialization(
 418             err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 419                     _gc_mode->name()));
 420   }
 421   if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
 422     vm_exit_during_initialization(
 423             err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 424                     _gc_mode->name()));
 425   }
 426   log_info(gc, init)("Shenandoah GC mode: %s",
 427                      _gc_mode->name());
 428 
 429   _heuristics = _gc_mode->initialize_heuristics();
 430 
 431   if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 432     vm_exit_during_initialization(
 433             err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 434                     _heuristics->name()));
 435   }
 436   if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
 437     vm_exit_during_initialization(
 438             err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 439                     _heuristics->name()));
 440   }
 441   log_info(gc, init)("Shenandoah heuristics: %s",
 442                      _heuristics->name());
 443 }
 444 
 445 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 446   SharedHeap(policy),
 447   _shenandoah_policy(policy),
 448   _heap_region_special(false),
 449   _regions(NULL),
 450   _free_set(NULL),
 451   _collection_set(NULL),
 452   _update_refs_iterator(this),
 453   _bytes_allocated_since_gc_start(0),
 454   _max_workers((uint)MAX2(ConcGCThreads, ParallelGCThreads)),
 455   _ref_processor(NULL),
 456   _marking_context(NULL),
 457   _bitmap_size(0),
 458   _bitmap_regions_per_slice(0),
 459   _bitmap_bytes_per_slice(0),
 460   _bitmap_region_special(false),
 461   _aux_bitmap_region_special(false),
 462   _liveness_cache(NULL),
 463   _aux_bit_map(),
 464   _verifier(NULL),
 465   _pacer(NULL),
 466   _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 467   _phase_timings(NULL)
 468 {
 469   _heap = this;
 470 
 471   log_info(gc, init)("GC threads: " UINTX_FORMAT " parallel, " UINTX_FORMAT " concurrent", ParallelGCThreads, ConcGCThreads);
 472 
 473   _scm = new ShenandoahConcurrentMark();
 474 
 475   _full_gc = new ShenandoahMarkCompact();
 476   _used = 0;
 477 
 478   _max_workers = MAX2(_max_workers, 1U);
 479 
 480   // SharedHeap did not initialize this for us, and we want our own workgang anyway.
 481   assert(SharedHeap::_workers == NULL && _workers == NULL, "Should not be initialized yet");
 482   _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
 483                             /* are_GC_task_threads */true,
 484                             /* are_ConcurrentGC_threads */false);
 485   if (_workers == NULL) {
 486     vm_exit_during_initialization("Failed necessary allocation.");
 487   } else {
 488     _workers->initialize_workers();
 489   }
 490   assert(SharedHeap::_workers == _workers, "Sanity: initialized the correct field");
 491 }
 492 
 493 #ifdef _MSC_VER
 494 #pragma warning( pop )
 495 #endif
 496 
 497 class ShenandoahResetBitmapTask : public AbstractGangTask {
 498 private:
 499   ShenandoahRegionIterator _regions;
 500 
 501 public:
 502   ShenandoahResetBitmapTask() :
 503     AbstractGangTask("Parallel Reset Bitmap Task") {}
 504 
 505   void work(uint worker_id) {
 506     ShenandoahHeapRegion* region = _regions.next();
 507     ShenandoahHeap* heap = ShenandoahHeap::heap();
 508     ShenandoahMarkingContext* const ctx = heap->marking_context();
 509     while (region != NULL) {
 510       if (heap->is_bitmap_slice_committed(region)) {
 511         ctx->clear_bitmap(region);
 512       }
 513       region = _regions.next();
 514     }
 515   }
 516 };
 517 
 518 void ShenandoahHeap::reset_mark_bitmap() {
 519   assert_gc_workers(_workers->active_workers());
 520   mark_incomplete_marking_context();
 521 
 522   ShenandoahResetBitmapTask task;
 523   _workers->run_task(&task);
 524 }
 525 
 526 void ShenandoahHeap::print_on(outputStream* st) const {
 527   st->print_cr("Shenandoah Heap");
 528   st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
 529                byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
 530                byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
 531                byte_size_in_proper_unit(committed()),    proper_unit_for_byte_size(committed()),
 532                byte_size_in_proper_unit(used()),         proper_unit_for_byte_size(used()));
 533   st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions",
 534                num_regions(),
 535                byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
 536                proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));
 537 
 538   st->print("Status: ");
 539   if (has_forwarded_objects())               st->print("has forwarded objects, ");
 540   if (is_concurrent_mark_in_progress())      st->print("marking, ");
 541   if (is_evacuation_in_progress())           st->print("evacuating, ");
 542   if (is_update_refs_in_progress())          st->print("updating refs, ");
 543   if (is_degenerated_gc_in_progress())       st->print("degenerated gc, ");
 544   if (is_full_gc_in_progress())              st->print("full gc, ");
 545   if (is_full_gc_move_in_progress())         st->print("full gc move, ");
 546 
 547   if (cancelled_gc()) {
 548     st->print("cancelled");
 549   } else {
 550     st->print("not cancelled");
 551   }
 552   st->cr();
 553 
 554   st->print_cr("Reserved region:");
 555   st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
 556                p2i(reserved_region().start()),
 557                p2i(reserved_region().end()));
 558 
 559   ShenandoahCollectionSet* cset = collection_set();
 560   st->print_cr("Collection set:");
 561   if (cset != NULL) {
 562     st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address()));
 563     st->print_cr(" - map (biased):  " PTR_FORMAT, p2i(cset->biased_map_address()));
 564   } else {
 565     st->print_cr(" (NULL)");
 566   }
 567 
 568   st->cr();
 569   MetaspaceAux::print_on(st);
 570 
 571   if (Verbose) {
 572     print_heap_regions_on(st);
 573   }
 574 }
 575 
 576 class ShenandoahInitGCLABClosure : public ThreadClosure {
 577 public:
 578   void do_thread(Thread* thread) {
 579     assert(thread == NULL || !thread->is_Java_thread(), "Don't expect JavaThread this early");
 580     if (thread != NULL && thread->is_Worker_thread()) {
 581       thread->gclab().initialize(true);
 582     }
 583   }
 584 };
 585 
 586 void ShenandoahHeap::post_initialize() {
 587   if (UseTLAB) {
 588     MutexLocker ml(Threads_lock);
 589 
 590     ShenandoahInitGCLABClosure init_gclabs;
 591     Threads::threads_do(&init_gclabs);
 592   }
 593 
 594   _scm->initialize(_max_workers);
 595   _full_gc->initialize(_gc_timer);
 596 
 597   ref_processing_init();
 598 
 599   _heuristics->initialize();
 600 
 601   JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers());
 602 }
 603 
 604 size_t ShenandoahHeap::used() const {
 605   OrderAccess::acquire();
 606   return (size_t) _used;
 607 }
 608 
 609 size_t ShenandoahHeap::committed() const {
 610   OrderAccess::acquire();
 611   return _committed;
 612 }
 613 
 614 void ShenandoahHeap::increase_committed(size_t bytes) {
 615   shenandoah_assert_heaplocked_or_safepoint();
 616   _committed += bytes;
 617 }
 618 
 619 void ShenandoahHeap::decrease_committed(size_t bytes) {
 620   shenandoah_assert_heaplocked_or_safepoint();
 621   _committed -= bytes;
 622 }
 623 
 624 void ShenandoahHeap::increase_used(size_t bytes) {
 625   Atomic::add(bytes, &_used);
 626 }
 627 
 628 void ShenandoahHeap::set_used(size_t bytes) {
 629   OrderAccess::release_store_fence(&_used, bytes);
 630 }
 631 
 632 void ShenandoahHeap::decrease_used(size_t bytes) {
 633   assert(used() >= bytes, "never decrease heap size by more than we've left");
 634   Atomic::add(-(jlong)bytes, &_used);
 635 }
 636 
 637 void ShenandoahHeap::increase_allocated(size_t bytes) {
 638   Atomic::add(bytes, &_bytes_allocated_since_gc_start);
 639 }
 640 
 641 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
 642   size_t bytes = words * HeapWordSize;
 643   if (!waste) {
 644     increase_used(bytes);
 645   }
 646   increase_allocated(bytes);
 647   if (ShenandoahPacing) {
 648     control_thread()->pacing_notify_alloc(words);
 649     if (waste) {
 650       pacer()->claim_for_alloc(words, true);
 651     }
 652   }
 653 }
 654 
 655 size_t ShenandoahHeap::capacity() const {
 656   return committed();
 657 }
 658 
 659 size_t ShenandoahHeap::max_capacity() const {
 660   return _num_regions * ShenandoahHeapRegion::region_size_bytes();
 661 }
 662 
 663 size_t ShenandoahHeap::soft_max_capacity() const {
 664   size_t v = OrderAccess::load_acquire((volatile size_t*)&_soft_max_size);
 665   assert(min_capacity() <= v && v <= max_capacity(),
 666          err_msg("Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
 667                  min_capacity(), v, max_capacity()));
 668   return v;
 669 }
 670 
 671 void ShenandoahHeap::set_soft_max_capacity(size_t v) {
 672   assert(min_capacity() <= v && v <= max_capacity(),
 673          err_msg("Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
 674                  min_capacity(), v, max_capacity()));
 675   OrderAccess::release_store_fence(&_soft_max_size, v);
 676 }
 677 
 678 size_t ShenandoahHeap::min_capacity() const {
 679   return _minimum_size;
 680 }
 681 
 682 size_t ShenandoahHeap::initial_capacity() const {
 683   return _initial_size;
 684 }
 685 
 686 bool ShenandoahHeap::is_in(const void* p) const {
 687   HeapWord* heap_base = (HeapWord*) base();
 688   HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
 689   return p >= heap_base && p < last_region_end;
 690 }
 691 
 692 void ShenandoahHeap::op_uncommit(double shrink_before, size_t shrink_until) {
 693   assert (ShenandoahUncommit, "should be enabled");
 694 
 695   // Application allocates from the beginning of the heap, and GC allocates at
 696   // the end of it. It is more efficient to uncommit from the end, so that applications
 697   // could enjoy the near committed regions. GC allocations are much less frequent,
 698   // and therefore can accept the committing costs.
 699 
 700   size_t count = 0;
 701   for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow
 702     ShenandoahHeapRegion* r = get_region(i - 1);
 703     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
 704       ShenandoahHeapLocker locker(lock());
 705       if (r->is_empty_committed()) {
 706         if (committed() < shrink_until + ShenandoahHeapRegion::region_size_bytes()) {
 707           break;
 708         }
 709 
 710         r->make_uncommitted();
 711         count++;
 712       }
 713     }
 714     SpinPause(); // allow allocators to take the lock
 715   }
 716 
 717   if (count > 0) {
 718     _control_thread->notify_heap_changed();
 719   }
 720 }
 721 
 722 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
 723   // Retain tlab and allocate object in shared space if
 724   // the amount free in the tlab is too large to discard.
 725   if (thread->gclab().free() > thread->gclab().refill_waste_limit()) {
 726     thread->gclab().record_slow_allocation(size);
 727     return NULL;
 728   }
 729 
 730   // Discard gclab and allocate a new one.
 731   // To minimize fragmentation, the last GCLAB may be smaller than the rest.
 732   size_t new_gclab_size = thread->gclab().compute_size(size);
 733 
 734   thread->gclab().clear_before_allocation();
 735 
 736   if (new_gclab_size == 0) {
 737     return NULL;
 738   }
 739 
 740   // Allocated object should fit in new GCLAB, and new_gclab_size should be larger than min
 741   size_t min_size = MAX2(size + ThreadLocalAllocBuffer::alignment_reserve(), ThreadLocalAllocBuffer::min_size());
 742   new_gclab_size = MAX2(new_gclab_size, min_size);
 743 
 744   // Allocate a new GCLAB...
 745   size_t actual_size = 0;
 746   HeapWord* obj = allocate_new_gclab(min_size, new_gclab_size, &actual_size);
 747 
 748   if (obj == NULL) {
 749     return NULL;
 750   }
 751 
 752   assert (size <= actual_size, "allocation should fit");
 753 
 754   if (ZeroTLAB) {
 755     // ..and clear it.
 756     Copy::zero_to_words(obj, actual_size);
 757   } else {
 758     // ...and zap just allocated object.
 759 #ifdef ASSERT
 760     // Skip mangling the space corresponding to the object header to
 761     // ensure that the returned space is not considered parsable by
 762     // any concurrent GC thread.
 763     size_t hdr_size = oopDesc::header_size();
 764     Copy::fill_to_words(obj + hdr_size, actual_size - hdr_size, badHeapWordVal);
 765 #endif // ASSERT
 766   }
 767   thread->gclab().fill(obj, obj + size, actual_size);
 768   return obj;
 769 }
 770 
 771 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size) {
 772   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(word_size);
 773   return allocate_memory(req);
 774 }
 775 
 776 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
 777                                              size_t word_size,
 778                                              size_t* actual_size) {
 779   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
 780   HeapWord* res = allocate_memory(req);
 781   if (res != NULL) {
 782     *actual_size = req.actual_size();
 783   } else {
 784     *actual_size = 0;
 785   }
 786   return res;
 787 }
 788 
 789 HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
 790   intptr_t pacer_epoch = 0;
 791   bool in_new_region = false;
 792   HeapWord* result = NULL;
 793 
 794   if (req.is_mutator_alloc()) {
 795     if (ShenandoahPacing) {
 796       pacer()->pace_for_alloc(req.size());
 797       pacer_epoch = pacer()->epoch();
 798     }
 799 
 800     if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
 801       result = allocate_memory_under_lock(req, in_new_region);
 802     }
 803 
 804     // Allocation failed, block until control thread reacted, then retry allocation.
 805     //
 806     // It might happen that one of the threads requesting allocation would unblock
 807     // way later after GC happened, only to fail the second allocation, because
 808     // other threads have already depleted the free storage. In this case, a better
 809     // strategy is to try again, as long as GC makes progress.
 810     //
 811     // Then, we need to make sure the allocation was retried after at least one
 812     // Full GC, which means we want to try more than ShenandoahFullGCThreshold times.
 813 
 814     size_t tries = 0;
 815 
 816     while (result == NULL && _progress_last_gc.is_set()) {
 817       tries++;
 818       control_thread()->handle_alloc_failure(req);
 819       result = allocate_memory_under_lock(req, in_new_region);
 820     }
 821 
 822     while (result == NULL && tries <= ShenandoahFullGCThreshold) {
 823       tries++;
 824       control_thread()->handle_alloc_failure(req);
 825       result = allocate_memory_under_lock(req, in_new_region);
 826     }
 827 
 828   } else {
 829     assert(req.is_gc_alloc(), "Can only accept GC allocs here");
 830     result = allocate_memory_under_lock(req, in_new_region);
 831     // Do not call handle_alloc_failure() here, because we cannot block.
 832     // The allocation failure would be handled by the WB slowpath with handle_alloc_failure_evac().
 833   }
 834 
 835   if (in_new_region) {
 836     control_thread()->notify_heap_changed();
 837   }
 838 
 839   if (result != NULL) {
 840     size_t requested = req.size();
 841     size_t actual = req.actual_size();
 842 
 843     assert (req.is_lab_alloc() || (requested == actual),
 844             err_msg("Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
 845                     ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual));
 846 
 847     if (req.is_mutator_alloc()) {
 848       notify_mutator_alloc_words(actual, false);
 849 
 850       // If we requested more than we were granted, give the rest back to pacer.
 851       // This only matters if we are in the same pacing epoch: do not try to unpace
 852       // over the budget for the other phase.
 853       if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
 854         pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
 855       }
 856     } else {
 857       increase_used(actual*HeapWordSize);
 858     }
 859   }
 860 
 861   return result;
 862 }
 863 
 864 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
 865   ShenandoahHeapLocker locker(lock());
 866   return _free_set->allocate(req, in_new_region);
 867 }
 868 
 869 HeapWord*  ShenandoahHeap::mem_allocate(size_t size,
 870                                         bool*  gc_overhead_limit_was_exceeded) {
 871   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
 872   return allocate_memory(req);
 873 }
 874 
 875 class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
 876 private:
 877   ShenandoahHeap* const _heap;
 878   Thread* const _thread;
 879 public:
 880   ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
 881     _heap(heap), _thread(Thread::current()) {}
 882 
 883   void do_object(oop p) {
 884     shenandoah_assert_marked(NULL, p);
 885     if (!p->is_forwarded()) {
 886       _heap->evacuate_object(p, _thread);
 887     }
 888   }
 889 };
 890 
 891 class ShenandoahEvacuationTask : public AbstractGangTask {
 892 private:
 893   ShenandoahHeap* const _sh;
 894   ShenandoahCollectionSet* const _cs;
 895   bool _concurrent;
 896 public:
 897   ShenandoahEvacuationTask(ShenandoahHeap* sh,
 898                            ShenandoahCollectionSet* cs,
 899                            bool concurrent) :
 900     AbstractGangTask("Parallel Evacuation Task"),
 901     _sh(sh),
 902     _cs(cs),
 903     _concurrent(concurrent)
 904   {}
 905 
 906   void work(uint worker_id) {
 907     ShenandoahEvacOOMScope oom_evac_scope;
 908     if (_concurrent) {
 909       ShenandoahConcurrentWorkerSession worker_session(worker_id);
 910       do_work();
 911     } else {
 912       ShenandoahParallelWorkerSession worker_session(worker_id);
 913       do_work();
 914     }
 915   }
 916 
 917 private:
 918   void do_work() {
 919     ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
 920     ShenandoahHeapRegion* r;
 921     while ((r =_cs->claim_next()) != NULL) {
 922       assert(r->has_live(), err_msg("Region " SIZE_FORMAT " should have been reclaimed early", r->index()));
 923       _sh->marked_object_iterate(r, &cl);
 924 
 925       if (ShenandoahPacing) {
 926         _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
 927       }
 928 
 929       if (_sh->cancelled_gc()) {
 930         break;
 931       }
 932     }
 933   }
 934 };
 935 
 936 void ShenandoahHeap::trash_cset_regions() {
 937   ShenandoahHeapLocker locker(lock());
 938 
 939   ShenandoahCollectionSet* set = collection_set();
 940   ShenandoahHeapRegion* r;
 941   set->clear_current_index();
 942   while ((r = set->next()) != NULL) {
 943     r->make_trash();
 944   }
 945   collection_set()->clear();
 946 }
 947 
 948 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
 949   st->print_cr("Heap Regions:");
 950   st->print_cr("EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HC=humongous continuation, CS=collection set, T=trash, P=pinned");
 951   st->print_cr("BTE=bottom/top/end, U=used, T=TLAB allocs, G=GCLAB allocs, S=shared allocs, L=live data");
 952   st->print_cr("R=root, CP=critical pins, TAMS=top-at-mark-start, UWM=update watermark");
 953   st->print_cr("SN=alloc sequence number");
 954 
 955   for (size_t i = 0; i < num_regions(); i++) {
 956     get_region(i)->print_on(st);
 957   }
 958 }
 959 
 960 void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
 961   assert(start->is_humongous_start(), "reclaim regions starting with the first one");
 962 
 963   oop humongous_obj = oop(start->bottom());
 964   size_t size = humongous_obj->size();
 965   size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
 966   size_t index = start->index() + required_regions - 1;
 967 
 968   assert(!start->has_live(), "liveness must be zero");
 969 
 970   for(size_t i = 0; i < required_regions; i++) {
 971      // Reclaim from tail. Otherwise, assertion fails when printing region to trace log,
 972      // as it expects that every region belongs to a humongous region starting with a humongous start region.
 973      ShenandoahHeapRegion* region = get_region(index --);
 974 
 975     assert(region->is_humongous(), "expect correct humongous start or continuation");
 976     assert(!region->is_cset(), "Humongous region should not be in collection set");
 977 
 978     region->make_trash_immediate();
 979   }
 980 }
 981 
 982 class ShenandoahRetireGCLABClosure : public ThreadClosure {
 983 private:
 984   bool _retire;
 985 public:
 986   ShenandoahRetireGCLABClosure(bool retire) : _retire(retire) {};
 987 
 988   void do_thread(Thread* thread) {
 989     assert(thread->gclab().is_initialized(), err_msg("GCLAB should be initialized for %s", thread->name()));
 990     thread->gclab().make_parsable(_retire);
 991   }
 992 };
 993 
 994 void ShenandoahHeap::make_parsable(bool retire_tlabs) {
 995   if (UseTLAB) {
 996     CollectedHeap::ensure_parsability(retire_tlabs);
 997     ShenandoahRetireGCLABClosure cl(retire_tlabs);
 998     Threads::java_threads_do(&cl);
 999     _workers->threads_do(&cl);
1000   }
1001 }
1002 
1003 class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
1004 private:
1005   ShenandoahRootEvacuator* _rp;
1006 
1007 public:
1008   ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
1009     AbstractGangTask("Shenandoah evacuate and update roots"),
1010     _rp(rp) {}
1011 
1012   void work(uint worker_id) {
1013     ShenandoahParallelWorkerSession worker_session(worker_id);
1014     ShenandoahEvacOOMScope oom_evac_scope;
1015     ShenandoahEvacuateUpdateRootsClosure cl;
1016 
1017     MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
1018     _rp->roots_do(worker_id, &cl);
1019   }
1020 };
1021 
1022 void ShenandoahHeap::evacuate_and_update_roots() {
1023   COMPILER2_PRESENT(DerivedPointerTable::clear());
1024 
1025   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");
1026 
1027   {
1028     ShenandoahRootEvacuator rp(ShenandoahPhaseTimings::init_evac);
1029     ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
1030     workers()->run_task(&roots_task);
1031   }
1032 
1033   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
1034 }
1035 
1036 size_t  ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1037   // Returns size in bytes
1038   return MIN2(_free_set->unsafe_peek_free(), ShenandoahHeapRegion::max_tlab_size_bytes());
1039 }
1040 
1041 size_t ShenandoahHeap::max_tlab_size() const {
1042   // Returns size in words
1043   return ShenandoahHeapRegion::max_tlab_size_words();
1044 }
1045 
1046 class ShenandoahResizeGCLABClosure : public ThreadClosure {
1047 public:
1048   void do_thread(Thread* thread) {
1049     assert(thread->gclab().is_initialized(), err_msg("GCLAB should be initialized for %s", thread->name()));
1050     thread->gclab().resize();
1051   }
1052 };
1053 
1054 void ShenandoahHeap::resize_all_tlabs() {
1055   CollectedHeap::resize_all_tlabs();
1056 
1057   ShenandoahResizeGCLABClosure cl;
1058   Threads::java_threads_do(&cl);
1059   _workers->threads_do(&cl);
1060 }
1061 
1062 class ShenandoahAccumulateStatisticsGCLABClosure : public ThreadClosure {
1063 public:
1064   void do_thread(Thread* thread) {
1065     assert(thread->gclab().is_initialized(), err_msg("GCLAB should be initialized for %s", thread->name()));
1066     thread->gclab().accumulate_statistics();
1067     thread->gclab().initialize_statistics();
1068   }
1069 };
1070 
1071 void ShenandoahHeap::accumulate_statistics_all_gclabs() {
1072   ShenandoahAccumulateStatisticsGCLABClosure cl;
1073   Threads::java_threads_do(&cl);
1074   _workers->threads_do(&cl);
1075 }
1076 
1077 void ShenandoahHeap::collect(GCCause::Cause cause) {
1078   _control_thread->request_gc(cause);
1079 }
1080 
1081 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
1082   //assert(false, "Shouldn't need to do full collections");
1083 }
1084 
1085 CollectorPolicy* ShenandoahHeap::collector_policy() const {
1086   return _shenandoah_policy;
1087 }
1088 
1089 void ShenandoahHeap::resize_tlabs() {
1090   CollectedHeap::resize_all_tlabs();
1091 }
1092 
1093 void ShenandoahHeap::accumulate_statistics_tlabs() {
1094   CollectedHeap::accumulate_statistics_all_tlabs();
1095 }
1096 
1097 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1098   ShenandoahHeapRegion* r = heap_region_containing(addr);
1099   if (r != NULL) {
1100     return r->block_start(addr);
1101   }
1102   return NULL;
1103 }
1104 
1105 size_t ShenandoahHeap::block_size(const HeapWord* addr) const {
1106   ShenandoahHeapRegion* r = heap_region_containing(addr);
1107   return r->block_size(addr);
1108 }
1109 
1110 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1111   ShenandoahHeapRegion* r = heap_region_containing(addr);
1112   return r->block_is_obj(addr);
1113 }
1114 
1115 jlong ShenandoahHeap::millis_since_last_gc() {
1116   double v = heuristics()->time_since_last_gc() * 1000;
1117   assert(0 <= v && v <= max_jlong, err_msg("value should fit: %f", v));
1118   return (jlong)v;
1119 }
1120 
1121 void ShenandoahHeap::prepare_for_verify() {
1122   if (SafepointSynchronize::is_at_safepoint()) {
1123     make_parsable(false);
1124   }
1125 }
1126 
1127 void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
1128   workers()->print_worker_threads_on(st);
1129   if (ShenandoahStringDedup::is_enabled()) {
1130     ShenandoahStringDedup::print_worker_threads_on(st);
1131   }
1132 }
1133 
1134 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1135   workers()->threads_do(tcl);
1136   if (ShenandoahStringDedup::is_enabled()) {
1137     ShenandoahStringDedup::threads_do(tcl);
1138   }
1139 }
1140 
1141 void ShenandoahHeap::print_tracing_info() const {
1142   if (PrintGC || TraceGen0Time || TraceGen1Time) {
1143     ResourceMark rm;
1144     outputStream* out = gclog_or_tty;
1145     phase_timings()->print_global_on(out);
1146 
1147     out->cr();
1148     out->cr();
1149 
1150     shenandoah_policy()->print_gc_stats(out);
1151 
1152     out->cr();
1153     out->cr();
1154   }
1155 }
1156 
1157 void ShenandoahHeap::verify(bool silent, VerifyOption vo) {
1158   if (ShenandoahSafepoint::is_at_shenandoah_safepoint() || ! UseTLAB) {
1159     if (ShenandoahVerify) {
1160       verifier()->verify_generic(vo);
1161     } else {
1162       // TODO: Consider allocating verification bitmaps on demand,
1163       // and turn this on unconditionally.
1164     }
1165   }
1166 }
1167 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1168   return _free_set->capacity();
1169 }
1170 
1171 class ObjectIterateScanRootClosure : public ExtendedOopClosure {
1172 private:
1173   MarkBitMap* _bitmap;
1174   Stack<oop,mtGC>* _oop_stack;
1175 
1176   template <class T>
1177   void do_oop_work(T* p) {
1178     T o = oopDesc::load_heap_oop(p);
1179     if (!oopDesc::is_null(o)) {
1180       oop obj = oopDesc::decode_heap_oop_not_null(o);
1181       obj = (oop) ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
1182       assert(obj->is_oop(), "must be a valid oop");
1183       if (!_bitmap->isMarked((HeapWord*) obj)) {
1184         _bitmap->mark((HeapWord*) obj);
1185         _oop_stack->push(obj);
1186       }
1187     }
1188   }
1189 public:
1190   ObjectIterateScanRootClosure(MarkBitMap* bitmap, Stack<oop,mtGC>* oop_stack) :
1191     _bitmap(bitmap), _oop_stack(oop_stack) {}
1192   void do_oop(oop* p)       { do_oop_work(p); }
1193   void do_oop(narrowOop* p) { do_oop_work(p); }
1194 };
1195 
1196 /*
1197  * This is public API, used in preparation of object_iterate().
1198  * Since we don't do linear scan of heap in object_iterate() (see comment below), we don't
1199  * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can
1200  * control, we call SH::make_parsable().
1201  */
1202 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
1203   // No-op.
1204 }
1205 
1206 /*
1207  * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
1208  *
1209  * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
1210  * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
1211  * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
1212  * scanning therefore depends on having a valid marking bitmap to support it. However, we only
1213  * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
1214  * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
1215  * wiped the bitmap in preparation for next marking).
1216  *
1217  * For all those reasons, we implement object iteration as a single marking traversal, reporting
1218  * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1219  * is allowed to report dead objects, but is not required to do so.
1220  */
1221 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1222   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1223   if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1224     log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1225     return;
1226   }
1227 
1228   // Reset bitmap
1229   _aux_bit_map.clear();
1230 
1231   Stack<oop,mtGC> oop_stack;
1232  
1233   ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1234  
1235   {
1236     // First, we process GC roots according to current GC cycle.
1237     // This populates the work stack with initial objects.
1238     // It is important to relinquish the associated locks before diving
1239     // into heap dumper.
1240     ShenandoahHeapIterationRootScanner rp;
1241     rp.roots_do(&oops);
1242   }
1243 
1244   // Work through the oop stack to traverse heap.
1245   while (! oop_stack.is_empty()) {
1246     oop obj = oop_stack.pop();
1247     assert(obj->is_oop(), "must be a valid oop");
1248     cl->do_object(obj);
1249     obj->oop_iterate(&oops);
1250   }
1251 
1252   assert(oop_stack.is_empty(), "should be empty");
1253 
1254   if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
1255     log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
1256   }
1257 }
1258 
1259 void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
1260   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1261   object_iterate(cl);
1262 }
1263 
1264 void ShenandoahHeap::oop_iterate(ExtendedOopClosure* cl) {
1265   ObjectToOopClosure cl2(cl);
1266   object_iterate(&cl2);
1267 }
1268 
1269 void  ShenandoahHeap::gc_prologue(bool b) {
1270   Unimplemented();
1271 }
1272 
1273 void  ShenandoahHeap::gc_epilogue(bool b) {
1274   Unimplemented();
1275 }
1276 
1277 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1278   for (size_t i = 0; i < num_regions(); i++) {
1279     ShenandoahHeapRegion* current = get_region(i);
1280     blk->heap_region_do(current);
1281   }
1282 }
1283 
1284 class ShenandoahParallelHeapRegionTask : public AbstractGangTask {
1285 private:
1286   ShenandoahHeap* const _heap;
1287   ShenandoahHeapRegionClosure* const _blk;
1288 
1289   shenandoah_padding(0);
1290   volatile jint _index;
1291   shenandoah_padding(1);
1292 
1293 public:
1294   ShenandoahParallelHeapRegionTask(ShenandoahHeapRegionClosure* blk) :
1295           AbstractGangTask("Parallel Region Task"),
1296           _heap(ShenandoahHeap::heap()), _blk(blk), _index(0) {}
1297 
1298   void work(uint worker_id) {
1299     jint stride = (jint)ShenandoahParallelRegionStride;
1300 
1301     jint max = (jint)_heap->num_regions();
1302     while (_index < max) {
1303       jint cur = Atomic::add(stride, &_index) - stride;
1304       jint start = cur;
1305       jint end = MIN2(cur + stride, max);
1306       if (start >= max) break;
1307 
1308       for (jint i = cur; i < end; i++) {
1309         ShenandoahHeapRegion* current = _heap->get_region((size_t)i);
1310         _blk->heap_region_do(current);
1311       }
1312     }
1313   }
1314 };
1315 
1316 void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1317   assert(blk->is_thread_safe(), "Only thread-safe closures here");
1318   if (num_regions() > ShenandoahParallelRegionStride) {
1319     ShenandoahParallelHeapRegionTask task(blk);
1320     workers()->run_task(&task);
1321   } else {
1322     heap_region_iterate(blk);
1323   }
1324 }
1325 
1326 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1327 private:
1328   ShenandoahMarkingContext* const _ctx;
1329 public:
1330   ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
1331 
1332   void heap_region_do(ShenandoahHeapRegion* r) {
1333     assert(!r->has_live(),
1334            err_msg("Region " SIZE_FORMAT " should have no live data", r->index()));
1335     if (r->is_active()) {
1336       // Check if region needs updating its TAMS. We have updated it already during concurrent
1337       // reset, so it is very likely we don't need to do another write here.
1338       if (_ctx->top_at_mark_start(r) != r->top()) {
1339         _ctx->capture_top_at_mark_start(r);
1340       }
1341     } else {
1342       assert(_ctx->top_at_mark_start(r) == r->top(),
1343              err_msg("Region " SIZE_FORMAT " should already have correct TAMS", r->index()));
1344     }
1345   }
1346 
1347   bool is_thread_safe() { return true; }
1348 };
1349 
1350 void ShenandoahHeap::op_init_mark() {
1351   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1352   assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
1353 
1354   assert(marking_context()->is_bitmap_clear(), "need clear marking bitmap");
1355   assert(!marking_context()->is_complete(), "should not be complete");
1356   assert(!has_forwarded_objects(), "No forwarded objects on this path");
1357 
1358   if (ShenandoahVerify) {
1359     verifier()->verify_before_concmark();
1360   }
1361 
1362   {
1363     ShenandoahGCPhase phase(ShenandoahPhaseTimings::accumulate_stats);
1364     accumulate_statistics_tlabs();
1365   }
1366 
1367   if (VerifyBeforeGC) {
1368     Universe::verify();
1369   }
1370 
1371   set_concurrent_mark_in_progress(true);
1372   // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
1373   if (UseTLAB) {
1374     ShenandoahGCPhase phase(ShenandoahPhaseTimings::make_parsable);
1375     make_parsable(true);
1376   }
1377 
1378   {
1379     ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
1380     ShenandoahInitMarkUpdateRegionStateClosure cl;
1381     parallel_heap_region_iterate(&cl);
1382   }
1383 
1384   // Make above changes visible to worker threads
1385   OrderAccess::fence();
1386 
1387   concurrent_mark()->mark_roots(ShenandoahPhaseTimings::scan_roots);
1388 
1389   if (UseTLAB) {
1390     ShenandoahGCPhase phase(ShenandoahPhaseTimings::resize_tlabs);
1391     resize_tlabs();
1392   }
1393 
1394   if (ShenandoahPacing) {
1395     pacer()->setup_for_mark();
1396   }
1397 }
1398 
1399 void ShenandoahHeap::op_mark() {
1400   concurrent_mark()->mark_from_roots();
1401 }
1402 
1403 class ShenandoahFinalMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1404 private:
1405   ShenandoahMarkingContext* const _ctx;
1406   ShenandoahHeapLock* const _lock;
1407 
1408 public:
1409   ShenandoahFinalMarkUpdateRegionStateClosure() :
1410     _ctx(ShenandoahHeap::heap()->complete_marking_context()), _lock(ShenandoahHeap::heap()->lock()) {}
1411 
1412   void heap_region_do(ShenandoahHeapRegion* r) {
1413     if (r->is_active()) {
1414       // All allocations past TAMS are implicitly live, adjust the region data.
1415       // Bitmaps/TAMS are swapped at this point, so we need to poll complete bitmap.
1416       HeapWord *tams = _ctx->top_at_mark_start(r);
1417       HeapWord *top = r->top();
1418       if (top > tams) {
1419         r->increase_live_data_alloc_words(pointer_delta(top, tams));
1420       }
1421 
1422       // We are about to select the collection set, make sure it knows about
1423       // current pinning status. Also, this allows trashing more regions that
1424       // now have their pinning status dropped.
1425       if (r->is_pinned()) {
1426         if (r->pin_count() == 0) {
1427           ShenandoahHeapLocker locker(_lock);
1428           r->make_unpinned();
1429         }
1430       } else {
1431         if (r->pin_count() > 0) {
1432           ShenandoahHeapLocker locker(_lock);
1433           r->make_pinned();
1434         }
1435       }
1436 
1437       // Remember limit for updating refs. It's guaranteed that we get no
1438       // from-space-refs written from here on.
1439       r->set_update_watermark_at_safepoint(r->top());
1440     } else {
1441       assert(!r->has_live(),
1442              err_msg("Region " SIZE_FORMAT " should have no live data", r->index()));
1443       assert(_ctx->top_at_mark_start(r) == r->top(),
1444              err_msg("Region " SIZE_FORMAT " should have correct TAMS", r->index()));
1445     }
1446   }
1447 
1448   bool is_thread_safe() { return true; }
1449 };
1450 
1451 void ShenandoahHeap::op_final_mark() {
1452   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1453   assert(!has_forwarded_objects(), "No forwarded objects on this path");
1454 
1455   // It is critical that we
1456   // evacuate roots right after finishing marking, so that we don't
1457   // get unmarked objects in the roots.
1458 
1459   if (!cancelled_gc()) {
1460     concurrent_mark()->finish_mark_from_roots(/* full_gc = */ false);
1461 
1462     TASKQUEUE_STATS_ONLY(concurrent_mark()->task_queues()->reset_taskqueue_stats());
1463 
1464     if (ShenandoahVerify) {
1465       verifier()->verify_roots_no_forwarded();
1466     }
1467 
1468     TASKQUEUE_STATS_ONLY(concurrent_mark()->task_queues()->print_taskqueue_stats());
1469 
1470     {
1471       ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_region_states);
1472       ShenandoahFinalMarkUpdateRegionStateClosure cl;
1473       parallel_heap_region_iterate(&cl);
1474 
1475       assert_pinned_region_status();
1476     }
1477 
1478     // Force the threads to reacquire their TLABs outside the collection set.
1479     {
1480       ShenandoahGCPhase phase(ShenandoahPhaseTimings::retire_tlabs);
1481       make_parsable(true);
1482     }
1483 
1484     {
1485       ShenandoahGCPhase phase(ShenandoahPhaseTimings::choose_cset);
1486       ShenandoahHeapLocker locker(lock());
1487       _collection_set->clear();
1488       heuristics()->choose_collection_set(_collection_set);
1489     }
1490 
1491     {
1492       ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_rebuild_freeset);
1493       ShenandoahHeapLocker locker(lock());
1494       _free_set->rebuild();
1495     }
1496 
1497     // If collection set has candidates, start evacuation.
1498     // Otherwise, bypass the rest of the cycle.
1499     if (!collection_set()->is_empty()) {
1500       ShenandoahGCPhase init_evac(ShenandoahPhaseTimings::init_evac);
1501 
1502       if (ShenandoahVerify) {
1503         verifier()->verify_before_evacuation();
1504       }
1505 
1506       set_evacuation_in_progress(true);
1507       // From here on, we need to update references.
1508       set_has_forwarded_objects(true);
1509 
1510       if (!is_degenerated_gc_in_progress()) {
1511         evacuate_and_update_roots();
1512       }
1513  
1514       if (ShenandoahPacing) {
1515         pacer()->setup_for_evac();
1516       }
1517 
1518       if (ShenandoahVerify) {
1519         verifier()->verify_roots_no_forwarded();
1520         verifier()->verify_during_evacuation();
1521       }
1522     } else {
1523       if (ShenandoahVerify) {
1524         verifier()->verify_after_concmark();
1525       }
1526 
1527       if (VerifyAfterGC) {
1528         Universe::verify();
1529       }
1530     }
1531 
1532   } else {
1533     concurrent_mark()->cancel();
1534     complete_marking();
1535 
1536     if (process_references()) {
1537       // Abandon reference processing right away: pre-cleaning must have failed.
1538       ReferenceProcessor *rp = ref_processor();
1539       rp->disable_discovery();
1540       rp->abandon_partial_discovery();
1541       rp->verify_no_references_recorded();
1542     }
1543   }
1544 }
1545 
1546 void ShenandoahHeap::op_conc_evac() {
1547   ShenandoahEvacuationTask task(this, _collection_set, true);
1548   workers()->run_task(&task);
1549 }
1550 
1551 void ShenandoahHeap::op_stw_evac() {
1552   ShenandoahEvacuationTask task(this, _collection_set, false);
1553   workers()->run_task(&task);
1554 }
1555 
1556 void ShenandoahHeap::op_updaterefs() {
1557   update_heap_references(true);
1558 }
1559 
1560 void ShenandoahHeap::op_cleanup_early() {
1561   free_set()->recycle_trash();
1562 }
1563 
1564 void ShenandoahHeap::op_cleanup_complete() {
1565   free_set()->recycle_trash();
1566 }
1567 
1568 class ShenandoahResetUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1569 private:
1570   ShenandoahMarkingContext* const _ctx;
1571 public:
1572   ShenandoahResetUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
1573 
1574   void heap_region_do(ShenandoahHeapRegion* r) {
1575     if (r->is_active()) {
1576       // Reset live data and set TAMS optimistically. We would recheck these under the pause
1577       // anyway to capture any updates that happened since now.
1578       r->clear_live_data();
1579       _ctx->capture_top_at_mark_start(r);
1580     }
1581   }
1582 
1583   bool is_thread_safe() { return true; }
1584 };
1585 
1586 void ShenandoahHeap::op_reset() {
1587   if (ShenandoahPacing) {
1588     pacer()->setup_for_reset();
1589   }
1590   reset_mark_bitmap();
1591 
1592   ShenandoahResetUpdateRegionStateClosure cl;
1593   parallel_heap_region_iterate(&cl);
1594 }
1595 
1596 void ShenandoahHeap::op_preclean() {
1597   if (ShenandoahPacing) {
1598     pacer()->setup_for_preclean();
1599   }
1600   concurrent_mark()->preclean_weak_refs();
1601 }
1602 
1603 void ShenandoahHeap::op_full(GCCause::Cause cause) {
1604   ShenandoahMetricsSnapshot metrics;
1605   metrics.snap_before();
1606 
1607   full_gc()->do_it(cause);
1608 
1609   metrics.snap_after();
1610 
1611   if (metrics.is_good_progress()) {
1612     _progress_last_gc.set();
1613   } else {
1614     // Nothing to do. Tell the allocation path that we have failed to make
1615     // progress, and it can finally fail.
1616     _progress_last_gc.unset();
1617   }
1618 }
1619 
1620 void ShenandoahHeap::op_degenerated(ShenandoahDegenPoint point) {
1621   // Degenerated GC is STW, but it can also fail. Current mechanics communicates
1622   // GC failure via cancelled_concgc() flag. So, if we detect the failure after
1623   // some phase, we have to upgrade the Degenerate GC to Full GC.
1624 
1625   clear_cancelled_gc();
1626 
1627   ShenandoahMetricsSnapshot metrics;
1628   metrics.snap_before();
1629 
1630   switch (point) {
1631     // The cases below form the Duff's-like device: it describes the actual GC cycle,
1632     // but enters it at different points, depending on which concurrent phase had
1633     // degenerated.
1634 
1635     case _degenerated_outside_cycle:
1636       // We have degenerated from outside the cycle, which means something is bad with
1637       // the heap, most probably heavy humongous fragmentation, or we are very low on free
1638       // space. It makes little sense to wait for Full GC to reclaim as much as it can, when
1639       // we can do the most aggressive degen cycle, which includes processing references and
1640       // class unloading, unless those features are explicitly disabled.
1641       //
1642       // Note that we can only do this for "outside-cycle" degens, otherwise we would risk
1643       // changing the cycle parameters mid-cycle during concurrent -> degenerated handover.
1644       set_process_references(heuristics()->can_process_references());
1645       set_unload_classes(heuristics()->can_unload_classes());
1646 
1647       if (_heap->process_references()) {
1648         ReferenceProcessor* rp = _heap->ref_processor();
1649         rp->set_active_mt_degree(_heap->workers()->active_workers());
1650 
1651         // enable ("weak") refs discovery
1652         rp->enable_discovery(true /*verify_no_refs*/, true);
1653         rp->setup_policy(_heap->collector_policy()->should_clear_all_soft_refs());
1654       }
1655 
1656       op_reset();
1657 
1658       op_init_mark();
1659       if (cancelled_gc()) {
1660         op_degenerated_fail();
1661         return;
1662       }
1663 
1664     case _degenerated_mark:
1665       if (is_concurrent_mark_in_progress()) {
1666         op_final_mark();
1667       }
1668       if (cancelled_gc()) {
1669         op_degenerated_fail();
1670         return;
1671       }
1672 
1673       op_cleanup_early();
1674 
1675     case _degenerated_evac:
1676       // If heuristics thinks we should do the cycle, this flag would be set,
1677       // and we can do evacuation. Otherwise, it would be the shortcut cycle.
1678       if (is_evacuation_in_progress()) {
1679 
1680         // Degeneration under oom-evac protocol might have left some objects in
1681         // collection set un-evacuated. Restart evacuation from the beginning to
1682         // capture all objects. For all the objects that are already evacuated,
1683         // it would be a simple check, which is supposed to be fast. This is also
1684         // safe to do even without degeneration, as CSet iterator is at beginning
1685         // in preparation for evacuation anyway.
1686         //
1687         // Before doing that, we need to make sure we never had any cset-pinned
1688         // regions. This may happen if allocation failure happened when evacuating
1689         // the about-to-be-pinned object, oom-evac protocol left the object in
1690         // the collection set, and then the pin reached the cset region. If we continue
1691         // the cycle here, we would trash the cset and alive objects in it. To avoid
1692         // it, we fail degeneration right away and slide into Full GC to recover.
1693 
1694         {
1695           sync_pinned_region_status();
1696           collection_set()->clear_current_index();
1697 
1698           ShenandoahHeapRegion* r;
1699           while ((r = collection_set()->next()) != NULL) {
1700             if (r->is_pinned()) {
1701               cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
1702               op_degenerated_fail();
1703               return;
1704             }
1705           }
1706 
1707           collection_set()->clear_current_index();
1708         }
1709 
1710         op_stw_evac();
1711         if (cancelled_gc()) {
1712           op_degenerated_fail();
1713           return;
1714         }
1715       }
1716 
1717       // If heuristics thinks we should do the cycle, this flag would be set,
1718       // and we need to do update-refs. Otherwise, it would be the shortcut cycle.
1719       if (has_forwarded_objects()) {
1720         op_init_updaterefs();
1721         if (cancelled_gc()) {
1722           op_degenerated_fail();
1723           return;
1724         }
1725       }
1726 
1727     case _degenerated_updaterefs:
1728       if (has_forwarded_objects()) {
1729         op_final_updaterefs();
1730         if (cancelled_gc()) {
1731           op_degenerated_fail();
1732           return;
1733         }
1734       }
1735 
1736       op_cleanup_complete();
1737       break;
1738 
1739     default:
1740       ShouldNotReachHere();
1741   }
1742 
1743   if (ShenandoahVerify) {
1744     verifier()->verify_after_degenerated();
1745   }
1746 
1747   if (VerifyAfterGC) {
1748     Universe::verify();
1749   }
1750 
1751   metrics.snap_after();
1752 
1753   // Check for futility and fail. There is no reason to do several back-to-back Degenerated cycles,
1754   // because that probably means the heap is overloaded and/or fragmented.
1755   if (!metrics.is_good_progress()) {
1756     _progress_last_gc.unset();
1757     cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
1758     op_degenerated_futile();
1759   } else {
1760     _progress_last_gc.set();
1761   }
1762 }
1763 
1764 void ShenandoahHeap::op_degenerated_fail() {
1765   log_info(gc)("Cannot finish degeneration, upgrading to Full GC");
1766   shenandoah_policy()->record_degenerated_upgrade_to_full();
1767   op_full(GCCause::_shenandoah_upgrade_to_full_gc);
1768 }
1769 
1770 void ShenandoahHeap::op_degenerated_futile() {
1771   shenandoah_policy()->record_degenerated_upgrade_to_full();
1772   op_full(GCCause::_shenandoah_upgrade_to_full_gc);
1773 }
1774 
1775 void ShenandoahHeap::complete_marking() {
1776   if (is_concurrent_mark_in_progress()) {
1777     set_concurrent_mark_in_progress(false);
1778   }
1779 
1780   if (!cancelled_gc()) {
1781     // If we needed to update refs, and concurrent marking has been cancelled,
1782     // we need to finish updating references.
1783     set_has_forwarded_objects(false);
1784     mark_complete_marking_context();
1785   }
1786 }
1787 
1788 void ShenandoahHeap::force_satb_flush_all_threads() {
1789   if (!is_concurrent_mark_in_progress()) {
1790     // No need to flush SATBs
1791     return;
1792   }
1793 
1794   // Do not block if Threads lock is busy. This avoids the potential deadlock
1795   // when this code is called from the periodic task, and something else is
1796   // expecting the periodic task to complete without blocking. On the off-chance
1797   // Threads lock is busy momentarily, try to acquire several times.
1798   for (int t = 0; t < 10; t++) {
1799     if (Threads_lock->try_lock()) {
1800       JavaThread::set_force_satb_flush_all_threads(true);
1801       Threads_lock->unlock();
1802 
1803       // The threads are not "acquiring" their thread-local data, but it does not
1804       // hurt to "release" the updates here anyway.
1805       OrderAccess::fence();
1806       break;
1807     }
1808     os::naked_short_sleep(1);
1809   }
1810 }
1811 
1812 void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) {
1813   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should really be Shenandoah safepoint");
1814   _gc_state.set_cond(mask, value);
1815   JavaThread::set_gc_state_all_threads(_gc_state.raw_value());
1816 }
1817 
1818 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1819   if (has_forwarded_objects()) {
1820     set_gc_state_mask(MARKING | UPDATEREFS, in_progress);
1821   } else {
1822     set_gc_state_mask(MARKING, in_progress);
1823   }
1824   JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1825 }
1826 
1827 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1828   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
1829   set_gc_state_mask(EVACUATION, in_progress);
1830 }
1831 
1832 void ShenandoahHeap::ref_processing_init() {
1833   MemRegion mr = reserved_region();
1834 
1835   assert(_max_workers > 0, "Sanity");
1836 
1837   bool mt_processing = ParallelRefProcEnabled && (ParallelGCThreads > 1);
1838   bool mt_discovery = _max_workers > 1;
1839 
1840   _ref_processor =
1841     new ReferenceProcessor(mr,    // span
1842                            mt_processing,           // MT processing
1843                            _max_workers,            // Degree of MT processing
1844                            mt_discovery,            // MT discovery
1845                            _max_workers,            // Degree of MT discovery
1846                            false,                   // Reference discovery is not atomic
1847                            NULL);                   // No closure, should be installed before use
1848 
1849   log_info(gc, init)("Reference processing: %s discovery, %s processing",
1850           mt_discovery ? "parallel" : "serial",
1851           mt_processing ? "parallel" : "serial");
1852 
1853   shenandoah_assert_rp_isalive_not_installed();
1854 }
1855 
1856 void ShenandoahHeap::acquire_pending_refs_lock() {
1857   _control_thread->slt()->manipulatePLL(SurrogateLockerThread::acquirePLL);
1858 }
1859 
1860 void ShenandoahHeap::release_pending_refs_lock() {
1861   _control_thread->slt()->manipulatePLL(SurrogateLockerThread::releaseAndNotifyPLL);
1862 }
1863 
1864 GCTracer* ShenandoahHeap::tracer() {
1865   return shenandoah_policy()->tracer();
1866 }
1867 
1868 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
1869   return _free_set->used();
1870 }
1871 
1872 void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
1873   if (try_cancel_gc()) {
1874     FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
1875     log_info(gc)("%s", msg.buffer());
1876     Events::log(Thread::current(), "%s", msg.buffer());
1877   }
1878 }
1879 
1880 uint ShenandoahHeap::max_workers() {
1881   return _max_workers;
1882 }
1883 
1884 void ShenandoahHeap::stop() {
1885   // The shutdown sequence should be able to terminate when GC is running.
1886 
1887   // Step 0. Notify policy to disable event recording.
1888   _shenandoah_policy->record_shutdown();
1889 
1890   // Step 1. Notify control thread that we are in shutdown.
1891   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
1892   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
1893   _control_thread->prepare_for_graceful_shutdown();
1894 
1895   // Step 2. Notify GC workers that we are cancelling GC.
1896   cancel_gc(GCCause::_shenandoah_stop_vm);
1897 
1898   // Step 3. Wait until GC worker exits normally.
1899   _control_thread->stop();
1900 
1901   // Step 4. Stop String Dedup thread if it is active
1902   if (ShenandoahStringDedup::is_enabled()) {
1903     ShenandoahStringDedup::stop();
1904   }
1905 }
1906 
1907 void ShenandoahHeap::unload_classes_and_cleanup_tables(bool full_gc) {
1908   assert(heuristics()->can_unload_classes(), "Class unloading should be enabled");
1909 
1910   ShenandoahGCPhase root_phase(full_gc ?
1911                                ShenandoahPhaseTimings::full_gc_purge :
1912                                ShenandoahPhaseTimings::purge);
1913 
1914   ShenandoahIsAliveSelector alive;
1915   BoolObjectClosure* is_alive = alive.is_alive_closure();
1916 
1917   // Cleaning of klasses depends on correct information from MetadataMarkOnStack. The CodeCache::mark_on_stack
1918   // part is too slow to be done serially, so it is handled during the ShenandoahParallelCleaning phase.
1919   // Defer the cleaning until we have complete on_stack data.
1920   MetadataOnStackMark md_on_stack(false /* Don't visit the code cache at this point */);
1921 
1922   bool purged_class;
1923 
1924   // Unload classes and purge SystemDictionary.
1925   {
1926     ShenandoahGCPhase phase(full_gc ?
1927                             ShenandoahPhaseTimings::full_gc_purge_class_unload :
1928                             ShenandoahPhaseTimings::purge_class_unload);
1929     purged_class = SystemDictionary::do_unloading(is_alive,
1930                                                   false /* Defer klass cleaning */);
1931   }
1932   {
1933     ShenandoahGCPhase phase(full_gc ?
1934                             ShenandoahPhaseTimings::full_gc_purge_par :
1935                             ShenandoahPhaseTimings::purge_par);
1936     uint active = _workers->active_workers();
1937     ShenandoahParallelCleaningTask unlink_task(is_alive, true, true, active, purged_class);
1938     _workers->run_task(&unlink_task);
1939   }
1940 
1941   {
1942     ShenandoahGCPhase phase(full_gc ?
1943                             ShenandoahPhaseTimings::full_gc_purge_metadata :
1944                             ShenandoahPhaseTimings::purge_metadata);
1945     ClassLoaderDataGraph::free_deallocate_lists();
1946   }
1947 
1948   if (ShenandoahStringDedup::is_enabled()) {
1949     ShenandoahGCPhase phase(full_gc ?
1950                             ShenandoahPhaseTimings::full_gc_purge_string_dedup :
1951                             ShenandoahPhaseTimings::purge_string_dedup);
1952     ShenandoahStringDedup::parallel_cleanup();
1953   }
1954 
1955   {
1956     ShenandoahGCPhase phase(full_gc ?
1957                             ShenandoahPhaseTimings::full_gc_purge_cldg :
1958                             ShenandoahPhaseTimings::purge_cldg);
1959     ClassLoaderDataGraph::purge();
1960   }
1961 }
1962 
1963 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
1964   set_gc_state_mask(HAS_FORWARDED, cond);
1965 }
1966 
1967 void ShenandoahHeap::set_process_references(bool pr) {
1968   _process_references.set_cond(pr);
1969 }
1970 
1971 void ShenandoahHeap::set_unload_classes(bool uc) {
1972   _unload_classes.set_cond(uc);
1973 }
1974 
1975 bool ShenandoahHeap::process_references() const {
1976   return _process_references.is_set();
1977 }
1978 
1979 bool ShenandoahHeap::unload_classes() const {
1980   return _unload_classes.is_set();
1981 }
1982 
1983 address ShenandoahHeap::in_cset_fast_test_addr() {
1984   ShenandoahHeap* heap = ShenandoahHeap::heap();
1985   assert(heap->collection_set() != NULL, "Sanity");
1986   return (address) heap->collection_set()->biased_map_address();
1987 }
1988 
1989 address ShenandoahHeap::cancelled_gc_addr() {
1990   return (address) ShenandoahHeap::heap()->_cancelled_gc.addr_of();
1991 }
1992 
1993 address ShenandoahHeap::gc_state_addr() {
1994   return (address) ShenandoahHeap::heap()->_gc_state.addr_of();
1995 }
1996 
1997 size_t ShenandoahHeap::conservative_max_heap_alignment() {
1998   size_t align = ShenandoahMaxRegionSize;
1999   if (UseLargePages) {
2000     align = MAX2(align, os::large_page_size());
2001   }
2002   return align;
2003 }
2004 
2005 size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
2006   return OrderAccess::load_acquire(&_bytes_allocated_since_gc_start);
2007 }
2008 
2009 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
2010   OrderAccess::release_store_fence(&_bytes_allocated_since_gc_start, (size_t)0);
2011 }
2012 
2013 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
2014   _degenerated_gc_in_progress.set_cond(in_progress);
2015 }
2016 
2017 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2018   _full_gc_in_progress.set_cond(in_progress);
2019 }
2020 
2021 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2022   assert (is_full_gc_in_progress(), "should be");
2023   _full_gc_move_in_progress.set_cond(in_progress);
2024 }
2025 
2026 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2027   set_gc_state_mask(UPDATEREFS, in_progress);
2028 }
2029 
2030 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2031   ShenandoahCodeRoots::add_nmethod(nm);
2032 }
2033 
2034 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
2035   ShenandoahCodeRoots::remove_nmethod(nm);
2036 }
2037 
2038 oop ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
2039   heap_region_containing(o)->record_pin();
2040   return o;
2041 }
2042 
2043 void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
2044   heap_region_containing(o)->record_unpin();
2045 }
2046 
2047 void ShenandoahHeap::sync_pinned_region_status() {
2048   ShenandoahHeapLocker locker(lock());
2049 
2050   for (size_t i = 0; i < num_regions(); i++) {
2051     ShenandoahHeapRegion *r = get_region(i);
2052     if (r->is_active()) {
2053       if (r->is_pinned()) {
2054         if (r->pin_count() == 0) {
2055           r->make_unpinned();
2056         }
2057       } else {
2058         if (r->pin_count() > 0) {
2059           r->make_pinned();
2060         }
2061       }
2062     }
2063   }
2064 
2065   assert_pinned_region_status();
2066 }
2067 
2068 #ifdef ASSERT
2069 void ShenandoahHeap::assert_pinned_region_status() {
2070   for (size_t i = 0; i < num_regions(); i++) {
2071     ShenandoahHeapRegion* r = get_region(i);
2072     assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
2073            err_msg("Region " SIZE_FORMAT " pinning status is inconsistent", i));
2074   }
2075 }
2076 #endif
2077 
2078 GCTimer* ShenandoahHeap::gc_timer() const {
2079   return _gc_timer;
2080 }
2081 
2082 #ifdef ASSERT
2083 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
2084   assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
2085 
2086   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
2087     if (UseDynamicNumberOfGCThreads ||
2088         (FLAG_IS_DEFAULT(ParallelGCThreads) && ForceDynamicNumberOfGCThreads)) {
2089       assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");
2090     } else {
2091       // Use ParallelGCThreads inside safepoints
2092       assert(nworkers == ParallelGCThreads, "Use ParalleGCThreads within safepoints");
2093     }
2094   } else {
2095     if (UseDynamicNumberOfGCThreads ||
2096         (FLAG_IS_DEFAULT(ConcGCThreads) && ForceDynamicNumberOfGCThreads)) {
2097       assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
2098     } else {
2099       // Use ConcGCThreads outside safepoints
2100       assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
2101     }
2102   }
2103 }
2104 #endif
2105 
2106 ShenandoahVerifier* ShenandoahHeap::verifier() {
2107   guarantee(ShenandoahVerify, "Should be enabled");
2108   assert (_verifier != NULL, "sanity");
2109   return _verifier;
2110 }
2111 
2112 ShenandoahUpdateHeapRefsClosure::ShenandoahUpdateHeapRefsClosure() :
2113   _heap(ShenandoahHeap::heap()) {}
2114 
2115 class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
2116 private:
2117   ShenandoahHeap* _heap;
2118   ShenandoahRegionIterator* _regions;
2119   bool _concurrent;
2120 
2121 public:
2122   ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions, bool concurrent) :
2123     AbstractGangTask("Concurrent Update References Task"),
2124     _heap(ShenandoahHeap::heap()),
2125     _regions(regions),
2126     _concurrent(concurrent) {
2127   }
2128 
2129   void work(uint worker_id) {
2130     ShenandoahConcurrentWorkerSession worker_session(worker_id);
2131     ShenandoahUpdateHeapRefsClosure cl;
2132     ShenandoahHeapRegion* r = _regions->next();
2133     ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
2134     while (r != NULL) {
2135       HeapWord* update_watermark = r->get_update_watermark();
2136       assert (update_watermark >= r->bottom(), "sanity");
2137       if (r->is_active() && !r->is_cset()) {
2138         _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2139       }
2140       if (ShenandoahPacing) {
2141         _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
2142       }
2143       if (_heap->cancelled_gc()) {
2144         return;
2145       }
2146       r = _regions->next();
2147     }
2148   }
2149 };
2150 
2151 void ShenandoahHeap::update_heap_references(bool concurrent) {
2152   ShenandoahUpdateHeapRefsTask task(&_update_refs_iterator, concurrent);
2153   workers()->run_task(&task);
2154 }
2155 
2156 void ShenandoahHeap::op_init_updaterefs() {
2157   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2158 
2159   set_evacuation_in_progress(false);
2160 
2161   if (ShenandoahVerify) {
2162     if (!is_degenerated_gc_in_progress()) {
2163       verifier()->verify_roots_no_forwarded_except(ShenandoahRootVerifier::ThreadRoots);
2164     }
2165     verifier()->verify_before_updaterefs();
2166   }
2167 
2168   set_update_refs_in_progress(true);
2169 
2170   {
2171     ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_prepare);
2172 
2173     make_parsable(true);
2174 
2175     // Reset iterator.
2176     _update_refs_iterator.reset();
2177   }
2178 
2179   if (ShenandoahPacing) {
2180     pacer()->setup_for_updaterefs();
2181   }
2182 }
2183 
2184 class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
2185 private:
2186   ShenandoahHeapLock* const _lock;
2187 
2188 public:
2189   ShenandoahFinalUpdateRefsUpdateRegionStateClosure() : _lock(ShenandoahHeap::heap()->lock()) {}
2190 
2191   void heap_region_do(ShenandoahHeapRegion* r) {
2192     // Drop unnecessary "pinned" state from regions that does not have CP marks
2193     // anymore, as this would allow trashing them.
2194 
2195     if (r->is_active()) {
2196       if (r->is_pinned()) {
2197         if (r->pin_count() == 0) {
2198           ShenandoahHeapLocker locker(_lock);
2199           r->make_unpinned();
2200         }
2201       } else {
2202         if (r->pin_count() > 0) {
2203           ShenandoahHeapLocker locker(_lock);
2204           r->make_pinned();
2205         }
2206       }
2207     }
2208   }
2209 
2210   bool is_thread_safe() { return true; }
2211 };
2212 
2213 void ShenandoahHeap::op_final_updaterefs() {
2214   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2215 
2216   // Check if there is left-over work, and finish it
2217   if (_update_refs_iterator.has_next()) {
2218     ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_finish_work);
2219 
2220     // Finish updating references where we left off.
2221     clear_cancelled_gc();
2222     update_heap_references(false);
2223   }
2224 
2225   // Clear cancelled GC, if set. On cancellation path, the block before would handle
2226   // everything. On degenerated paths, cancelled gc would not be set anyway.
2227   if (cancelled_gc()) {
2228     clear_cancelled_gc();
2229   }
2230   assert(!cancelled_gc(), "Should have been done right before");
2231 
2232   if (ShenandoahVerify && !is_degenerated_gc_in_progress()) {
2233     verifier()->verify_roots_no_forwarded_except(ShenandoahRootVerifier::ThreadRoots);
2234   }
2235 
2236   if (is_degenerated_gc_in_progress()) {
2237     concurrent_mark()->update_roots(ShenandoahPhaseTimings::degen_gc_update_roots);
2238   } else {
2239     concurrent_mark()->update_thread_roots(ShenandoahPhaseTimings::final_update_refs_roots);
2240   }
2241 
2242   // Has to be done before cset is clear
2243   if (ShenandoahVerify) {
2244     verifier()->verify_roots_in_to_space();
2245   }
2246 
2247   {
2248     ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_trash_cset);
2249     trash_cset_regions();
2250   }
2251 
2252   set_has_forwarded_objects(false);
2253   set_update_refs_in_progress(false);
2254 
2255   if (ShenandoahVerify) {
2256     verifier()->verify_after_updaterefs();
2257   }
2258 
2259   if (VerifyAfterGC) {
2260     Universe::verify();
2261   }
2262 
2263   {
2264     ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_update_region_states);
2265     ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl;
2266     parallel_heap_region_iterate(&cl);
2267 
2268     assert_pinned_region_status();
2269   }
2270 
2271   {
2272     ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_rebuild_freeset);
2273     ShenandoahHeapLocker locker(lock());
2274     _free_set->rebuild();
2275   }
2276 }
2277 
2278 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2279   print_on(st);
2280   print_heap_regions_on(st);
2281 }
2282 
2283 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2284   size_t slice = r->index() / _bitmap_regions_per_slice;
2285 
2286   size_t regions_from = _bitmap_regions_per_slice * slice;
2287   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2288   for (size_t g = regions_from; g < regions_to; g++) {
2289     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2290     if (skip_self && g == r->index()) continue;
2291     if (get_region(g)->is_committed()) {
2292       return true;
2293     }
2294   }
2295   return false;
2296 }
2297 
2298 bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2299   shenandoah_assert_heaplocked();
2300 
2301   // Bitmaps in special regions do not need commits
2302   if (_bitmap_region_special) {
2303     return true;
2304   }
2305 
2306   if (is_bitmap_slice_committed(r, true)) {
2307     // Some other region from the group is already committed, meaning the bitmap
2308     // slice is already committed, we exit right away.
2309     return true;
2310   }
2311 
2312   // Commit the bitmap slice:
2313   size_t slice = r->index() / _bitmap_regions_per_slice;
2314   size_t off = _bitmap_bytes_per_slice * slice;
2315   size_t len = _bitmap_bytes_per_slice;
2316   char* start = (char*) _bitmap_region.start() + off;
2317 
2318   if (!os::commit_memory(start, len, false)) {
2319     return false;
2320   }
2321 
2322   if (AlwaysPreTouch) {
2323     os::pretouch_memory(start, start + len);
2324   }
2325 
2326   return true;
2327 }
2328 
2329 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2330   shenandoah_assert_heaplocked();
2331 
2332   // Bitmaps in special regions do not need uncommits
2333   if (_bitmap_region_special) {
2334     return true;
2335   }
2336 
2337   if (is_bitmap_slice_committed(r, true)) {
2338     // Some other region from the group is still committed, meaning the bitmap
2339     // slice is should stay committed, exit right away.
2340     return true;
2341   }
2342 
2343   // Uncommit the bitmap slice:
2344   size_t slice = r->index() / _bitmap_regions_per_slice;
2345   size_t off = _bitmap_bytes_per_slice * slice;
2346   size_t len = _bitmap_bytes_per_slice;
2347   if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
2348     return false;
2349   }
2350   return true;
2351 }
2352 
2353 void ShenandoahHeap::vmop_entry_init_mark() {
2354   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2355   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark_gross);
2356 
2357   try_inject_alloc_failure();
2358   VM_ShenandoahInitMark op;
2359   VMThread::execute(&op); // jump to entry_init_mark() under safepoint
2360 }
2361 
2362 void ShenandoahHeap::vmop_entry_final_mark() {
2363   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2364   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark_gross);
2365 
2366   try_inject_alloc_failure();
2367   VM_ShenandoahFinalMarkStartEvac op;
2368   VMThread::execute(&op); // jump to entry_final_mark under safepoint
2369 }
2370 
2371 void ShenandoahHeap::vmop_entry_init_updaterefs() {
2372   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2373   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_gross);
2374 
2375   try_inject_alloc_failure();
2376   VM_ShenandoahInitUpdateRefs op;
2377   VMThread::execute(&op);
2378 }
2379 
2380 void ShenandoahHeap::vmop_entry_final_updaterefs() {
2381   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2382   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_gross);
2383 
2384   try_inject_alloc_failure();
2385   VM_ShenandoahFinalUpdateRefs op;
2386   VMThread::execute(&op);
2387 }
2388 
2389 void ShenandoahHeap::vmop_entry_full(GCCause::Cause cause) {
2390   TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
2391   ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_gross);
2392 
2393   try_inject_alloc_failure();
2394   VM_ShenandoahFullGC op(cause);
2395   VMThread::execute(&op);
2396 }
2397 
2398 void ShenandoahHeap::vmop_degenerated(ShenandoahDegenPoint point) {
2399   TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
2400   ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_gross);
2401 
2402   VM_ShenandoahDegeneratedGC degenerated_gc((int)point);
2403   VMThread::execute(&degenerated_gc);
2404 }
2405 
2406 void ShenandoahHeap::entry_init_mark() {
2407   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark);
2408 
2409   const char* msg = init_mark_event_message();
2410   GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id());
2411   EventMark em("%s", msg);
2412 
2413   ShenandoahWorkerScope scope(workers(),
2414                               ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
2415                               "init marking");
2416 
2417   op_init_mark();
2418 }
2419 
2420 void ShenandoahHeap::entry_final_mark() {
2421   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark);
2422 
2423   const char* msg = final_mark_event_message();
2424   GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id());
2425   EventMark em("%s", msg);
2426 
2427   ShenandoahWorkerScope scope(workers(),
2428                               ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
2429                               "final marking");
2430 
2431   op_final_mark();
2432 }
2433 
2434 void ShenandoahHeap::entry_init_updaterefs() {
2435   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs);
2436 
2437   static const char* msg = "Pause Init Update Refs";
2438   GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id());
2439   EventMark em("%s", msg);
2440 
2441   // No workers used in this phase, no setup required
2442 
2443   op_init_updaterefs();
2444 }
2445 
2446 void ShenandoahHeap::entry_final_updaterefs() {
2447   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs);
2448 
2449   static const char* msg = "Pause Final Update Refs";
2450   GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id());
2451   EventMark em("%s", msg);
2452 
2453   ShenandoahWorkerScope scope(workers(),
2454                               ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
2455                               "final reference update");
2456 
2457   op_final_updaterefs();
2458 }
2459 
2460 void ShenandoahHeap::entry_full(GCCause::Cause cause) {
2461   ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc);
2462 
2463   static const char* msg = "Pause Full";
2464   GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id(), true);
2465   EventMark em("%s", msg);
2466 
2467   ShenandoahWorkerScope scope(workers(),
2468                               ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
2469                               "full gc");
2470 
2471   op_full(cause);
2472 }
2473 
2474 void ShenandoahHeap::entry_degenerated(int point) {
2475   ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc);
2476 
2477   ShenandoahDegenPoint dpoint = (ShenandoahDegenPoint)point;
2478   const char* msg = degen_event_message(dpoint);
2479   GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id(), true);
2480   EventMark em("%s", msg);
2481 
2482   ShenandoahWorkerScope scope(workers(),
2483                               ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated(),
2484                               "stw degenerated gc");
2485 
2486   set_degenerated_gc_in_progress(true);
2487   op_degenerated(dpoint);
2488   set_degenerated_gc_in_progress(false);
2489 }
2490 
2491 void ShenandoahHeap::entry_mark() {
2492   TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
2493 
2494   const char* msg = conc_mark_event_message();
2495   GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id());
2496   EventMark em("%s", msg);
2497 
2498   ShenandoahWorkerScope scope(workers(),
2499                               ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
2500                               "concurrent marking");
2501 
2502   try_inject_alloc_failure();
2503   op_mark();
2504 }
2505 
2506 void ShenandoahHeap::entry_evac() {
2507   ShenandoahGCPhase conc_evac_phase(ShenandoahPhaseTimings::conc_evac);
2508   TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
2509 
2510   static const char *msg = "Concurrent evacuation";
2511   GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id());
2512   EventMark em("%s", msg);
2513 
2514   ShenandoahWorkerScope scope(workers(),
2515                               ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
2516                               "concurrent evacuation");
2517 
2518   try_inject_alloc_failure();
2519   op_conc_evac();
2520 }
2521 
2522 void ShenandoahHeap::entry_updaterefs() {
2523   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_update_refs);
2524 
2525   static const char* msg = "Concurrent update references";
2526   GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id());
2527   EventMark em("%s", msg);
2528 
2529   ShenandoahWorkerScope scope(workers(),
2530                               ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
2531                               "concurrent reference update");
2532 
2533   try_inject_alloc_failure();
2534   op_updaterefs();
2535 }
2536 
2537 void ShenandoahHeap::entry_cleanup_early() {
2538   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup_early);
2539 
2540   static const char* msg = "Concurrent cleanup";
2541   GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id(), true);
2542   EventMark em("%s", msg);
2543 
2544   // This phase does not use workers, no need for setup
2545 
2546   try_inject_alloc_failure();
2547   op_cleanup_early();
2548 }
2549 
2550 void ShenandoahHeap::entry_cleanup_complete() {
2551   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup_complete);
2552 
2553   static const char* msg = "Concurrent cleanup";
2554   GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id(), true);
2555   EventMark em("%s", msg);
2556 
2557   // This phase does not use workers, no need for setup
2558 
2559   try_inject_alloc_failure();
2560   op_cleanup_complete();
2561 }
2562 
2563 void ShenandoahHeap::entry_reset() {
2564   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_reset);
2565 
2566   static const char* msg = "Concurrent reset";
2567   GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id());
2568   EventMark em("%s", msg);
2569 
2570   ShenandoahWorkerScope scope(workers(),
2571                               ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
2572                               "concurrent reset");
2573 
2574   try_inject_alloc_failure();
2575   op_reset();
2576 }
2577 
2578 void ShenandoahHeap::entry_preclean() {
2579   if (ShenandoahPreclean && process_references()) {
2580     ShenandoahGCPhase conc_preclean(ShenandoahPhaseTimings::conc_preclean);
2581 
2582     static const char* msg = "Concurrent precleaning";
2583     GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id());
2584     EventMark em("%s", msg);
2585 
2586     ShenandoahWorkerScope scope(workers(),
2587                                 ShenandoahWorkerPolicy::calc_workers_for_conc_preclean(),
2588                                 "concurrent preclean",
2589                                 /* check_workers = */ false);
2590 
2591     try_inject_alloc_failure();
2592     op_preclean();
2593   }
2594 }
2595 
2596 void ShenandoahHeap::entry_uncommit(double shrink_before, size_t shrink_until) {
2597   static const char *msg = "Concurrent uncommit";
2598   GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id(), true);
2599   EventMark em("%s", msg);
2600 
2601   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_uncommit);
2602 
2603   op_uncommit(shrink_before, shrink_until);
2604 }
2605 
2606 void ShenandoahHeap::try_inject_alloc_failure() {
2607   if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2608     _inject_alloc_failure.set();
2609     os::naked_short_sleep(1);
2610     if (cancelled_gc()) {
2611       log_info(gc)("Allocation failure was successfully injected");
2612     }
2613   }
2614 }
2615 
2616 bool ShenandoahHeap::should_inject_alloc_failure() {
2617   return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2618 }
2619 
2620 void ShenandoahHeap::enter_evacuation() {
2621   _oom_evac_handler.enter_evacuation();
2622 }
2623 
2624 void ShenandoahHeap::leave_evacuation() {
2625   _oom_evac_handler.leave_evacuation();
2626 }
2627 
2628 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2629   _heap(ShenandoahHeap::heap()),
2630   _index(0) {}
2631 
2632 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2633   _heap(heap),
2634   _index(0) {}
2635 
2636 void ShenandoahRegionIterator::reset() {
2637   _index = 0;
2638 }
2639 
2640 bool ShenandoahRegionIterator::has_next() const {
2641   return _index < (jint)_heap->num_regions();
2642 }
2643 
2644 char ShenandoahHeap::gc_state() {
2645   return _gc_state.raw_value();
2646 }
2647 
2648 const char* ShenandoahHeap::init_mark_event_message() const {
2649   assert(!has_forwarded_objects(), "Should not have forwarded objects here");
2650 
2651   bool proc_refs = process_references();
2652   bool unload_cls = unload_classes();
2653 
2654   if (proc_refs && unload_cls) {
2655     return "Pause Init Mark (process weakrefs) (unload classes)";
2656   } else if (proc_refs) {
2657     return "Pause Init Mark (process weakrefs)";
2658   } else if (unload_cls) {
2659     return "Pause Init Mark (unload classes)";
2660   } else {
2661     return "Pause Init Mark";
2662   }
2663 }
2664 
2665 const char* ShenandoahHeap::final_mark_event_message() const {
2666   assert(!has_forwarded_objects(), "Should not have forwarded objects here");
2667 
2668   bool proc_refs = process_references();
2669   bool unload_cls = unload_classes();
2670 
2671   if (proc_refs && unload_cls) {
2672     return "Pause Final Mark (process weakrefs) (unload classes)";
2673   } else if (proc_refs) {
2674     return "Pause Final Mark (process weakrefs)";
2675   } else if (unload_cls) {
2676     return "Pause Final Mark (unload classes)";
2677   } else {
2678     return "Pause Final Mark";
2679   }
2680 }
2681 
2682 const char* ShenandoahHeap::conc_mark_event_message() const {
2683   assert(!has_forwarded_objects(), "Should not have forwarded objects here");
2684 
2685   bool proc_refs = process_references();
2686   bool unload_cls = unload_classes();
2687 
2688   if (proc_refs && unload_cls) {
2689     return "Concurrent marking (process weakrefs) (unload classes)";
2690   } else if (proc_refs) {
2691     return "Concurrent marking (process weakrefs)";
2692   } else if (unload_cls) {
2693     return "Concurrent marking (unload classes)";
2694   } else {
2695     return "Concurrent marking";
2696   }
2697 }
2698 
2699 const char* ShenandoahHeap::degen_event_message(ShenandoahDegenPoint point) const {
2700   switch (point) {
2701     case _degenerated_unset:
2702       return "Pause Degenerated GC (<UNSET>)";
2703     case _degenerated_outside_cycle:
2704       return "Pause Degenerated GC (Outside of Cycle)";
2705     case _degenerated_mark:
2706       return "Pause Degenerated GC (Mark)";
2707     case _degenerated_evac:
2708       return "Pause Degenerated GC (Evacuation)";
2709     case _degenerated_updaterefs:
2710       return "Pause Degenerated GC (Update Refs)";
2711     default:
2712       ShouldNotReachHere();
2713       return "ERROR";
2714   }
2715 }
2716 
2717 ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
2718 #ifdef ASSERT
2719   assert(_liveness_cache != NULL, "sanity");
2720   assert(worker_id < _max_workers, "sanity");
2721   for (uint i = 0; i < num_regions(); i++) {
2722     assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
2723   }
2724 #endif
2725   return _liveness_cache[worker_id];
2726 }
2727 
2728 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
2729   assert(worker_id < _max_workers, "sanity");
2730   assert(_liveness_cache != NULL, "sanity");
2731   ShenandoahLiveData* ld = _liveness_cache[worker_id];
2732   for (uint i = 0; i < num_regions(); i++) {
2733     ShenandoahLiveData live = ld[i];
2734     if (live > 0) {
2735       ShenandoahHeapRegion* r = get_region(i);
2736       r->increase_live_data_gc_words(live);
2737       ld[i] = 0;
2738     }
2739   }
2740 }