src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp

  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "memory/allocation.hpp"
  28 #include "memory/universe.hpp"
  29 
  30 #include "gc/shared/gcArguments.hpp"
  31 #include "gc/shared/gcTimer.hpp"
  32 #include "gc/shared/gcTraceTime.inline.hpp"
  33 #include "gc/shared/locationPrinter.inline.hpp"
  34 #include "gc/shared/memAllocator.hpp"
  35 #include "gc/shared/plab.hpp"
  36 #include "gc/shared/tlab_globals.hpp"
  37 
  38 #include "gc/shenandoah/shenandoahBarrierSet.hpp"

  39 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  40 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  41 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  42 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
  43 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  44 #include "gc/shenandoah/shenandoahControlThread.hpp"

  45 #include "gc/shenandoah/shenandoahFreeSet.hpp"

  46 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  47 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  48 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  49 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  50 #include "gc/shenandoah/shenandoahInitLogger.hpp"
  51 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  52 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
  53 #include "gc/shenandoah/shenandoahMetrics.hpp"
  54 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"

  55 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  56 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
  57 #include "gc/shenandoah/shenandoahPadding.hpp"
  58 #include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
  59 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  60 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"

  61 #include "gc/shenandoah/shenandoahStringDedup.hpp"
  62 #include "gc/shenandoah/shenandoahSTWMark.hpp"
  63 #include "gc/shenandoah/shenandoahUtils.hpp"
  64 #include "gc/shenandoah/shenandoahVerifier.hpp"
  65 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  66 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  67 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  68 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"


  69 #include "gc/shenandoah/mode/shenandoahIUMode.hpp"
  70 #include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
  71 #include "gc/shenandoah/mode/shenandoahSATBMode.hpp"

  72 #if INCLUDE_JFR
  73 #include "gc/shenandoah/shenandoahJfrSupport.hpp"
  74 #endif
  75 


  76 #include "classfile/systemDictionary.hpp"
  77 #include "code/codeCache.hpp"
  78 #include "memory/classLoaderMetaspace.hpp"
  79 #include "memory/metaspaceUtils.hpp"
  80 #include "oops/compressedOops.inline.hpp"
  81 #include "prims/jvmtiTagMap.hpp"
  82 #include "runtime/atomic.hpp"
  83 #include "runtime/globals.hpp"
  84 #include "runtime/interfaceSupport.inline.hpp"
  85 #include "runtime/java.hpp"
  86 #include "runtime/orderAccess.hpp"
  87 #include "runtime/safepointMechanism.hpp"
  88 #include "runtime/vmThread.hpp"
  89 #include "services/mallocTracker.hpp"
  90 #include "services/memTracker.hpp"
  91 #include "utilities/events.hpp"
  92 #include "utilities/powerOfTwo.hpp"
  93 
  94 class ShenandoahPretouchHeapTask : public WorkerTask {
  95 private:

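     // Heap initialization: sizes the region array from the ergonomic flags, reserves and
     // commits the heap, marking bitmaps and collection set storage, builds the region
     // objects, and starts the supporting GC infrastructure (pacer, monitoring, control thread).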
 143 jint ShenandoahHeap::initialize() {
 144   //
 145   // Figure out heap sizing
 146   //
 147 
 148   size_t init_byte_size = InitialHeapSize;
 149   size_t min_byte_size  = MinHeapSize;
 150   size_t max_byte_size  = MaxHeapSize;
 151   size_t heap_alignment = HeapAlignment;
 152 
 153   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 154 
 155   Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
 156   Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
 157 
 158   _num_regions = ShenandoahHeapRegion::region_count();
 159   assert(_num_regions == (max_byte_size / reg_size_bytes),
 160          "Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT,
 161          _num_regions, max_byte_size, reg_size_bytes);
 162 
 163   // Now we know the number of regions, initialize the heuristics.
 164   initialize_heuristics();
 165 
 166   size_t num_committed_regions = init_byte_size / reg_size_bytes;
 167   num_committed_regions = MIN2(num_committed_regions, _num_regions);
 168   assert(num_committed_regions <= _num_regions, "sanity");
 169   _initial_size = num_committed_regions * reg_size_bytes;
 170 
 171   size_t num_min_regions = min_byte_size / reg_size_bytes;
 172   num_min_regions = MIN2(num_min_regions, _num_regions);
 173   assert(num_min_regions <= _num_regions, "sanity");
 174   _minimum_size = num_min_regions * reg_size_bytes;
 175 
 176   // Default to max heap size.
 177   _soft_max_size = _num_regions * reg_size_bytes;
 178 
 179   _committed = _initial_size;
 180 
 181   size_t heap_page_size   = UseLargePages ? os::large_page_size() : os::vm_page_size();
 182   size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 183   size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 184 
 185   //
 186   // Reserve and commit memory for heap
 187   //
 188 
 189   ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
 190   initialize_reserved_region(heap_rs);
 191   _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
 192   _heap_region_special = heap_rs.special();
 193 
 194   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
 195          "Misaligned heap: " PTR_FORMAT, p2i(base()));
 196 
 197 #if SHENANDOAH_OPTIMIZED_MARKTASK
 198   // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
 199   // Fail if we ever attempt to address more than we can.
 200   if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
 201     FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
 202                           "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
 203                           "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
 204                 p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
 205     vm_exit_during_initialization("Fatal Error", buf);
 206   }
 207 #endif
 208 
 209   ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
 210   if (!_heap_region_special) {
 211     os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
 212                               "Cannot commit heap memory");
 213   }
 214 
 215   //
 216   // Reserve and commit memory for bitmap(s)
 217   //
 218 
 219   _bitmap_size = ShenandoahMarkBitMap::compute_size(heap_rs.size());
 220   _bitmap_size = align_up(_bitmap_size, bitmap_page_size);
 221 
 222   size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();
 223 
 224   guarantee(bitmap_bytes_per_region != 0,
 225             "Bitmap bytes per region should not be zero");
 226   guarantee(is_power_of_2(bitmap_bytes_per_region),
 227             "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
 228 
 229   if (bitmap_page_size > bitmap_bytes_per_region) {
 230     _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
 231     _bitmap_bytes_per_slice = bitmap_page_size;
 232   } else {
 233     _bitmap_regions_per_slice = 1;
 234     _bitmap_bytes_per_slice = bitmap_bytes_per_region;
 235   }
 236 
 237   guarantee(_bitmap_regions_per_slice >= 1,
 238             "Should have at least one region per slice: " SIZE_FORMAT,
 239             _bitmap_regions_per_slice);
 240 
 241   guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
 242             "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
 243             _bitmap_bytes_per_slice, bitmap_page_size);
 244 
 245   ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
 246   MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
 247   _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
 248   _bitmap_region_special = bitmap.special();
 249 
 250   size_t bitmap_init_commit = _bitmap_bytes_per_slice *
 251                               align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
 252   bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
 253   if (!_bitmap_region_special) {
 254     os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
 255                               "Cannot commit bitmap memory");
 256   }
 257 
 258   _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions, _max_workers);
 259 
 260   if (ShenandoahVerify) {
 261     ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
 262     if (!verify_bitmap.special()) {
 263       os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
 264                                 "Cannot commit verification bitmap memory");
 265     }
 266     MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
 267     MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
 268     _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
 269     _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
 270   }
 271 
 272   // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
 273   ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
 274   MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
 275   _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
 276   _aux_bitmap_region_special = aux_bitmap.special();
 277   _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
 278 

 301     uintptr_t max = (1u << 30u);
 302 
 303     for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
 304       char* req_addr = (char*)addr;
 305       assert(is_aligned(req_addr, cset_align), "Should be aligned");
 306       ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size(), req_addr);
 307       if (cset_rs.is_reserved()) {
 308         assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
 309         _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 310         break;
 311       }
 312     }
 313 
 314     if (_collection_set == nullptr) {
 315       ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size());
 316       _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 317     }
 318   }
 319 
 320   _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);

 321   _free_set = new ShenandoahFreeSet(this, _num_regions);
 322 
 323   {
 324     ShenandoahHeapLocker locker(lock());
 325 

 326     for (size_t i = 0; i < _num_regions; i++) {
 327       HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
 328       bool is_committed = i < num_committed_regions;
 329       void* loc = region_storage.base() + i * region_align;
 330 
 331       ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
 332       assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");
 333 
 334       _marking_context->initialize_top_at_mark_start(r);
 335       _regions[i] = r;
 336       assert(!collection_set()->is_in(i), "New region should not be in collection set");


 337     }
 338 
 339     // Initialize to complete
 340     _marking_context->mark_complete();
 341 
 342     _free_set->rebuild();
 343   }
 344 
 345   if (AlwaysPreTouch) {
 346     // For NUMA, it is important to pre-touch the storage under the bitmaps with worker threads,
 347     // before initialize() below zeroes it with the initializing thread. For any given region,
 348     // we touch the region and the corresponding bitmaps from the same thread.
 349     ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
 350 
 351     _pretouch_heap_page_size = heap_page_size;
 352     _pretouch_bitmap_page_size = bitmap_page_size;
 353 
 354 #ifdef LINUX
 355     // UseTransparentHugePages would madvise that backing memory can be coalesced into huge
 356     // pages. But, the kernel needs to know that every small page is used, in order to coalesce

 384   // There should probably be Shenandoah-specific options for these,
 385   // just as there are G1-specific options.
 386   {
 387     ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
 388     satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
 389     satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
 390   }
 391 
 392   _monitoring_support = new ShenandoahMonitoringSupport(this);
 393   _phase_timings = new ShenandoahPhaseTimings(max_workers());
 394   ShenandoahCodeRoots::initialize();
 395 
 396   if (ShenandoahPacing) {
 397     _pacer = new ShenandoahPacer(this);
 398     _pacer->setup_for_idle();
 399   } else {
 400     _pacer = nullptr;
 401   }
 402 
 403   _control_thread = new ShenandoahControlThread();

 404 
 405   ShenandoahInitLogger::print();
 406 
 407   return JNI_OK;
 408 }
 409 
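     // Selects the GC mode from -XX:ShenandoahGCMode ("satb", "iu", or "passive");
     // diagnostic and experimental modes must be unlocked with the matching VM options.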
 410 void ShenandoahHeap::initialize_mode() {
 411   if (ShenandoahGCMode != nullptr) {
 412     if (strcmp(ShenandoahGCMode, "satb") == 0) {
 413       _gc_mode = new ShenandoahSATBMode();
 414     } else if (strcmp(ShenandoahGCMode, "iu") == 0) {
 415       _gc_mode = new ShenandoahIUMode();
 416     } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
 417       _gc_mode = new ShenandoahPassiveMode();


 418     } else {
 419       vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
 420     }
 421   } else {
 422     vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
 423   }
 424   _gc_mode->initialize_flags();
 425   if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 426     vm_exit_during_initialization(
 427             err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 428                     _gc_mode->name()));
 429   }
 430   if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
 431     vm_exit_during_initialization(
 432             err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 433                     _gc_mode->name()));
 434   }
 435 }
 436 
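     // Heuristics come from the selected GC mode; as with the mode itself, diagnostic and
     // experimental heuristics require the corresponding Unlock*VMOptions flag.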
 437 void ShenandoahHeap::initialize_heuristics() {
 438   assert(_gc_mode != nullptr, "Must be initialized");
 439   _heuristics = _gc_mode->initialize_heuristics();

 440 
 441   if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 442     vm_exit_during_initialization(
 443             err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 444                     _heuristics->name()));
 445   }
 446   if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
 447     vm_exit_during_initialization(
 448             err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 449                     _heuristics->name()));
 450   }
 451 }
 452 
 453 #ifdef _MSC_VER
 454 #pragma warning( push )
 455 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 456 #endif
 457 
 458 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 459   CollectedHeap(),


 460   _initial_size(0),
 461   _used(0),
 462   _committed(0),
 463   _bytes_allocated_since_gc_start(0),
 464   _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
 465   _workers(nullptr),
 466   _safepoint_workers(nullptr),
 467   _heap_region_special(false),
 468   _num_regions(0),
 469   _regions(nullptr),

 470   _update_refs_iterator(this),
 471   _control_thread(nullptr),

 472   _shenandoah_policy(policy),
 473   _gc_mode(nullptr),
 474   _heuristics(nullptr),
 475   _free_set(nullptr),
 476   _pacer(nullptr),
 477   _verifier(nullptr),
 478   _phase_timings(nullptr),
 479   _monitoring_support(nullptr),
 480   _memory_pool(nullptr),


 481   _stw_memory_manager("Shenandoah Pauses", "end of GC pause"),
 482   _cycle_memory_manager("Shenandoah Cycles", "end of GC cycle"),
 483   _gc_timer(new ConcurrentGCTimer()),
 484   _soft_ref_policy(),
 485   _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
 486   _ref_processor(new ShenandoahReferenceProcessor(MAX2(_max_workers, 1U))),
 487   _marking_context(nullptr),
 488   _bitmap_size(0),
 489   _bitmap_regions_per_slice(0),
 490   _bitmap_bytes_per_slice(0),
 491   _bitmap_region_special(false),
 492   _aux_bitmap_region_special(false),
 493   _liveness_cache(nullptr),
 494   _collection_set(nullptr)

 495 {
 496   // Initialize GC mode early, so we can adjust barrier support
 497   initialize_mode();
 498   BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));
 499 
 500   _max_workers = MAX2(_max_workers, 1U);
 501   _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
 502   if (_workers == nullptr) {
 503     vm_exit_during_initialization("Failed necessary allocation.");
 504   } else {
 505     _workers->initialize_workers();
 506   }
 507 
 508   if (ParallelGCThreads > 1) {
 509     _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread",
 510                                                 ParallelGCThreads);
 511     _safepoint_workers->initialize_workers();
 512   }
 513 }
 514 
 515 #ifdef _MSC_VER
 516 #pragma warning( pop )
 517 #endif
 518 
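     // Worker task that clears the marking bitmaps of all regions whose bitmap slices are
     // committed; driven by reset_mark_bitmap() below.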
 519 class ShenandoahResetBitmapTask : public WorkerTask {
 520 private:
 521   ShenandoahRegionIterator _regions;
 522 
 523 public:
 524   ShenandoahResetBitmapTask() :
 525     WorkerTask("Shenandoah Reset Bitmap") {}
 526 
 527   void work(uint worker_id) {
 528     ShenandoahHeapRegion* region = _regions.next();
 529     ShenandoahHeap* heap = ShenandoahHeap::heap();
 530     ShenandoahMarkingContext* const ctx = heap->marking_context();
 531     while (region != nullptr) {
 532       if (heap->is_bitmap_slice_committed(region)) {
 533         ctx->clear_bitmap(region);
 534       }
 535       region = _regions.next();
 536     }
 537   }
 538 };
 539 
 540 void ShenandoahHeap::reset_mark_bitmap() {
 541   assert_gc_workers(_workers->active_workers());
 542   mark_incomplete_marking_context();
 543 
 544   ShenandoahResetBitmapTask task;
 545   _workers->run_task(&task);
 546 }
 547 
 548 void ShenandoahHeap::print_on(outputStream* st) const {
 549   st->print_cr("Shenandoah Heap");
 550   st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
 551                byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
 552                byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
 553                byte_size_in_proper_unit(committed()),    proper_unit_for_byte_size(committed()),
 554                byte_size_in_proper_unit(used()),         proper_unit_for_byte_size(used()));
 555   st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions",
 556                num_regions(),
 557                byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
 558                proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));
 559 
 560   st->print("Status: ");
 561   if (has_forwarded_objects())                 st->print("has forwarded objects, ");
 562   if (is_concurrent_mark_in_progress())        st->print("marking, ");

 563   if (is_evacuation_in_progress())             st->print("evacuating, ");
 564   if (is_update_refs_in_progress())            st->print("updating refs, ");
 565   if (is_degenerated_gc_in_progress())         st->print("degenerated gc, ");
 566   if (is_full_gc_in_progress())                st->print("full gc, ");
 567   if (is_full_gc_move_in_progress())           st->print("full gc move, ");
 568   if (is_concurrent_weak_root_in_progress())   st->print("concurrent weak roots, ");
 569   if (is_concurrent_strong_root_in_progress() &&
 570       !is_concurrent_weak_root_in_progress())  st->print("concurrent strong roots, ");
 571 
 572   if (cancelled_gc()) {
 573     st->print("cancelled");
 574   } else {
 575     st->print("not cancelled");
 576   }
 577   st->cr();
 578 
 579   st->print_cr("Reserved region:");
 580   st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
 581                p2i(reserved_region().start()),
 582                p2i(reserved_region().end()));

 592 
 593   st->cr();
 594   MetaspaceUtils::print_on(st);
 595 
 596   if (Verbose) {
 597     print_heap_regions_on(st);
 598   }
 599 }
 600 
 601 class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
 602 public:
 603   void do_thread(Thread* thread) {
 604     assert(thread != nullptr, "Sanity");
 605     assert(thread->is_Worker_thread(), "Only worker thread expected");
 606     ShenandoahThreadLocalData::initialize_gclab(thread);
 607   }
 608 };
 609 
 610 void ShenandoahHeap::post_initialize() {
 611   CollectedHeap::post_initialize();


 612   MutexLocker ml(Threads_lock);
 613 
 614   ShenandoahInitWorkerGCLABClosure init_gclabs;
 615   _workers->threads_do(&init_gclabs);
 616 
 617   // The GCLAB cannot be initialized early during VM startup, because it cannot determine its max_size yet.
 618   // Instead, let WorkerThreads initialize the GCLAB when a new worker is created.
 619   _workers->set_initialize_gclab();
 620   if (_safepoint_workers != nullptr) {
 621     _safepoint_workers->threads_do(&init_gclabs);
 622     _safepoint_workers->set_initialize_gclab();
 623   }
 624 
 625   _heuristics->initialize();
 626 
 627   JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers());
 628 }
 629 
 630 size_t ShenandoahHeap::used() const {
 631   return Atomic::load(&_used);
 632 }
 633 
 634 size_t ShenandoahHeap::committed() const {
 635   return Atomic::load(&_committed);
 636 }
 637 
 638 void ShenandoahHeap::increase_committed(size_t bytes) {
 639   shenandoah_assert_heaplocked_or_safepoint();
 640   _committed += bytes;
 641 }
 642 
 643 void ShenandoahHeap::decrease_committed(size_t bytes) {
 644   shenandoah_assert_heaplocked_or_safepoint();
 645   _committed -= bytes;
 646 }
 647 
 648 void ShenandoahHeap::increase_used(size_t bytes) {
 649   Atomic::add(&_used, bytes, memory_order_relaxed);
 650 }
 651 
 652 void ShenandoahHeap::set_used(size_t bytes) {
 653   Atomic::store(&_used, bytes);
 654 }
 655 
 656 void ShenandoahHeap::decrease_used(size_t bytes) {
 657   assert(used() >= bytes, "never decrease heap size by more than we've left");
 658   Atomic::sub(&_used, bytes, memory_order_relaxed);
 659 }
 660 
 661 void ShenandoahHeap::increase_allocated(size_t bytes) {
 662   Atomic::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed);
 663 }
 664 
 665 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
 666   size_t bytes = words * HeapWordSize;
 667   if (!waste) {
 668     increase_used(bytes);
 669   }
 670   increase_allocated(bytes);
 671   if (ShenandoahPacing) {
 672     control_thread()->pacing_notify_alloc(words);
 673     if (waste) {
 674       pacer()->claim_for_alloc(words, true);
 675     }
 676   }
 677 }
 678 
 679 size_t ShenandoahHeap::capacity() const {
 680   return committed();
 681 }
 682 
 683 size_t ShenandoahHeap::max_capacity() const {
 684   return _num_regions * ShenandoahHeapRegion::region_size_bytes();
 685 }
 686 
 687 size_t ShenandoahHeap::soft_max_capacity() const {
 688   size_t v = Atomic::load(&_soft_max_size);
 689   assert(min_capacity() <= v && v <= max_capacity(),
 690          "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
 691          min_capacity(), v, max_capacity());
 692   return v;
 693 }
 694 
 695 void ShenandoahHeap::set_soft_max_capacity(size_t v) {
 696   assert(min_capacity() <= v && v <= max_capacity(),
 697          "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
 698          min_capacity(), v, max_capacity());
 699   Atomic::store(&_soft_max_size, v);
 700 }
 701 
 702 size_t ShenandoahHeap::min_capacity() const {
 703   return _minimum_size;
 704 }
 705 
 706 size_t ShenandoahHeap::initial_capacity() const {
 707   return _initial_size;
 708 }
 709 
 710 bool ShenandoahHeap::is_in(const void* p) const {
 711   HeapWord* heap_base = (HeapWord*) base();
 712   HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
 713   return p >= heap_base && p < last_region_end;
 714 }
 715 
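     // Uncommits empty committed regions, walking from the end of the heap, until the
     // committed size would drop below shrink_until; the heap lock is taken per region
     // so that allocators can still get through.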
 716 void ShenandoahHeap::op_uncommit(double shrink_before, size_t shrink_until) {
 717   assert (ShenandoahUncommit, "should be enabled");
 718 
 719   // The application allocates from the beginning of the heap, and the GC allocates at
 720   // the end of it. It is more efficient to uncommit from the end, so that the application
 721   // can keep using the committed regions near the bottom of the heap. GC allocations are
 722   // much less frequent, and can therefore absorb the re-committing costs.
 723 
 724   size_t count = 0;
 725   for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow
 726     ShenandoahHeapRegion* r = get_region(i - 1);
 727     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
 728       ShenandoahHeapLocker locker(lock());
 729       if (r->is_empty_committed()) {
 730         if (committed() < shrink_until + ShenandoahHeapRegion::region_size_bytes()) {
 731           break;
 732         }
 733 
 734         r->make_uncommitted();
 735         count++;
 736       }
 737     }
 738     SpinPause(); // allow allocators to take the lock
 739   }
 740 
 741   if (count > 0) {
 742     control_thread()->notify_heap_changed();
 743   }
 744 }
 745 
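     // GCLAB slow path: grow the per-thread GCLAB size estimate, retire the current GCLAB
     // and allocate a fresh one; objects that still do not fit fall back to shared allocation.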
 746 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
 747   // New object should fit the GCLAB size
 748   size_t min_size = MAX2(size, PLAB::min_size());
 749 
 750   // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
 751   size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
 752   new_size = MIN2(new_size, PLAB::max_size());
 753   new_size = MAX2(new_size, PLAB::min_size());
 754 
 755   // Record the new heuristic value even if we take a shortcut below. This captures
 756   // the case where moderately-sized objects always take the shortcut. At some point,
 757   // the heuristics should catch up with them.
 758   ShenandoahThreadLocalData::set_gclab_size(thread, new_size);
 759 
 760   if (new_size < size) {
 761     // New size still does not fit the object. Fall back to shared allocation.
 762     // This avoids retiring perfectly good GCLABs, when we encounter a large object.

 763     return nullptr;
 764   }
 765 
 766   // Retire current GCLAB, and allocate a new one.
 767   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
 768   gclab->retire();
 769 
 770   size_t actual_size = 0;
 771   HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
 772   if (gclab_buf == nullptr) {
 773     return nullptr;
 774   }
 775 
 776   assert (size <= actual_size, "allocation should fit");
 777 
 778   if (ZeroTLAB) {
 779     // ..and clear it.
 780     Copy::zero_to_words(gclab_buf, actual_size);
 781   } else {
 782     // ...and zap just allocated object.
 783 #ifdef ASSERT
 784     // Skip mangling the space corresponding to the object header to
 785     // ensure that the returned space is not considered parsable by
 786     // any concurrent GC thread.
 787     size_t hdr_size = oopDesc::header_size();
 788     Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
 789 #endif // ASSERT
 790   }
 791   gclab->set_buf(gclab_buf, actual_size);
 792   return gclab->allocate(size);
 793 }
 794 
 795 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
 796                                             size_t requested_size,
 797                                             size_t* actual_size) {
 798   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
 799   HeapWord* res = allocate_memory(req);
 800   if (res != nullptr) {
 801     *actual_size = req.actual_size();
 802   } else {
 803     *actual_size = 0;
 804   }
 805   return res;
 806 }
 807 
 808 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
 809                                              size_t word_size,
 810                                              size_t* actual_size) {
 811   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
 812   HeapWord* res = allocate_memory(req);
 813   if (res != nullptr) {
 814     *actual_size = req.actual_size();
 815   } else {
 816     *actual_size = 0;
 817   }
 818   return res;
 819 }
 820 
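     // Central allocation path. Mutator allocations are paced and, on failure, retried after
     // notifying the control thread, for as long as GC makes progress; GC allocations must
     // not block and simply return null to the caller on failure.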
 821 HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {


 822   intptr_t pacer_epoch = 0;
 823   bool in_new_region = false;
 824   HeapWord* result = nullptr;
 825 
 826   if (req.is_mutator_alloc()) {
 827     if (ShenandoahPacing) {
 828       pacer()->pace_for_alloc(req.size());
 829       pacer_epoch = pacer()->epoch();
 830     }
 831 
 832     if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
 833       result = allocate_memory_under_lock(req, in_new_region);
 834     }
 835 
 836     // Allocation failed: block until the control thread has reacted, then retry the allocation.
 837     //
 838     // It can happen that a thread requesting an allocation unblocks long after the GC
 839     // has happened, only to fail the second allocation because other threads have already
 840     // depleted the free storage. In that case, the better strategy is to keep retrying,
 841     // as long as GC makes progress.
 842     //
 843     // We also need to make sure the allocation is retried after at least one Full GC,
 844     // which means we want to try more than ShenandoahFullGCThreshold times.
 845 
 846     size_t tries = 0;
 847 
 848     while (result == nullptr && _progress_last_gc.is_set()) {
 849       tries++;
 850       control_thread()->handle_alloc_failure(req);
 851       result = allocate_memory_under_lock(req, in_new_region);
 852     }
 853 
 854     while (result == nullptr && tries <= ShenandoahFullGCThreshold) {
 855       tries++;
 856       control_thread()->handle_alloc_failure(req);
 857       result = allocate_memory_under_lock(req, in_new_region);
 858     }
 859 
 860   } else {
 861     assert(req.is_gc_alloc(), "Can only accept GC allocs here");
 862     result = allocate_memory_under_lock(req, in_new_region);
 863     // Do not call handle_alloc_failure() here, because we cannot block.
 864     // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
 865   }
 866 
 867   if (in_new_region) {
 868     control_thread()->notify_heap_changed();

 869   }
 870 
 871   if (result != nullptr) {

 872     size_t requested = req.size();
 873     size_t actual = req.actual_size();

 874 
 875     assert (req.is_lab_alloc() || (requested == actual),
 876             "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
 877             ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);
 878 
 879     if (req.is_mutator_alloc()) {
 880       notify_mutator_alloc_words(actual, false);

 881 
 882       // If we requested more than we were granted, give the rest back to pacer.
 883       // This only matters if we are in the same pacing epoch: do not try to unpace
 884       // over the budget for the other phase.
 885       if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
 886         pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
 887       }
 888     } else {
 889       increase_used(actual*HeapWordSize);
 890     }
 891   }
 892 
 893   return result;
 894 }
 895 
 896 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
 897   ShenandoahHeapLocker locker(lock());
 898   return _free_set->allocate(req, in_new_region);
 899 }
 900 
 901 HeapWord* ShenandoahHeap::mem_allocate(size_t size,
 902                                         bool*  gc_overhead_limit_was_exceeded) {
 903   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
 904   return allocate_memory(req);
 905 }
 906 
 907 MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
 908                                                              size_t size,
 909                                                              Metaspace::MetadataType mdtype) {
 910   MetaWord* result;
 911 
 912   // Inform metaspace OOM to GC heuristics if class unloading is possible.
 913   if (heuristics()->can_unload_classes()) {
 914     ShenandoahHeuristics* h = heuristics();
 915     h->record_metaspace_oom();
 916   }
 917 
 918   // Expand and retry allocation
 919   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
 920   if (result != nullptr) {
 921     return result;
 922   }
 923 
 924   // Start full GC
 925   collect(GCCause::_metadata_GC_clear_soft_refs);
 926 
 927   // Retry allocation
 928   result = loader_data->metaspace_non_null()->allocate(size, mdtype);
 929   if (result != nullptr) {
 930     return result;
 931   }
 932 
 933   // Expand and retry allocation
 934   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);

 973 
 974   void work(uint worker_id) {
 975     if (_concurrent) {
 976       ShenandoahConcurrentWorkerSession worker_session(worker_id);
 977       ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
 978       ShenandoahEvacOOMScope oom_evac_scope;
 979       do_work();
 980     } else {
 981       ShenandoahParallelWorkerSession worker_session(worker_id);
 982       ShenandoahEvacOOMScope oom_evac_scope;
 983       do_work();
 984     }
 985   }
 986 
 987 private:
 988   void do_work() {
 989     ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
 990     ShenandoahHeapRegion* r;
 991     while ((r =_cs->claim_next()) != nullptr) {
 992       assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());

 993       _sh->marked_object_iterate(r, &cl);
 994 
 995       if (ShenandoahPacing) {
 996         _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
 997       }
 998 
 999       if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
1000         break;
1001       }
1002     }
1003   }
1004 };
1005 
1006 void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
1007   ShenandoahEvacuationTask task(this, _collection_set, concurrent);
1008   workers()->run_task(&task);
1009 }
1010 
1011 void ShenandoahHeap::trash_cset_regions() {
1012   ShenandoahHeapLocker locker(lock());
1013 
1014   ShenandoahCollectionSet* set = collection_set();
1015   ShenandoahHeapRegion* r;
1016   set->clear_current_index();
1017   while ((r = set->next()) != nullptr) {
1018     r->make_trash();
1019   }
1020   collection_set()->clear();
1021 }
1022 
1023 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
1024   st->print_cr("Heap Regions:");
1025   st->print_cr("EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HC=humongous continuation, CS=collection set, T=trash, P=pinned");
1026   st->print_cr("BTE=bottom/top/end, U=used, T=TLAB allocs, G=GCLAB allocs, S=shared allocs, L=live data");
1027   st->print_cr("R=root, CP=critical pins, TAMS=top-at-mark-start, UWM=update watermark");
1028   st->print_cr("SN=alloc sequence number");
1029 
1030   for (size_t i = 0; i < num_regions(); i++) {
1031     get_region(i)->print_on(st);
1032   }
1033 }
1034 
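     // Trashes the whole run of regions backing a dead humongous object, walking from the
     // tail so that region printing never sees a continuation without its start region.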
1035 void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
1036   assert(start->is_humongous_start(), "reclaim regions starting with the first one");
1037 
1038   oop humongous_obj = cast_to_oop(start->bottom());
1039   size_t size = humongous_obj->size();
1040   size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
1041   size_t index = start->index() + required_regions - 1;
1042 
1043   assert(!start->has_live(), "liveness must be zero");
1044 
1045   for(size_t i = 0; i < required_regions; i++) {
1046     // Reclaim from the tail. Otherwise, an assertion fails when printing the region to the trace log,
1047     // as printing expects every humongous continuation region to follow its humongous start region.
1048     ShenandoahHeapRegion* region = get_region(index --);
1049 
1050     assert(region->is_humongous(), "expect correct humongous start or continuation");
1051     assert(!region->is_cset(), "Humongous region should not be in collection set");
1052 
1053     region->make_trash_immediate();
1054   }

1055 }
1056 
1057 class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
1058 public:
1059   ShenandoahCheckCleanGCLABClosure() {}
1060   void do_thread(Thread* thread) {
1061     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1062     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1063     assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
1064   }
1065 };
1066 
1067 class ShenandoahRetireGCLABClosure : public ThreadClosure {
1068 private:
1069   bool const _resize;
1070 public:
1071   ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
1072   void do_thread(Thread* thread) {
1073     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1074     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1075     gclab->retire();
1076     if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1077       ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1078     }
1079   }
1080 };
1081 
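     // LAB retirement helpers: labs_make_parsable() makes mutator TLABs parsable and retires
     // worker GCLABs; tlabs_retire()/gclabs_retire() below retire the buffers and can also
     // reset the per-thread size estimates when resizing is requested.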
1082 void ShenandoahHeap::labs_make_parsable() {
1083   assert(UseTLAB, "Only call with UseTLAB");
1084 
1085   ShenandoahRetireGCLABClosure cl(false);
1086 
1087   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1088     ThreadLocalAllocBuffer& tlab = t->tlab();
1089     tlab.make_parsable();
1090     cl.do_thread(t);
1091   }
1092 
1093   workers()->threads_do(&cl);
1094 }
1095 
1096 void ShenandoahHeap::tlabs_retire(bool resize) {
1097   assert(UseTLAB, "Only call with UseTLAB");
1098   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");

1116   }
1117   workers()->threads_do(&cl);
1118 #endif
1119 }
1120 
1121 void ShenandoahHeap::gclabs_retire(bool resize) {
1122   assert(UseTLAB, "Only call with UseTLAB");
1123   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1124 
1125   ShenandoahRetireGCLABClosure cl(resize);
1126   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1127     cl.do_thread(t);
1128   }
1129   workers()->threads_do(&cl);
1130 
1131   if (safepoint_workers() != nullptr) {
1132     safepoint_workers()->threads_do(&cl);
1133   }
1134 }
1135 
1136 // Returns size in bytes
1137 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1138   if (ShenandoahElasticTLAB) {
1139     // With Elastic TLABs, return the max allowed size, and let the allocation path
1140     // figure out the safe size for current allocation.
1141     return ShenandoahHeapRegion::max_tlab_size_bytes();
1142   } else {
1143     return MIN2(_free_set->unsafe_peek_free(), ShenandoahHeapRegion::max_tlab_size_bytes());
1144   }
1145 }
1146 
1147 size_t ShenandoahHeap::max_tlab_size() const {
1148   // Returns size in words
1149   return ShenandoahHeapRegion::max_tlab_size_words();
1150 }
1151 
1152 void ShenandoahHeap::collect(GCCause::Cause cause) {
1153   control_thread()->request_gc(cause);
1154 }
1155 
1156 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
1157   //assert(false, "Shouldn't need to do full collections");
1158 }
1159 
1160 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1161   ShenandoahHeapRegion* r = heap_region_containing(addr);

1164   }
1165   return nullptr;
1166 }
1167 
1168 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1169   ShenandoahHeapRegion* r = heap_region_containing(addr);
1170   return r->block_is_obj(addr);
1171 }
1172 
1173 bool ShenandoahHeap::print_location(outputStream* st, void* addr) const {
1174   return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr);
1175 }
1176 
1177 void ShenandoahHeap::prepare_for_verify() {
1178   if (SafepointSynchronize::is_at_safepoint() && UseTLAB) {
1179     labs_make_parsable();
1180   }
1181 }
1182 
1183 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1184   tcl->do_thread(_control_thread);

1185   workers()->threads_do(tcl);
1186   if (_safepoint_workers != nullptr) {
1187     _safepoint_workers->threads_do(tcl);
1188   }
1189   if (ShenandoahStringDedup::is_enabled()) {
1190     ShenandoahStringDedup::threads_do(tcl);
1191   }
1192 }
1193 
1194 void ShenandoahHeap::print_tracing_info() const {
1195   LogTarget(Info, gc, stats) lt;
1196   if (lt.is_enabled()) {
1197     ResourceMark rm;
1198     LogStream ls(lt);
1199 
1200     phase_timings()->print_global_on(&ls);
1201 
1202     ls.cr();
1203     ls.cr();
1204 
1205     shenandoah_policy()->print_gc_stats(&ls);
1206 
1207     ls.cr();
1208     ls.cr();
1209   }
1210 }
1211 
1212 void ShenandoahHeap::verify(VerifyOption vo) {
1213   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1214     if (ShenandoahVerify) {
1215       verifier()->verify_generic(vo);
1216     } else {
1217       // TODO: Consider allocating verification bitmaps on demand,
1218       // and turn this on unconditionally.
1219     }
1220   }
1221 }
1222 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1223   return _free_set->capacity();
1224 }
1225 
1226 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1227 private:
1228   MarkBitMap* _bitmap;
1229   ShenandoahScanObjectStack* _oop_stack;
1230   ShenandoahHeap* const _heap;
1231   ShenandoahMarkingContext* const _marking_context;

1513       if (start >= max) break;
1514 
1515       for (size_t i = cur; i < end; i++) {
1516         ShenandoahHeapRegion* current = _heap->get_region(i);
1517         _blk->heap_region_do(current);
1518       }
1519     }
1520   }
1521 };
1522 
1523 void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1524   assert(blk->is_thread_safe(), "Only thread-safe closures here");
1525   if (num_regions() > ShenandoahParallelRegionStride) {
1526     ShenandoahParallelHeapRegionTask task(blk);
1527     workers()->run_task(&task);
1528   } else {
1529     heap_region_iterate(blk);
1530   }
1531 }
1532 
1533 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1534 private:
1535   ShenandoahMarkingContext* const _ctx;
1536 public:
1537   ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
1538 
1539   void heap_region_do(ShenandoahHeapRegion* r) {
1540     assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
1541     if (r->is_active()) {
1542       // Check if region needs updating its TAMS. We have updated it already during concurrent
1543       // reset, so it is very likely we don't need to do another write here.
1544       if (_ctx->top_at_mark_start(r) != r->top()) {
1545         _ctx->capture_top_at_mark_start(r);
1546       }
1547     } else {
1548       assert(_ctx->top_at_mark_start(r) == r->top(),
1549              "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
1550     }
1551   }
1552 
1553   bool is_thread_safe() { return true; }
1554 };
1555 
1556 class ShenandoahRendezvousClosure : public HandshakeClosure {
1557 public:
1558   inline ShenandoahRendezvousClosure() : HandshakeClosure("ShenandoahRendezvous") {}
1559   inline void do_thread(Thread* thread) {}
1560 };
1561 
1562 void ShenandoahHeap::rendezvous_threads() {
1563   ShenandoahRendezvousClosure cl;
1564   Handshake::execute(&cl);
1565 }
1566 
1567 void ShenandoahHeap::recycle_trash() {
1568   free_set()->recycle_trash();
1569 }
1570 
1571 class ShenandoahResetUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1572 private:
1573   ShenandoahMarkingContext* const _ctx;
1574 public:
1575   ShenandoahResetUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
1576 
1577   void heap_region_do(ShenandoahHeapRegion* r) {
1578     if (r->is_active()) {
1579       // Reset live data and set TAMS optimistically. We recheck these under the pause
1580       // anyway, to capture any updates that happen in the meantime.
1581       r->clear_live_data();
1582       _ctx->capture_top_at_mark_start(r);
1583     }
1584   }
1585 
1586   bool is_thread_safe() { return true; }
1587 };
1588 
1589 void ShenandoahHeap::prepare_gc() {
1590   reset_mark_bitmap();
1591 
1592   ShenandoahResetUpdateRegionStateClosure cl;
1593   parallel_heap_region_iterate(&cl);
1594 }
1595 
1596 class ShenandoahFinalMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1597 private:
1598   ShenandoahMarkingContext* const _ctx;
1599   ShenandoahHeapLock* const _lock;
1600 
1601 public:
1602   ShenandoahFinalMarkUpdateRegionStateClosure() :
1603     _ctx(ShenandoahHeap::heap()->complete_marking_context()), _lock(ShenandoahHeap::heap()->lock()) {}
1604 
1605   void heap_region_do(ShenandoahHeapRegion* r) {
1606     if (r->is_active()) {
1607       // All allocations past TAMS are implicitly live, adjust the region data.
1608       // Bitmaps/TAMS are swapped at this point, so we need to poll complete bitmap.
1609       HeapWord *tams = _ctx->top_at_mark_start(r);
1610       HeapWord *top = r->top();
1611       if (top > tams) {
1612         r->increase_live_data_alloc_words(pointer_delta(top, tams));
1613       }
1614 
1615       // We are about to select the collection set; make sure it knows about the current
1616       // pinning status. This also allows trashing more regions whose pinning status has
1617       // since been dropped.
1618       if (r->is_pinned()) {
1619         if (r->pin_count() == 0) {
1620           ShenandoahHeapLocker locker(_lock);
1621           r->make_unpinned();
1622         }
1623       } else {
1624         if (r->pin_count() > 0) {
1625           ShenandoahHeapLocker locker(_lock);
1626           r->make_pinned();
1627         }
1628       }
1629 
1630       // Remember the limit for updating refs. It is guaranteed that no from-space refs
1631       // are written from here on.
1632       r->set_update_watermark_at_safepoint(r->top());
1633     } else {
1634       assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
1635       assert(_ctx->top_at_mark_start(r) == r->top(),
1636              "Region " SIZE_FORMAT " should have correct TAMS", r->index());
1637     }
1638   }
1639 
1640   bool is_thread_safe() { return true; }
1641 };
1642 
1643 void ShenandoahHeap::prepare_regions_and_collection_set(bool concurrent) {
1644   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
1645   {
1646     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_update_region_states :
1647                                          ShenandoahPhaseTimings::degen_gc_final_update_region_states);
1648     ShenandoahFinalMarkUpdateRegionStateClosure cl;
1649     parallel_heap_region_iterate(&cl);
1650 
1651     assert_pinned_region_status();
1652   }
1653 
1654   {
1655     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::choose_cset :
1656                                          ShenandoahPhaseTimings::degen_gc_choose_cset);
1657     ShenandoahHeapLocker locker(lock());
1658     _collection_set->clear();
1659     heuristics()->choose_collection_set(_collection_set);
1660   }
1661 
1662   {
1663     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset :
1664                                          ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
1665     ShenandoahHeapLocker locker(lock());
1666     _free_set->rebuild();
1667   }
1668 }
1669 
1670 void ShenandoahHeap::do_class_unloading() {
1671   _unloader.unload();
1672 }
1673 
1674 void ShenandoahHeap::stw_weak_refs(bool full_gc) {
1675   // Weak refs processing
1676   ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
1677                                                 : ShenandoahPhaseTimings::degen_gc_weakrefs;
1678   ShenandoahTimingsTracker t(phase);
1679   ShenandoahGCWorkerPhase worker_phase(phase);
1680   ref_processor()->process_references(phase, workers(), false /* concurrent */);
1681 }
1682 
1683 void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
1684   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
1685 
1686   // Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to
1687   // make them parsable for update code to work correctly. Plus, we can compute new sizes
1688   // for future GCLABs here.
1689   if (UseTLAB) {
1690     ShenandoahGCPhase phase(concurrent ?
1691                             ShenandoahPhaseTimings::init_update_refs_manage_gclabs :
1692                             ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
1693     gclabs_retire(ResizeTLAB);
1694   }
1695 
1696   _update_refs_iterator.reset();
1697 }
1698 
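     // The collective gc state is mirrored into each Java thread's thread-local data, so the
     // barriers can check it with a thread-local load; set_gc_state_mask() requires a
     // Shenandoah safepoint.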
1699 void ShenandoahHeap::set_gc_state_all_threads(char state) {
1700   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1701     ShenandoahThreadLocalData::set_gc_state(t, state);
1702   }
1703 }
1704 
1705 void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) {
1706   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should really be Shenandoah safepoint");
1707   _gc_state.set_cond(mask, value);
1708   set_gc_state_all_threads(_gc_state.raw_value());
1709 }
1710 
1711 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1712   assert(!has_forwarded_objects(), "Not expected before/after mark phase");
1713   set_gc_state_mask(MARKING, in_progress);
1714   ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1715 }
1716 
1717 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1718   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
1719   set_gc_state_mask(EVACUATION, in_progress);
1720 }
1721 
1722 void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
1723   if (in_progress) {
1724     _concurrent_strong_root_in_progress.set();
1725   } else {
1726     _concurrent_strong_root_in_progress.unset();
1727   }
1728 }
1729 
1730 void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
1731   set_gc_state_mask(WEAK_ROOTS, cond);
1732 }
1733 
1734 GCTracer* ShenandoahHeap::tracer() {

1739   return _free_set->used();
1740 }
1741 
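     // CAS loop on the cancellation flag: returns true for the thread that moves it from
     // CANCELLABLE to CANCELLED, false if GC was already cancelled; Java threads yield to
     // a pending safepoint while spinning on NOT_CANCELLED.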
1742 bool ShenandoahHeap::try_cancel_gc() {
1743   while (true) {
1744     jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
1745     if (prev == CANCELLABLE) return true;
1746     else if (prev == CANCELLED) return false;
1747     assert(ShenandoahSuspendibleWorkers, "should not get here when not using suspendible workers");
1748     assert(prev == NOT_CANCELLED, "must be NOT_CANCELLED");
1749     Thread* thread = Thread::current();
1750     if (thread->is_Java_thread()) {
1751       // We need to provide a safepoint here, otherwise we might
1752       // spin forever if a SP is pending.
1753       ThreadBlockInVM sp(JavaThread::cast(thread));
1754       SpinPause();
1755     }
1756   }
1757 }
1758 
1759 void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
1760   if (try_cancel_gc()) {
1761     FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
1762     log_info(gc)("%s", msg.buffer());
1763     Events::log(Thread::current(), "%s", msg.buffer());
1764   }
1765 }
1766 
1767 uint ShenandoahHeap::max_workers() {
1768   return _max_workers;
1769 }
1770 
1771 void ShenandoahHeap::stop() {
1772   // The shutdown sequence should be able to terminate when GC is running.
1773 
1774   // Step 0. Notify policy to disable event recording.
1775   _shenandoah_policy->record_shutdown();
1776 
1777   // Step 1. Notify control thread that we are in shutdown.
1778   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
1779   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
1780   control_thread()->prepare_for_graceful_shutdown();
1781 
1782   // Step 2. Notify GC workers that we are cancelling GC.
1783   cancel_gc(GCCause::_shenandoah_stop_vm);
1784 
1785   // Step 3. Wait until GC worker exits normally.
1786   control_thread()->stop();
1787 }
1788 
1789 void ShenandoahHeap::stw_unload_classes(bool full_gc) {
1790   if (!unload_classes()) return;
1791   // Unload classes and purge SystemDictionary.
1792   {
1793     ShenandoahPhaseTimings::Phase phase = full_gc ?
1794                                           ShenandoahPhaseTimings::full_gc_purge_class_unload :
1795                                           ShenandoahPhaseTimings::degen_gc_purge_class_unload;
1796     ShenandoahIsAliveSelector is_alive;
1797     CodeCache::UnloadingScope scope(is_alive.is_alive_closure());
1798     ShenandoahGCPhase gc_phase(phase);
1799     ShenandoahGCWorkerPhase worker_phase(phase);
1800     bool purged_class = SystemDictionary::do_unloading(gc_timer());
1801 
1802     uint num_workers = _workers->active_workers();
1803     ShenandoahClassUnloadingTask unlink_task(phase, num_workers, purged_class);
1804     _workers->run_task(&unlink_task);
1805   }

1859 }
1860 
1861 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
1862   set_gc_state_mask(HAS_FORWARDED, cond);
1863 }
1864 
1865 void ShenandoahHeap::set_unload_classes(bool uc) {
1866   _unload_classes.set_cond(uc);
1867 }
1868 
1869 bool ShenandoahHeap::unload_classes() const {
1870   return _unload_classes.is_set();
1871 }
1872 
1873 address ShenandoahHeap::in_cset_fast_test_addr() {
1874   ShenandoahHeap* heap = ShenandoahHeap::heap();
1875   assert(heap->collection_set() != nullptr, "Sanity");
1876   return (address) heap->collection_set()->biased_map_address();
1877 }
1878 
1879 address ShenandoahHeap::cancelled_gc_addr() {
1880   return (address) ShenandoahHeap::heap()->_cancelled_gc.addr_of();
1881 }
1882 
1883 address ShenandoahHeap::gc_state_addr() {
1884   return (address) ShenandoahHeap::heap()->_gc_state.addr_of();
1885 }
1886 
1887 size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
1888   return Atomic::load(&_bytes_allocated_since_gc_start);
1889 }
1890 
1891 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
1892   Atomic::store(&_bytes_allocated_since_gc_start, (size_t)0);
1893 }
1894 
1895 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
1896   _degenerated_gc_in_progress.set_cond(in_progress);
1897 }
1898 
1899 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
1900   _full_gc_in_progress.set_cond(in_progress);
1901 }
1902 
1903 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
1904   assert (is_full_gc_in_progress(), "should be");
1905   _full_gc_move_in_progress.set_cond(in_progress);
1906 }
1907 
1908 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
1909   set_gc_state_mask(UPDATEREFS, in_progress);
1910 }
1911 
1912 void ShenandoahHeap::register_nmethod(nmethod* nm) {

1936     if (r->is_active()) {
1937       if (r->is_pinned()) {
1938         if (r->pin_count() == 0) {
1939           r->make_unpinned();
1940         }
1941       } else {
1942         if (r->pin_count() > 0) {
1943           r->make_pinned();
1944         }
1945       }
1946     }
1947   }
1948 
1949   assert_pinned_region_status();
1950 }
1951 
1952 #ifdef ASSERT
1953 void ShenandoahHeap::assert_pinned_region_status() {
1954   for (size_t i = 0; i < num_regions(); i++) {
1955     ShenandoahHeapRegion* r = get_region(i);
1956     assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
1957            "Region " SIZE_FORMAT " pinning status is inconsistent", i);


1958   }
1959 }
1960 #endif
1961 
1962 ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
1963   return _gc_timer;
1964 }
1965 
1966 void ShenandoahHeap::prepare_concurrent_roots() {
1967   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
1968   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
1969   set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
1970   set_concurrent_weak_root_in_progress(true);
1971   if (unload_classes()) {
1972     _unloader.prepare();
1973   }
1974 }
1975 
1976 void ShenandoahHeap::finish_concurrent_roots() {
1977   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

1997       assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
1998     } else {
1999       // Use ConcGCThreads outside safepoints
2000       assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
2001     }
2002   }
2003 }
2004 #endif
2005 
2006 ShenandoahVerifier* ShenandoahHeap::verifier() {
2007   guarantee(ShenandoahVerify, "Should be enabled");
2008   assert (_verifier != nullptr, "sanity");
2009   return _verifier;
2010 }
2011 
2012 template<bool CONCURRENT>
2013 class ShenandoahUpdateHeapRefsTask : public WorkerTask {
2014 private:
2015   ShenandoahHeap* _heap;
2016   ShenandoahRegionIterator* _regions;


2017 public:
2018   ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions) :

2019     WorkerTask("Shenandoah Update References"),
2020     _heap(ShenandoahHeap::heap()),
2021     _regions(regions) {


2022   }
2023 
2024   void work(uint worker_id) {
2025     if (CONCURRENT) {
2026       ShenandoahConcurrentWorkerSession worker_session(worker_id);
2027       ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
2028       do_work<ShenandoahConcUpdateRefsClosure>();
2029     } else {
2030       ShenandoahParallelWorkerSession worker_session(worker_id);
2031       do_work<ShenandoahSTWUpdateRefsClosure>();
2032     }
2033   }
2034 
2035 private:
2036   template<class T>
2037   void do_work() {
2038     T cl;
2039     ShenandoahHeapRegion* r = _regions->next();
2040     ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();



2041     while (r != nullptr) {
2042       HeapWord* update_watermark = r->get_update_watermark();
2043       assert (update_watermark >= r->bottom(), "sanity");



2044       if (r->is_active() && !r->is_cset()) {
2045         _heap->marked_object_oop_iterate(r, &cl, update_watermark);






























2046       }
2047       if (ShenandoahPacing) {
2048         _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
2049       }
2050       if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
2051         return;
2052       }
2053       r = _regions->next();
2054     }


















































































































2055   }
2056 };
2057 
2058 void ShenandoahHeap::update_heap_references(bool concurrent) {
2059   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");


2060 
2061   if (concurrent) {
2062     ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator);
2063     workers()->run_task(&task);
2064   } else {
2065     ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator);
2066     workers()->run_task(&task);
2067   }



2068 }
2069 
2070 
2071 class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
2072 private:

2073   ShenandoahHeapLock* const _lock;

2074 
2075 public:
2076   ShenandoahFinalUpdateRefsUpdateRegionStateClosure() : _lock(ShenandoahHeap::heap()->lock()) {}


2077 
2078   void heap_region_do(ShenandoahHeapRegion* r) {





















2079     // Drop unnecessary "pinned" state from regions that do not have CP marks
2080     // anymore, as this would allow trashing them.
2081 
2082     if (r->is_active()) {
2083       if (r->is_pinned()) {
2084         if (r->pin_count() == 0) {
2085           ShenandoahHeapLocker locker(_lock);
2086           r->make_unpinned();
2087         }
2088       } else {
2089         if (r->pin_count() > 0) {
2090           ShenandoahHeapLocker locker(_lock);
2091           r->make_pinned();
2092         }
2093       }
2094     }
2095   }
2096 
2097   bool is_thread_safe() { return true; }
2098 };
2099 
2100 void ShenandoahHeap::update_heap_region_states(bool concurrent) {
2101   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2102   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2103 
2104   {
2105     ShenandoahGCPhase phase(concurrent ?
2106                             ShenandoahPhaseTimings::final_update_refs_update_region_states :
2107                             ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
2108     ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl;
2109     parallel_heap_region_iterate(&cl);
2110 
2111     assert_pinned_region_status();
2112   }
2113 
2114   {
2115     ShenandoahGCPhase phase(concurrent ?
2116                             ShenandoahPhaseTimings::final_update_refs_trash_cset :
2117                             ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
2118     trash_cset_regions();
2119   }
2120 }
2121 
2122 void ShenandoahHeap::rebuild_free_set(bool concurrent) {
2123   {
2124     ShenandoahGCPhase phase(concurrent ?
2125                             ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
2126                             ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
2127     ShenandoahHeapLocker locker(lock());
2128     _free_set->rebuild();

2222   EventMark em("%s", msg);
2223 
2224   op_uncommit(shrink_before, shrink_until);
2225 }
2226 
2227 void ShenandoahHeap::try_inject_alloc_failure() {
2228   if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2229     _inject_alloc_failure.set();
2230     os::naked_short_sleep(1);
2231     if (cancelled_gc()) {
2232       log_info(gc)("Allocation failure was successfully injected");
2233     }
2234   }
2235 }
2236 
2237 bool ShenandoahHeap::should_inject_alloc_failure() {
2238   return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2239 }
2240 
2241 void ShenandoahHeap::initialize_serviceability() {
2242   _memory_pool = new ShenandoahMemoryPool(this);
2243   _cycle_memory_manager.add_pool(_memory_pool);
2244   _stw_memory_manager.add_pool(_memory_pool);









2245 }
2246 
2247 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2248   GrowableArray<GCMemoryManager*> memory_managers(2);
2249   memory_managers.append(&_cycle_memory_manager);
2250   memory_managers.append(&_stw_memory_manager);
2251   return memory_managers;
2252 }
2253 
2254 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2255   GrowableArray<MemoryPool*> memory_pools(1);
2256   memory_pools.append(_memory_pool);





2257   return memory_pools;
2258 }
2259 
2260 MemoryUsage ShenandoahHeap::memory_usage() {
2261   return _memory_pool->get_memory_usage();
2262 }
2263 
2264 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2265   _heap(ShenandoahHeap::heap()),
2266   _index(0) {}
2267 
2268 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2269   _heap(heap),
2270   _index(0) {}
2271 
2272 void ShenandoahRegionIterator::reset() {
2273   _index = 0;
2274 }
2275 
2276 bool ShenandoahRegionIterator::has_next() const {
2277   return _index < _heap->num_regions();
2278 }
2279 
2280 char ShenandoahHeap::gc_state() const {
2281   return _gc_state.raw_value();
2282 }
2283 
2284 ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
2285 #ifdef ASSERT
2286   assert(_liveness_cache != nullptr, "sanity");
2287   assert(worker_id < _max_workers, "sanity");
2288   for (uint i = 0; i < num_regions(); i++) {
2289     assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
2290   }
2291 #endif
2292   return _liveness_cache[worker_id];
2293 }
2294 
2295 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
2296   assert(worker_id < _max_workers, "sanity");
2297   assert(_liveness_cache != nullptr, "sanity");
2298   ShenandoahLiveData* ld = _liveness_cache[worker_id];

2299   for (uint i = 0; i < num_regions(); i++) {
2300     ShenandoahLiveData live = ld[i];
2301     if (live > 0) {
2302       ShenandoahHeapRegion* r = get_region(i);
2303       r->increase_live_data_gc_words(live);
2304       ld[i] = 0;
2305     }
2306   }
2307 }
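// Illustrative sketch (not part of the change): the two functions above form a per-worker
// scatter/gather pattern. A worker obtains its private cache once, bumps per-region counters with
// no synchronization on the hot path, and later folds the non-zero entries into the regions. The
// actual call sites live in the marking code and are not shown here; names below match this file.
//
//   ShenandoahLiveData* ld = heap->get_liveness_cache(worker_id);
//   ld[region_index] += (ShenandoahLiveData) live_words;   // no atomics: cache is worker-private
//   ...
//   heap->flush_liveness_cache(worker_id);                 // adds non-zero entries to region live data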
2308 
2309 bool ShenandoahHeap::requires_barriers(stackChunkOop obj) const {
2310   if (is_idle()) return false;
2311 
2312   // Objects allocated after marking start are implicitly alive and don't need any barriers
2313   // during the marking phase.
2314   if (is_concurrent_mark_in_progress() &&
2315      !marking_context()->allocated_after_mark_start(obj)) {
2316     return true;
2317   }
2318 
2319   // Cannot guarantee obj is deeply good.
2320   if (has_forwarded_objects()) {
2321     return true;
2322   }
2323 
2324   return false;
2325 }
























































































































































































































  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "memory/allocation.hpp"
  28 #include "memory/universe.hpp"
  29 
  30 #include "gc/shared/gcArguments.hpp"
  31 #include "gc/shared/gcTimer.hpp"
  32 #include "gc/shared/gcTraceTime.inline.hpp"
  33 #include "gc/shared/locationPrinter.inline.hpp"
  34 #include "gc/shared/memAllocator.hpp"
  35 #include "gc/shared/plab.hpp"
  36 #include "gc/shared/tlab_globals.hpp"
  37 
  38 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  39 #include "gc/shenandoah/shenandoahCardTable.hpp"
  40 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  41 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  42 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  43 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
  44 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  45 #include "gc/shenandoah/shenandoahControlThread.hpp"
  46 #include "gc/shenandoah/shenandoahRegulatorThread.hpp"
  47 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  48 #include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
  49 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  50 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  51 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  52 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  53 #include "gc/shenandoah/shenandoahInitLogger.hpp"
  54 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  55 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
  56 #include "gc/shenandoah/shenandoahMetrics.hpp"
  57 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  58 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  59 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  60 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
  61 #include "gc/shenandoah/shenandoahPadding.hpp"
  62 #include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
  63 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  64 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  65 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
  66 #include "gc/shenandoah/shenandoahStringDedup.hpp"
  67 #include "gc/shenandoah/shenandoahSTWMark.hpp"
  68 #include "gc/shenandoah/shenandoahUtils.hpp"
  69 #include "gc/shenandoah/shenandoahVerifier.hpp"
  70 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  71 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  72 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  73 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  74 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  75 #include "gc/shenandoah/mode/shenandoahGenerationalMode.hpp"
  76 #include "gc/shenandoah/mode/shenandoahIUMode.hpp"
  77 #include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
  78 #include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
  79 
  80 #if INCLUDE_JFR
  81 #include "gc/shenandoah/shenandoahJfrSupport.hpp"
  82 #endif
  83 
  84 #include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
  85 
  86 #include "classfile/systemDictionary.hpp"
  87 #include "code/codeCache.hpp"
  88 #include "memory/classLoaderMetaspace.hpp"
  89 #include "memory/metaspaceUtils.hpp"
  90 #include "oops/compressedOops.inline.hpp"
  91 #include "prims/jvmtiTagMap.hpp"
  92 #include "runtime/atomic.hpp"
  93 #include "runtime/globals.hpp"
  94 #include "runtime/interfaceSupport.inline.hpp"
  95 #include "runtime/java.hpp"
  96 #include "runtime/orderAccess.hpp"
  97 #include "runtime/safepointMechanism.hpp"
  98 #include "runtime/vmThread.hpp"
  99 #include "services/mallocTracker.hpp"
 100 #include "services/memTracker.hpp"
 101 #include "utilities/events.hpp"
 102 #include "utilities/powerOfTwo.hpp"
 103 
 104 class ShenandoahPretouchHeapTask : public WorkerTask {
 105 private:

 153 jint ShenandoahHeap::initialize() {
 154   //
 155   // Figure out heap sizing
 156   //
 157 
 158   size_t init_byte_size = InitialHeapSize;
 159   size_t min_byte_size  = MinHeapSize;
 160   size_t max_byte_size  = MaxHeapSize;
 161   size_t heap_alignment = HeapAlignment;
 162 
 163   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 164 
 165   Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
 166   Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
 167 
 168   _num_regions = ShenandoahHeapRegion::region_count();
 169   assert(_num_regions == (max_byte_size / reg_size_bytes),
 170          "Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT,
 171          _num_regions, max_byte_size, reg_size_bytes);
 172 



 173   size_t num_committed_regions = init_byte_size / reg_size_bytes;
 174   num_committed_regions = MIN2(num_committed_regions, _num_regions);
 175   assert(num_committed_regions <= _num_regions, "sanity");
 176   _initial_size = num_committed_regions * reg_size_bytes;
 177 
 178   size_t num_min_regions = min_byte_size / reg_size_bytes;
 179   num_min_regions = MIN2(num_min_regions, _num_regions);
 180   assert(num_min_regions <= _num_regions, "sanity");
 181   _minimum_size = num_min_regions * reg_size_bytes;
 182 
 183   // Default to max heap size.
 184   _soft_max_size = _num_regions * reg_size_bytes;
 185 
 186   _committed = _initial_size;
 187 
 188   // Now we know the number of regions and heap sizes, initialize the heuristics.
 189   initialize_generations();
 190   initialize_heuristics();
 191 
 192   size_t heap_page_size   = UseLargePages ? os::large_page_size() : os::vm_page_size();
 193   size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 194   size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 195 
 196   //
 197   // Reserve and commit memory for heap
 198   //
 199 
 200   ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
 201   initialize_reserved_region(heap_rs);
 202   _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
 203   _heap_region_special = heap_rs.special();
 204 
 205   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
 206          "Misaligned heap: " PTR_FORMAT, p2i(base()));
 207 
 208 #if SHENANDOAH_OPTIMIZED_MARKTASK
 209   // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
 210   // Fail if we ever attempt to address more than we can.
 211   if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
 212     FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
 213                           "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
 214                           "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
 215                 p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
 216     vm_exit_during_initialization("Fatal Error", buf);
 217   }
 218 #endif
 219 
 220   ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
 221   if (!_heap_region_special) {
 222     os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
 223                               "Cannot commit heap memory");
 224   }
 225 
 226   BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this, _heap_region));
 227 
 228   //
 229   // After reserving the Java heap, create the card table, barriers, and workers, in dependency order
 230   //
 231   if (mode()->is_generational()) {
 232     ShenandoahDirectCardMarkRememberedSet *rs;
 233     ShenandoahCardTable* card_table = ShenandoahBarrierSet::barrier_set()->card_table();
 234     size_t card_count = card_table->cards_required(heap_rs.size() / HeapWordSize);
 235     rs = new ShenandoahDirectCardMarkRememberedSet(ShenandoahBarrierSet::barrier_set()->card_table(), card_count);
 236     _card_scan = new ShenandoahScanRemembered<ShenandoahDirectCardMarkRememberedSet>(rs);
 237   }
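// Illustrative sizing sketch for the remembered set above (assumed numbers, not part of the
// change): with a 1 GB heap and an assumed 512-byte card, i.e. 64 HeapWords per card on a
// 64-bit VM,
//
//   heap_words  = 1 GB / 8 B                      = 134,217,728 words
//   card_count ~= 134,217,728 / 64 words per card =   2,097,152 cards
//
// so the ShenandoahDirectCardMarkRememberedSet is sized once from the reserved heap, covering
// the whole heap with fixed-size cards.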
 238 
 239   _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
 240   if (_workers == nullptr) {
 241     vm_exit_during_initialization("Failed necessary allocation.");
 242   } else {
 243     _workers->initialize_workers();
 244   }
 245 
 246   if (ParallelGCThreads > 1) {
 247     _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread", ParallelGCThreads);
 248     _safepoint_workers->initialize_workers();
 249   }
 250 
 251   //
 252   // Reserve and commit memory for bitmap(s)
 253   //
 254 
 255   _bitmap_size = ShenandoahMarkBitMap::compute_size(heap_rs.size());
 256   _bitmap_size = align_up(_bitmap_size, bitmap_page_size);
 257 
 258   size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();
 259 
 260   guarantee(bitmap_bytes_per_region != 0,
 261             "Bitmap bytes per region should not be zero");
 262   guarantee(is_power_of_2(bitmap_bytes_per_region),
 263             "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
 264 
 265   if (bitmap_page_size > bitmap_bytes_per_region) {
 266     _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
 267     _bitmap_bytes_per_slice = bitmap_page_size;
 268   } else {
 269     _bitmap_regions_per_slice = 1;
 270     _bitmap_bytes_per_slice = bitmap_bytes_per_region;

 274             "Should have at least one region per slice: " SIZE_FORMAT,
 275             _bitmap_regions_per_slice);
 276 
 277   guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
 278             "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
 279             _bitmap_bytes_per_slice, bitmap_page_size);
 280 
 281   ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
 282   MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
 283   _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
 284   _bitmap_region_special = bitmap.special();
 285 
 286   size_t bitmap_init_commit = _bitmap_bytes_per_slice *
 287                               align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
 288   bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
 289   if (!_bitmap_region_special) {
 290     os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
 291                               "Cannot commit bitmap memory");
 292   }
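// Worked example for the bitmap slicing above (assumed values, not from this change). Assume a
// 2 MB region and assume the mark bitmap needs reg_size_bytes / 64 = 32 KB of bitmap per region
// (the exact ratio comes from heap_map_factor()).
//
//   - 4 KB pages:  4 KB <= 32 KB, so each slice covers one region and spans 32 KB.
//   - 2 MB pages:  2 MB  > 32 KB, so each slice covers 2 MB / 32 KB = 64 regions and spans 2 MB.
//
// bitmap_init_commit then rounds the initially committed region count up to whole slices, keeping
// commit granularity aligned with the bitmap page size.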
 293 
 294   _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);
 295 
 296   if (ShenandoahVerify) {
 297     ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
 298     if (!verify_bitmap.special()) {
 299       os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
 300                                 "Cannot commit verification bitmap memory");
 301     }
 302     MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
 303     MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
 304     _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
 305     _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
 306   }
 307 
 308   // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
 309   ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
 310   MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
 311   _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
 312   _aux_bitmap_region_special = aux_bitmap.special();
 313   _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
 314 

 337     uintptr_t max = (1u << 30u);
 338 
 339     for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
 340       char* req_addr = (char*)addr;
 341       assert(is_aligned(req_addr, cset_align), "Should be aligned");
 342       ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size(), req_addr);
 343       if (cset_rs.is_reserved()) {
 344         assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
 345         _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 346         break;
 347       }
 348     }
 349 
 350     if (_collection_set == nullptr) {
 351       ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size());
 352       _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 353     }
 354   }
 355 
 356   _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
 357   _affiliations = NEW_C_HEAP_ARRAY(uint8_t, _num_regions, mtGC);
 358   _free_set = new ShenandoahFreeSet(this, _num_regions);
 359 
 360   {
 361     ShenandoahHeapLocker locker(lock());
 362 
 363 
 364     for (size_t i = 0; i < _num_regions; i++) {
 365       HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
 366       bool is_committed = i < num_committed_regions;
 367       void* loc = region_storage.base() + i * region_align;
 368 
 369       ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
 370       assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");
 371 
 372       _marking_context->initialize_top_at_mark_start(r);
 373       _regions[i] = r;
 374       assert(!collection_set()->is_in(i), "New region should not be in collection set");
 375 
 376       _affiliations[i] = ShenandoahRegionAffiliation::FREE;
 377     }
 378 
 379     // Initialize to complete
 380     _marking_context->mark_complete();
 381 
 382     _free_set->rebuild();
 383   }
 384 
 385   if (AlwaysPreTouch) {
 386     // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
 387     // before initialize() below zeroes it with the initializing thread. For any given region,
 388     // we touch the region and the corresponding bitmaps from the same thread.
 389     ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
 390 
 391     _pretouch_heap_page_size = heap_page_size;
 392     _pretouch_bitmap_page_size = bitmap_page_size;
 393 
 394 #ifdef LINUX
 395     // UseTransparentHugePages would madvise that backing memory can be coalesced into huge
 396     // pages. But, the kernel needs to know that every small page is used, in order to coalesce

 424   // There should probably be Shenandoah-specific options for these,
 425   // just as there are G1-specific options.
 426   {
 427     ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
 428     satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
 429     satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
 430   }
 431 
 432   _monitoring_support = new ShenandoahMonitoringSupport(this);
 433   _phase_timings = new ShenandoahPhaseTimings(max_workers());
 434   ShenandoahCodeRoots::initialize();
 435 
 436   if (ShenandoahPacing) {
 437     _pacer = new ShenandoahPacer(this);
 438     _pacer->setup_for_idle();
 439   } else {
 440     _pacer = nullptr;
 441   }
 442 
 443   _control_thread = new ShenandoahControlThread();
 444   _regulator_thread = new ShenandoahRegulatorThread(_control_thread);
 445 
 446   ShenandoahInitLogger::print();
 447 
 448   return JNI_OK;
 449 }
 450 
 451 size_t ShenandoahHeap::max_size_for(ShenandoahGeneration* generation) const {
 452   switch (generation->generation_mode()) {
 453     case YOUNG:  return _generation_sizer.max_young_size();
 454     case OLD:    return max_capacity() - _generation_sizer.min_young_size();
 455     case GLOBAL: return max_capacity();
 456     default:
 457       ShouldNotReachHere();
 458       return 0;
 459   }
 460 }
 461 
 462 size_t ShenandoahHeap::min_size_for(ShenandoahGeneration* generation) const {
 463   switch (generation->generation_mode()) {
 464     case YOUNG:  return _generation_sizer.min_young_size();
 465     case OLD:    return max_capacity() - _generation_sizer.max_young_size();
 466     case GLOBAL: return min_capacity();
 467     default:
 468       ShouldNotReachHere();
 469       return 0;
 470   }
 471 }
 472 
 473 void ShenandoahHeap::initialize_generations() {
 474   // Max capacity is the maximum _allowed_ capacity. That is, the maximum allowed capacity
 475   // for old would be total heap - minimum capacity of young. This means the sum of the maximum
 476   // allowed for old and young could exceed the total heap size. It remains the case that the
 477   // _actual_ capacity of young + old = total.
 478   _generation_sizer.heap_size_changed(soft_max_capacity());
 479   size_t initial_capacity_young = _generation_sizer.max_young_size();
 480   size_t max_capacity_young = _generation_sizer.max_young_size();
 481   size_t initial_capacity_old = max_capacity() - max_capacity_young;
 482   size_t max_capacity_old = max_capacity() - initial_capacity_young;
 483 
 484   _young_generation = new ShenandoahYoungGeneration(_max_workers, max_capacity_young, initial_capacity_young);
 485   _old_generation = new ShenandoahOldGeneration(_max_workers, max_capacity_old, initial_capacity_old);
 486   _global_generation = new ShenandoahGlobalGeneration(_max_workers, soft_max_capacity(), soft_max_capacity());
 487 }
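// Worked example for the capacity comment above (made-up numbers). Suppose max_capacity() = 1024 MB
// and the generation sizer reports max_young_size() = 768 MB and min_young_size() = 256 MB (both
// illustrative assumptions). The bounds used by max_size_for()/min_size_for() are then:
//
//   young: min = 256 MB,              max = 768 MB
//   old:   min = 1024 - 768 = 256 MB, max = 1024 - 256 = 768 MB
//
// max(young) + max(old) = 1536 MB exceeds the 1024 MB heap, which is exactly what the comment
// allows: each generation may grow up to its own bound, while the actual young + old capacity at
// any moment still sums to the total heap.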
 488 
 489 void ShenandoahHeap::initialize_heuristics() {
 490   if (ShenandoahGCMode != nullptr) {
 491     if (strcmp(ShenandoahGCMode, "satb") == 0) {
 492       _gc_mode = new ShenandoahSATBMode();
 493     } else if (strcmp(ShenandoahGCMode, "iu") == 0) {
 494       _gc_mode = new ShenandoahIUMode();
 495     } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
 496       _gc_mode = new ShenandoahPassiveMode();
 497     } else if (strcmp(ShenandoahGCMode, "generational") == 0) {
 498       _gc_mode = new ShenandoahGenerationalMode();
 499     } else {
 500       vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
 501     }
 502   } else {
 503     vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
 504   }
 505   _gc_mode->initialize_flags();
 506   if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 507     vm_exit_during_initialization(
 508             err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 509                     _gc_mode->name()));
 510   }
 511   if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
 512     vm_exit_during_initialization(
 513             err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 514                     _gc_mode->name()));
 515   }

 516 
 517   _global_generation->initialize_heuristics(_gc_mode);
 518   if (mode()->is_generational()) {
 519     _young_generation->initialize_heuristics(_gc_mode);
 520     _old_generation->initialize_heuristics(_gc_mode);
 521 
 522     ShenandoahEvacWaste = ShenandoahGenerationalEvacWaste;








 523   }
 524 }
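// Note on usage (illustrative, not mandated by this change): the mode selection above is driven by
// -XX:ShenandoahGCMode with the accepted values "satb", "iu", "passive" and "generational", e.g.
//
//   java -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational ...
//
// A mode that reports itself as diagnostic or experimental additionally requires
// -XX:+UnlockDiagnosticVMOptions or -XX:+UnlockExperimentalVMOptions respectively, as enforced by
// the checks above.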
 525 
 526 #ifdef _MSC_VER
 527 #pragma warning( push )
 528 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 529 #endif
 530 
 531 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 532   CollectedHeap(),
 533   _gc_generation(nullptr),
 534   _prepare_for_old_mark(false),
 535   _initial_size(0),
 536   _used(0),
 537   _committed(0),
 538   _max_workers(MAX3(ConcGCThreads, ParallelGCThreads, 1U)),

 539   _workers(nullptr),
 540   _safepoint_workers(nullptr),
 541   _heap_region_special(false),
 542   _num_regions(0),
 543   _regions(nullptr),
 544   _affiliations(nullptr),
 545   _update_refs_iterator(this),
 546   _alloc_supplement_reserve(0),
 547   _promoted_reserve(0),
 548   _old_evac_reserve(0),
 549   _old_evac_expended(0),
 550   _young_evac_reserve(0),
 551   _captured_old_usage(0),
 552   _previous_promotion(0),
 553   _cancel_requested_time(0),
 554   _young_generation(nullptr),
 555   _global_generation(nullptr),
 556   _old_generation(nullptr),
 557   _control_thread(nullptr),
 558   _regulator_thread(nullptr),
 559   _shenandoah_policy(policy),


 560   _free_set(nullptr),
 561   _pacer(nullptr),
 562   _verifier(nullptr),
 563   _phase_timings(nullptr),
 564   _evac_tracker(new ShenandoahEvacuationTracker()),
 565   _mmu_tracker(),
 566   _generation_sizer(&_mmu_tracker),
 567   _monitoring_support(nullptr),
 568   _memory_pool(nullptr),
 569   _young_gen_memory_pool(nullptr),
 570   _old_gen_memory_pool(nullptr),
 571   _stw_memory_manager("Shenandoah Pauses", "end of GC pause"),
 572   _cycle_memory_manager("Shenandoah Cycles", "end of GC cycle"),
 573   _gc_timer(new ConcurrentGCTimer()),
 574   _soft_ref_policy(),
 575   _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),

 576   _marking_context(nullptr),
 577   _bitmap_size(0),
 578   _bitmap_regions_per_slice(0),
 579   _bitmap_bytes_per_slice(0),
 580   _bitmap_region_special(false),
 581   _aux_bitmap_region_special(false),
 582   _liveness_cache(nullptr),
 583   _collection_set(nullptr),
 584   _card_scan(nullptr)
 585 {

















 586 }
 587 
 588 #ifdef _MSC_VER
 589 #pragma warning( pop )
 590 #endif
 591 





























 592 void ShenandoahHeap::print_on(outputStream* st) const {
 593   st->print_cr("Shenandoah Heap");
 594   st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
 595                byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
 596                byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
 597                byte_size_in_proper_unit(committed()),    proper_unit_for_byte_size(committed()),
 598                byte_size_in_proper_unit(used()),         proper_unit_for_byte_size(used()));
 599   st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions",
 600                num_regions(),
 601                byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
 602                proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));
 603 
 604   st->print("Status: ");
 605   if (has_forwarded_objects())                 st->print("has forwarded objects, ");
 606   if (is_concurrent_old_mark_in_progress())    st->print("old marking, ");
 607   if (is_concurrent_young_mark_in_progress())  st->print("young marking, ");
 608   if (is_evacuation_in_progress())             st->print("evacuating, ");
 609   if (is_update_refs_in_progress())            st->print("updating refs, ");
 610   if (is_degenerated_gc_in_progress())         st->print("degenerated gc, ");
 611   if (is_full_gc_in_progress())                st->print("full gc, ");
 612   if (is_full_gc_move_in_progress())           st->print("full gc move, ");
 613   if (is_concurrent_weak_root_in_progress())   st->print("concurrent weak roots, ");
 614   if (is_concurrent_strong_root_in_progress() &&
 615       !is_concurrent_weak_root_in_progress())  st->print("concurrent strong roots, ");
 616 
 617   if (cancelled_gc()) {
 618     st->print("cancelled");
 619   } else {
 620     st->print("not cancelled");
 621   }
 622   st->cr();
 623 
 624   st->print_cr("Reserved region:");
 625   st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
 626                p2i(reserved_region().start()),
 627                p2i(reserved_region().end()));

 637 
 638   st->cr();
 639   MetaspaceUtils::print_on(st);
 640 
 641   if (Verbose) {
 642     print_heap_regions_on(st);
 643   }
 644 }
 645 
 646 class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
 647 public:
 648   void do_thread(Thread* thread) {
 649     assert(thread != nullptr, "Sanity");
 650     assert(thread->is_Worker_thread(), "Only worker thread expected");
 651     ShenandoahThreadLocalData::initialize_gclab(thread);
 652   }
 653 };
 654 
 655 void ShenandoahHeap::post_initialize() {
 656   CollectedHeap::post_initialize();
 657   _mmu_tracker.initialize();
 658 
 659   MutexLocker ml(Threads_lock);
 660 
 661   ShenandoahInitWorkerGCLABClosure init_gclabs;
 662   _workers->threads_do(&init_gclabs);
 663 
 664   // The GCLAB cannot be initialized early during VM startup, as it cannot determine its max_size.
 665   // Instead, we let WorkerThreads initialize the gclab when a new worker is created.
 666   _workers->set_initialize_gclab();
 667   if (_safepoint_workers != nullptr) {
 668     _safepoint_workers->threads_do(&init_gclabs);
 669     _safepoint_workers->set_initialize_gclab();
 670   }
 671 


 672   JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers());
 673 }
 674 
 675 
 676 ShenandoahOldHeuristics* ShenandoahHeap::old_heuristics() {
 677   return (ShenandoahOldHeuristics*) _old_generation->heuristics();
 678 }
 679 
 680 bool ShenandoahHeap::doing_mixed_evacuations() {
 681   return _old_generation->state() == ShenandoahOldGeneration::WAITING_FOR_EVAC;
 682 }
 683 
 684 bool ShenandoahHeap::is_old_bitmap_stable() const {
 685   ShenandoahOldGeneration::State state = _old_generation->state();
 686   return state != ShenandoahOldGeneration::MARKING
 687       && state != ShenandoahOldGeneration::BOOTSTRAPPING;
 688 }
 689 
 690 bool ShenandoahHeap::is_gc_generation_young() const {
 691   return _gc_generation != nullptr && _gc_generation->generation_mode() == YOUNG;
 692 }
 693 
 694 size_t ShenandoahHeap::used() const {
 695   return Atomic::load(&_used);
 696 }
 697 
 698 size_t ShenandoahHeap::committed() const {
 699   return Atomic::load(&_committed);
 700 }
 701 
 702 void ShenandoahHeap::increase_committed(size_t bytes) {
 703   shenandoah_assert_heaplocked_or_safepoint();
 704   _committed += bytes;
 705 }
 706 
 707 void ShenandoahHeap::decrease_committed(size_t bytes) {
 708   shenandoah_assert_heaplocked_or_safepoint();
 709   _committed -= bytes;
 710 }
 711 
 712 void ShenandoahHeap::increase_used(size_t bytes) {
 713   Atomic::add(&_used, bytes, memory_order_relaxed);
 714 }
 715 
 716 void ShenandoahHeap::set_used(size_t bytes) {
 717   Atomic::store(&_used, bytes);
 718 }
 719 
 720 void ShenandoahHeap::decrease_used(size_t bytes) {
 721   assert(used() >= bytes, "never decrease heap size by more than we've left");
 722   Atomic::sub(&_used, bytes, memory_order_relaxed);
 723 }
 724 




 725 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
 726   size_t bytes = words * HeapWordSize;
 727   if (!waste) {
 728     increase_used(bytes);
 729   }
 730 
 731   if (ShenandoahPacing) {
 732     control_thread()->pacing_notify_alloc(words);
 733     if (waste) {
 734       pacer()->claim_for_alloc(words, true);
 735     }
 736   }
 737 }
 738 
 739 size_t ShenandoahHeap::capacity() const {
 740   return committed();
 741 }
 742 
 743 size_t ShenandoahHeap::max_capacity() const {
 744   return _num_regions * ShenandoahHeapRegion::region_size_bytes();
 745 }
 746 
 747 size_t ShenandoahHeap::soft_max_capacity() const {
 748   size_t v = Atomic::load(&_soft_max_size);
 749   assert(min_capacity() <= v && v <= max_capacity(),
 750          "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
 751          min_capacity(), v, max_capacity());
 752   return v;
 753 }
 754 
 755 void ShenandoahHeap::set_soft_max_capacity(size_t v) {
 756   assert(min_capacity() <= v && v <= max_capacity(),
 757          "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
 758          min_capacity(), v, max_capacity());
 759   Atomic::store(&_soft_max_size, v);
 760 
 761   if (mode()->is_generational()) {
 762     _generation_sizer.heap_size_changed(_soft_max_size);
 763     size_t soft_max_capacity_young = _generation_sizer.max_young_size();
 764     size_t soft_max_capacity_old = _soft_max_size - soft_max_capacity_young;
 765     _young_generation->set_soft_max_capacity(soft_max_capacity_young);
 766     _old_generation->set_soft_max_capacity(soft_max_capacity_old);
 767   }
 768 }
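// Illustrative split (assumed numbers): with v = 1024 MB and the sizer reporting
// max_young_size() = 768 MB after heap_size_changed(v), young receives a soft max of 768 MB and
// old receives the remaining 1024 - 768 = 256 MB, so the generation soft maxima always sum to the
// heap's soft max.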
 769 
 770 size_t ShenandoahHeap::min_capacity() const {
 771   return _minimum_size;
 772 }
 773 
 774 size_t ShenandoahHeap::initial_capacity() const {
 775   return _initial_size;
 776 }
 777 






 778 void ShenandoahHeap::op_uncommit(double shrink_before, size_t shrink_until) {
 779   assert (ShenandoahUncommit, "should be enabled");
 780 
 781   // The application allocates from the beginning of the heap, and GC allocates at
 782   // the end of it. It is more efficient to uncommit from the end, so that the application
 783   // can keep enjoying the committed regions near the beginning. GC allocations are much less
 784   // frequent, and can therefore accept the commit costs.
 785 
 786   size_t count = 0;
 787   for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow
 788     ShenandoahHeapRegion* r = get_region(i - 1);
 789     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
 790       ShenandoahHeapLocker locker(lock());
 791       if (r->is_empty_committed()) {
 792         if (committed() < shrink_until + ShenandoahHeapRegion::region_size_bytes()) {
 793           break;
 794         }
 795 
 796         r->make_uncommitted();
 797         count++;
 798       }
 799     }
 800     SpinPause(); // allow allocators to take the lock
 801   }
 802 
 803   if (count > 0) {
 804     control_thread()->notify_heap_changed();
 805     regulator_thread()->notify_heap_changed();
 806   }
 807 }
 808 
 809 void ShenandoahHeap::handle_old_evacuation(HeapWord* obj, size_t words, bool promotion) {
 810   // Only register the copy of the object that won the evacuation race.
 811   card_scan()->register_object_wo_lock(obj);
 812 
 813   // Mark the entire range of the evacuated object as dirty.  At the next remembered set scan,
 814   // we will clear dirty bits that do not hold interesting pointers.  It's more efficient to
 815   // do this in batch, in a background GC thread, than to try to carefully dirty only cards
 816   // that hold interesting pointers right now.
 817   card_scan()->mark_range_as_dirty(obj, words);
 818 
 819   if (promotion) {
 820     // This evacuation was a promotion, track this as allocation against old gen
 821     old_generation()->increase_allocated(words * HeapWordSize);
 822   }
 823 }
 824 
 825 void ShenandoahHeap::handle_old_evacuation_failure() {
 826   if (_old_gen_oom_evac.try_set()) {
 827     log_info(gc)("Old gen evac failure.");
 828   }
 829 }
 830 
 831 void ShenandoahHeap::handle_promotion_failure() {
 832   old_heuristics()->handle_promotion_failure();
 833 }
 834 
 835 void ShenandoahHeap::report_promotion_failure(Thread* thread, size_t size) {
 836   // We squelch excessive reports to reduce noise in logs.  Squelch enforcement is not "perfect" because
 837   // this same code can be in-lined in multiple contexts, and each context will have its own copy of the static
 838   // last_report_epoch and this_epoch_report_count variables.
 839   const size_t MaxReportsPerEpoch = 4;
 840   static size_t last_report_epoch = 0;
 841   static size_t epoch_report_count = 0;
 842 
 843   size_t promotion_reserve;
 844   size_t promotion_expended;
 845 
 846   size_t gc_id = control_thread()->get_gc_id();
 847 
 848   if ((gc_id != last_report_epoch) || (epoch_report_count++ < MaxReportsPerEpoch)) {
 849     {
 850       // Promotion failures should be very rare.  Invest in providing useful diagnostic info.
 851       ShenandoahHeapLocker locker(lock());
 852       promotion_reserve = get_promoted_reserve();
 853       promotion_expended = get_promoted_expended();
 854     }
 855     PLAB* plab = ShenandoahThreadLocalData::plab(thread);
 856     size_t words_remaining = (plab == nullptr)? 0: plab->words_remaining();
 857     const char* promote_enabled = ShenandoahThreadLocalData::allow_plab_promotions(thread)? "enabled": "disabled";
 858 
 859     log_info(gc, ergo)("Promotion failed, size " SIZE_FORMAT ", has plab? %s, PLAB remaining: " SIZE_FORMAT
 860                        ", plab promotions %s, promotion reserve: " SIZE_FORMAT ", promotion expended: " SIZE_FORMAT,
 861                        size, plab == nullptr? "no": "yes",
 862                        words_remaining, promote_enabled, promotion_reserve, promotion_expended);
 863     if ((gc_id == last_report_epoch) && (epoch_report_count >= MaxReportsPerEpoch)) {
 864       log_info(gc, ergo)("Squelching additional promotion failure reports for current epoch");
 865     } else if (gc_id != last_report_epoch) {
 866       last_report_epoch = gc_id;
 867       epoch_report_count = 1;
 868     }
 869   }
 870 }
 871 
 872 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
 873   // New object should fit the GCLAB size
 874   size_t min_size = MAX2(size, PLAB::min_size());
 875 
 876   // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
 877   size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
 878 
 879   // Limit growth of GCLABs to ShenandoahMaxEvacLABRatio * the minimum size.  This enables a more equitable distribution of
 880   // the available evacuation budget among the many threads that are coordinating in the evacuation effort.
 881   if (ShenandoahMaxEvacLABRatio > 0) {
 882     log_debug(gc, free)("Allocate new gclab: " SIZE_FORMAT ", " SIZE_FORMAT, new_size, PLAB::min_size() * ShenandoahMaxEvacLABRatio);
 883     new_size = MIN2(new_size, PLAB::min_size() * ShenandoahMaxEvacLABRatio);
 884   }
 885 
 886   new_size = MIN2(new_size, PLAB::max_size());
 887   new_size = MAX2(new_size, PLAB::min_size());
 888 
 889   // Record new heuristic value even if we take any shortcut. This captures
 890   // the case when moderately-sized objects always take a shortcut. At some point,
 891   // heuristics should catch up with them.
 892   ShenandoahThreadLocalData::set_gclab_size(thread, new_size);
 893 
 894   if (new_size < size) {
 895     // New size still does not fit the object. Fall back to shared allocation.
 896     // This avoids retiring perfectly good GCLABs, when we encounter a large object.
 897     log_debug(gc, free)("New gclab size (" SIZE_FORMAT ") is too small for " SIZE_FORMAT, new_size, size);
 898     return nullptr;
 899   }
 900 
 901   // Retire current GCLAB, and allocate a new one.
 902   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
 903   gclab->retire();
 904 
 905   size_t actual_size = 0;
 906   HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
 907   if (gclab_buf == nullptr) {
 908     return nullptr;
 909   }
 910 
 911   assert (size <= actual_size, "allocation should fit");
 912 
 913   if (ZeroTLAB) {
 914     // ...and clear it.
 915     Copy::zero_to_words(gclab_buf, actual_size);
 916   } else {
 917     // ...and zap the just-allocated object.
 918 #ifdef ASSERT
 919     // Skip mangling the space corresponding to the object header to
 920     // ensure that the returned space is not considered parsable by
 921     // any concurrent GC thread.
 922     size_t hdr_size = oopDesc::header_size();
 923     Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
 924 #endif // ASSERT
 925   }
 926   gclab->set_buf(gclab_buf, actual_size);
 927   return gclab->allocate(size);
 928 }
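// Worked example for the GCLAB resizing above (all numbers assumed; PLAB::min_size(),
// PLAB::max_size() and ShenandoahMaxEvacLABRatio are taken as 256 words, 8192 words and 16 purely
// for illustration):
//
//   current gclab_size = 1024 words  ->  new_size = 2 * 1024 = 2048 words
//   ratio cap          = 256 * 16    = 4096 words, so 2048 is unchanged
//   clamp to [256, 8192]             ->  new_size = 2048 words
//
// If the requested object were 3000 words, new_size < size, so the request falls back to a shared
// allocation rather than retiring a perfectly good GCLAB.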
 929 
 930 // Establish a new PLAB and allocate size HeapWords within it.
 931 HeapWord* ShenandoahHeap::allocate_from_plab_slow(Thread* thread, size_t size, bool is_promotion) {
 932   // New object should fit the PLAB size
 933   size_t min_size = MAX2(size, PLAB::min_size());
 934 
 935   // Figure out size of new PLAB, looking back at heuristics. Expand aggressively.
 936   size_t cur_size = ShenandoahThreadLocalData::plab_size(thread);
 937   if (cur_size == 0) {
 938     cur_size = PLAB::min_size();
 939   }
 940   size_t future_size = cur_size * 2;
 941   // Limit growth of PLABs to ShenandoahMaxEvacLABRatio * the minimum size.  This enables a more equitable distribution of
 942   // the available evacuation budget among the many threads that are coordinating in the evacuation effort.
 943   if (ShenandoahMaxEvacLABRatio > 0) {
 944     future_size = MIN2(future_size, PLAB::min_size() * ShenandoahMaxEvacLABRatio);
 945   }
 946   future_size = MIN2(future_size, PLAB::max_size());
 947   future_size = MAX2(future_size, PLAB::min_size());
 948 
 949   size_t unalignment = future_size % CardTable::card_size_in_words();
 950   if (unalignment != 0) {
 951     future_size = future_size - unalignment + CardTable::card_size_in_words();
 952   }
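  // The adjustment above rounds future_size up to a whole number of cards. For example, with an
  // assumed card size of 64 words and future_size = 1000 words:
  //   unalignment = 1000 % 64 = 40,  future_size = 1000 - 40 + 64 = 1024 words.
  // Keeping PLABs card-aligned is what makes the lock-free remembered set registration in
  // retire_plab() safe (see the CAUTION note below).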
 953 
 954   // Record new heuristic value even if we take any shortcut. This captures
 955   // the case when moderately-sized objects always take a shortcut. At some point,
 956   // heuristics should catch up with them.  Note that the requested cur_size may
 957   // not be honored, but we remember that this is the preferred size.
 958   ShenandoahThreadLocalData::set_plab_size(thread, future_size);
 959   if (cur_size < size) {
 960     // The PLAB to be allocated is still not large enough to hold the object. Fall back to shared allocation.
 961     // This avoids retiring perfectly good PLABs in order to represent a single large object allocation.
 962     return nullptr;
 963   }
 964 
 965   // Retire current PLAB, and allocate a new one.
 966   PLAB* plab = ShenandoahThreadLocalData::plab(thread);
 967   if (plab->words_remaining() < PLAB::min_size()) {
 968     // Retire current PLAB, and allocate a new one.
 969     // CAUTION: retire_plab may register the remnant filler object with the remembered set scanner without a lock.  This
 970     // is safe provided that each PLAB is a whole-number multiple of the card-mark memory size and each PLAB is
 971     // aligned with the start of a card's memory range.
 972 
 973     retire_plab(plab, thread);
 974 
 975     size_t actual_size = 0;
 976     // allocate_new_plab resets plab_evacuated and plab_promoted and disables promotions if old-gen available is
 977     // less than the remaining evacuation need.  It also adjusts plab_preallocated and expend_promoted if appropriate.
 978     HeapWord* plab_buf = allocate_new_plab(min_size, cur_size, &actual_size);
 979     if (plab_buf == nullptr) {
 980       return nullptr;
 981     } else {
 982       ShenandoahThreadLocalData::enable_plab_retries(thread);
 983     }
 984     assert (size <= actual_size, "allocation should fit");
 985     if (ZeroTLAB) {
 986       // ..and clear it.
 987       Copy::zero_to_words(plab_buf, actual_size);
 988     } else {
 989       // ...and zap just allocated object.
 990 #ifdef ASSERT
 991       // Skip mangling the space corresponding to the object header to
 992       // ensure that the returned space is not considered parsable by
 993       // any concurrent GC thread.
 994       size_t hdr_size = oopDesc::header_size();
 995       Copy::fill_to_words(plab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
 996 #endif // ASSERT
 997     }
 998     plab->set_buf(plab_buf, actual_size);
 999 
1000     if (is_promotion && !ShenandoahThreadLocalData::allow_plab_promotions(thread)) {
1001       return nullptr;
1002     }
1003     return plab->allocate(size);
1004   } else {
1005     // If there are still at least min_size() words available within the current plab, don't retire it.  Let's gnaw
1006     // away on this plab as long as we can.  Meanwhile, return nullptr to force this particular allocation request
1007     // to be satisfied with a shared allocation.  By packing more promotions into the previously allocated PLAB, we
1008     // reduce the likelihood of evacuation failures, and we reduce the need for downsizing our PLABs.
1009     return nullptr;
1010   }
1011 }
1012 
1013 // TODO: It is probably most efficient to register all objects (both promotions and evacuations) that were allocated within
1014 // this plab at the time we retire the plab.  A tight registration loop will run within both code and data caches.  This change
1015 // would allow smaller and faster in-line implementation of alloc_from_plab().  Since plabs are aligned on card-table boundaries,
1016 // this object registration loop can be performed without acquiring a lock.
1017 void ShenandoahHeap::retire_plab(PLAB* plab, Thread* thread) {
1018   // We don't enforce limits on plab_evacuated.  We let it consume all available old-gen memory in order to reduce
1019   // probability of an evacuation failure.  We do enforce limits on promotion, to make sure that excessive promotion
1020   // does not result in an old-gen evacuation failure.  Note that a failed promotion is relatively harmless.  Any
1021   // object that fails to promote in the current cycle will be eligible for promotion in a subsequent cycle.
1022 
1023   // When the plab was instantiated, its entirety was treated as if the entire buffer was going to be dedicated to
1024   // promotions.  Now that we are retiring the buffer, we adjust for the reality that the plab is not entirely promotions.
1025   //  1. Some of the plab may have been dedicated to evacuations.
1026   //  2. Some of the plab may have been abandoned due to waste (at the end of the plab).
1027   size_t not_promoted =
1028     ShenandoahThreadLocalData::get_plab_preallocated_promoted(thread) - ShenandoahThreadLocalData::get_plab_promoted(thread);
1029   ShenandoahThreadLocalData::reset_plab_promoted(thread);
1030   ShenandoahThreadLocalData::reset_plab_evacuated(thread);
1031   ShenandoahThreadLocalData::set_plab_preallocated_promoted(thread, 0);
1032   if (not_promoted > 0) {
1033     unexpend_promoted(not_promoted);
1034   }
1035   size_t waste = plab->waste();
1036   HeapWord* top = plab->top();
1037   plab->retire();
1038   if (top != nullptr && plab->waste() > waste && is_in_old(top)) {
1039     // If retiring the plab created a filler object, then we
1040     // need to register it with our card scanner so it can
1041     // safely walk the region backing the plab.
1042     log_debug(gc)("retire_plab() is registering remnant of size " SIZE_FORMAT " at " PTR_FORMAT,
1043                   plab->waste() - waste, p2i(top));
1044     card_scan()->register_object_wo_lock(top);
1045   }
1046 }
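// Accounting example for retire_plab() above (assumed numbers): suppose the PLAB was preallocated
// with 2048 words counted against the promotion budget, but only 1200 words were actually promoted
// (the rest served old-gen evacuations or became waste). Then
//
//   not_promoted = 2048 - 1200 = 848 words
//
// and unexpend_promoted(848) hands that amount back to the promotion accounting so later
// promotions in this cycle may use it.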
1047 
1048 void ShenandoahHeap::retire_plab(PLAB* plab) {
1049   Thread* thread = Thread::current();
1050   retire_plab(plab, thread);
1051 }
1052 
1053 void ShenandoahHeap::cancel_old_gc() {
1054   shenandoah_assert_safepoint();
1055   assert(_old_generation != nullptr, "Should only have mixed collections in generation mode.");
1056   log_info(gc)("Terminating old gc cycle.");
1057 
1058   // Stop marking
1059   old_generation()->cancel_marking();
1060   // Stop coalescing undead objects
1061   set_prepare_for_old_mark_in_progress(false);
1062   // Stop tracking old regions
1063   old_heuristics()->abandon_collection_candidates();
1064   // Remove old generation access to young generation mark queues
1065   young_generation()->set_old_gen_task_queues(nullptr);
1066   // Transition to IDLE now.
1067   _old_generation->transition_to(ShenandoahOldGeneration::IDLE);
1068 }
1069 
1070 bool ShenandoahHeap::is_old_gc_active() {
1071   return _old_generation->state() != ShenandoahOldGeneration::IDLE;
1072 }
1073 
1074 void ShenandoahHeap::coalesce_and_fill_old_regions() {
1075   class ShenandoahGlobalCoalesceAndFill : public ShenandoahHeapRegionClosure {
1076    public:
1077     virtual void heap_region_do(ShenandoahHeapRegion* region) override {
1078       // old region is not in the collection set and was not immediately trashed
1079       if (region->is_old() && region->is_active() && !region->is_humongous()) {
1080         // Reset the coalesce and fill boundary because this is a global collect
1081         // and cannot be preempted by young collects. We want to be sure the entire
1082         // region is coalesced here and does not resume from a previously interrupted
1083         // or completed coalescing.
1084         region->begin_preemptible_coalesce_and_fill();
1085         region->oop_fill_and_coalesce();
1086       }
1087     }
1088 
1089     virtual bool is_thread_safe() override {
1090       return true;
1091     }
1092   };
1093   ShenandoahGlobalCoalesceAndFill coalesce;
1094   parallel_heap_region_iterate(&coalesce);
1095 }
1096 
1097 bool ShenandoahHeap::adjust_generation_sizes() {
1098   if (mode()->is_generational()) {
1099     return _generation_sizer.adjust_generation_sizes();
1100   }
1101   return false;
1102 }
1103 
1104 // Called from stubs in JIT code or interpreter
1105 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
1106                                             size_t requested_size,
1107                                             size_t* actual_size) {
1108   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
1109   HeapWord* res = allocate_memory(req, false);
1110   if (res != nullptr) {
1111     *actual_size = req.actual_size();
1112   } else {
1113     *actual_size = 0;
1114   }
1115   return res;
1116 }
1117 
1118 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
1119                                              size_t word_size,
1120                                              size_t* actual_size) {
1121   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
1122   HeapWord* res = allocate_memory(req, false);
1123   if (res != nullptr) {
1124     *actual_size = req.actual_size();
1125   } else {
1126     *actual_size = 0;
1127   }
1128   return res;
1129 }
1130 
1131 HeapWord* ShenandoahHeap::allocate_new_plab(size_t min_size,
1132                                             size_t word_size,
1133                                             size_t* actual_size) {
1134   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_plab(min_size, word_size);
1135   // Note that allocate_memory() sets a thread-local flag to prohibit further promotions by this thread
1136   // if we are at risk of exceeding the old-gen evacuation budget.
1137   HeapWord* res = allocate_memory(req, false);
1138   if (res != nullptr) {
1139     *actual_size = req.actual_size();
1140   } else {
1141     *actual_size = 0;
1142   }
1143   return res;
1144 }
1145 
1146 // is_promotion is true iff this allocation is known for sure to hold the result of young-gen evacuation
1147 // to old-gen.  PLAB allocations are not known as such, since they may hold old-gen evacuations.
1148 HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req, bool is_promotion) {
1149   intptr_t pacer_epoch = 0;
1150   bool in_new_region = false;
1151   HeapWord* result = nullptr;
1152 
1153   if (req.is_mutator_alloc()) {
1154     if (ShenandoahPacing) {
1155       pacer()->pace_for_alloc(req.size());
1156       pacer_epoch = pacer()->epoch();
1157     }
1158 
1159     if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
1160       result = allocate_memory_under_lock(req, in_new_region, is_promotion);
1161     }
1162 
1163     // Allocation failed; block until the control thread has reacted, then retry the allocation.
1164     //
1165     // It might happen that one of the threads requesting allocation unblocks much later, after
1166     // GC has already happened, only to fail its next allocation because other threads have
1167     // depleted the free storage in the meantime. In this case, a better strategy is to try
1168     // again, as long as GC makes progress.
1169     //
1170     // Then, we need to make sure the allocation was retried after at least one
1171     // Full GC, which means we want to try more than ShenandoahFullGCThreshold times.

1172     size_t tries = 0;

1173     while (result == nullptr && _progress_last_gc.is_set()) {
1174       tries++;
1175       control_thread()->handle_alloc_failure(req);
1176       result = allocate_memory_under_lock(req, in_new_region, is_promotion);
1177     }

1178     while (result == nullptr && tries <= ShenandoahFullGCThreshold) {
1179       tries++;
1180       control_thread()->handle_alloc_failure(req);
1181       result = allocate_memory_under_lock(req, in_new_region, is_promotion);
1182     }

1183   } else {
1184     assert(req.is_gc_alloc(), "Can only accept GC allocs here");
1185     result = allocate_memory_under_lock(req, in_new_region, is_promotion);
1186     // Do not call handle_alloc_failure() here, because we cannot block.
1187     // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
1188   }
1189 
1190   if (in_new_region) {
1191     control_thread()->notify_heap_changed();
1192     regulator_thread()->notify_heap_changed();
1193   }
1194 
1195   if (result != nullptr) {
1196     ShenandoahGeneration* alloc_generation = generation_for(req.affiliation());
1197     size_t requested = req.size();
1198     size_t actual = req.actual_size();
1199     size_t actual_bytes = actual * HeapWordSize;
1200 
1201     assert (req.is_lab_alloc() || (requested == actual),
1202             "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
1203             ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);
1204 
1205     if (req.is_mutator_alloc()) {
1206       notify_mutator_alloc_words(actual, false);
1207       alloc_generation->increase_allocated(actual_bytes);
1208 
1209       // If we requested more than we were granted, give the rest back to pacer.
1210       // This only matters if we are in the same pacing epoch: do not try to unpace
1211       // over the budget for the other phase.
1212       if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
1213         pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
1214       }
1215     } else {
1216       increase_used(actual_bytes);
1217     }
1218   }
1219 
1220   return result;
1221 }
1222 
1223 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region, bool is_promotion) {
1224   bool try_smaller_lab_size = false;
1225   size_t smaller_lab_size;
1226   {
1227     // promotion_eligible pertains only to PLAB allocations, denoting that the PLAB is allowed to allocate for promotions.
1228     bool promotion_eligible = false;
1229     bool allow_allocation = true;
1230     bool plab_alloc = false;
1231     size_t requested_bytes = req.size() * HeapWordSize;
1232     HeapWord* result = nullptr;
1233     ShenandoahHeapLocker locker(lock());
1234     Thread* thread = Thread::current();
1235 
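    // In generational mode, vet the request against the target generation before consulting the free set:
    // young mutator allocations are limited by the young generation's adjusted availability, while old-gen
    // PLAB and shared promotion allocations are checked against the promotion (and old evacuation) budgets.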
1236     if (mode()->is_generational()) {
1237       if (req.affiliation() == YOUNG_GENERATION) {
1238         if (req.is_mutator_alloc()) {
1239           size_t young_available = young_generation()->adjusted_available();
1240           if (requested_bytes > young_available) {
1241             // We know this is not a GCLAB.  This must be a TLAB or a shared allocation.
1242             if (req.is_lab_alloc() && (young_available >= req.min_size())) {
1243               try_smaller_lab_size = true;
1244               smaller_lab_size = young_available / HeapWordSize;
1245             } else {
1246               // Can't allocate because even min_size() is larger than remaining young_available
1247               log_info(gc, ergo)("Unable to shrink %s alloc request of minimum size: " SIZE_FORMAT
1248                                  ", young available: " SIZE_FORMAT,
1249                                  req.is_lab_alloc()? "TLAB": "shared",
1250                                  HeapWordSize * (req.is_lab_alloc()? req.min_size(): req.size()), young_available);
1251               return nullptr;
1252             }
1253           }
1254         }
1255       } else {                    // req.affiliation() == OLD_GENERATION
1256         assert(req.type() != ShenandoahAllocRequest::_alloc_gclab, "GCLAB pertains only to young-gen memory");
1257         if (req.type() ==  ShenandoahAllocRequest::_alloc_plab) {
1258           plab_alloc = true;
1259           size_t promotion_avail = get_promoted_reserve();
1260           size_t promotion_expended = get_promoted_expended();
1261           if (promotion_expended + requested_bytes > promotion_avail) {
1262             promotion_avail = 0;
1263             if (get_old_evac_reserve() == 0) {
1264               // There are no old-gen evacuations in this pass.  There's no value in creating a plab that cannot
1265               // be used for promotions.
1266               allow_allocation = false;
1267             }
1268           } else {
1269             promotion_avail = promotion_avail - (promotion_expended + requested_bytes);
1270             promotion_eligible = true;
1271           }
1272         } else if (is_promotion) {
1273           // This is a shared alloc for promotion
1274           size_t promotion_avail = get_promoted_reserve();
1275           size_t promotion_expended = get_promoted_expended();
1276           if (promotion_expended + requested_bytes > promotion_avail) {
1277             promotion_avail = 0;
1278           } else {
1279             promotion_avail = promotion_avail - (promotion_expended + requested_bytes);
1280           }
1281           if (promotion_avail == 0) {
1282             // We need to reserve the remaining memory for evacuation.  Reject this allocation.  The object will be
1283             // evacuated to young-gen memory and promoted during a future GC pass.
1284             return nullptr;
1285           }
1286           // Else, we'll allow the allocation to proceed.  (Since we hold heap lock, the tested condition remains true.)
1287         } else {
1288           // This is a shared allocation for evacuation.  Memory has already been reserved for this purpose.
1289         }
1290       }
1291     } // This ends the is_generational() block
1292 
1293     if (!try_smaller_lab_size) {
1294       result = (allow_allocation)? _free_set->allocate(req, in_new_region): nullptr;
1295       if (result != nullptr) {
1296         if (req.affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION) {
1297           ShenandoahThreadLocalData::reset_plab_promoted(thread);
1298           if (req.is_gc_alloc()) {
1299             if (req.type() ==  ShenandoahAllocRequest::_alloc_plab) {
1300               if (promotion_eligible) {
1301                 size_t actual_size = req.actual_size() * HeapWordSize;
1302                 // Assume the entirety of this PLAB will be used for promotion.  This prevents promotion from overreaching its budget.
1303                 // When we retire this PLAB, we'll unexpend what we don't really use.
1304                 ShenandoahThreadLocalData::enable_plab_promotions(thread);
1305                 expend_promoted(actual_size);
1306                 assert(get_promoted_expended() <= get_promoted_reserve(), "Do not expend more promotion than budgeted");
1307                 ShenandoahThreadLocalData::set_plab_preallocated_promoted(thread, actual_size);
1308               } else {
1309                 // Disable promotions in this thread because entirety of this PLAB must be available to hold old-gen evacuations.
1310                 ShenandoahThreadLocalData::disable_plab_promotions(thread);
1311                 ShenandoahThreadLocalData::set_plab_preallocated_promoted(thread, 0);
1312               }
1313             } else if (is_promotion) {
1314               // Shared promotion.  Assume size is requested_bytes.
1315               expend_promoted(requested_bytes);
1316               assert(get_promoted_expended() <= get_promoted_reserve(), "Do not expend more promotion than budgeted");
1317             }
1318           }
1319 
1320           // Register the newly allocated object while we're holding the global lock since there's no synchronization
1321           // built in to the implementation of register_object().  There are potential races when multiple independent
1322           // threads are allocating objects, some of which might span the same card region.  For example, consider
1323           // a card table's memory region within which three objects are being allocated by three different threads:
1324           //
1325           // objects being "concurrently" allocated:
1326           //    [-----a------][-----b-----][--------------c------------------]
1327           //            [---- card table memory range --------------]
1328           //
1329           // Before any objects are allocated, this card's memory range holds no objects.  Note that allocation of object a
1330           //   wants to set the has-object, first-start, and last-start attributes of the preceding card region.
1331           //   allocation of object b wants to set the has-object, first-start, and last-start attributes of this card region.
1332           //   allocation of object c also wants to set the has-object, first-start, and last-start attributes of this card region.
1333           //
1334           // The thread allocating b and the thread allocating c can "race" in various ways, resulting in confusion, such as
1335           // last-start representing object b while first-start represents object c.  This is why we need to require all
1336           // register_object() invocations to be "mutually exclusive" with respect to each card's memory range.
1337           ShenandoahHeap::heap()->card_scan()->register_object(result);
1338         }
1339       } else {
1340         // The allocation failed.  If this was a PLAB allocation, we've already retired it and no longer have a PLAB.
1341         if ((req.affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION) && req.is_gc_alloc() &&
1342             (req.type() == ShenandoahAllocRequest::_alloc_plab)) {
1343           // We don't need to disable PLAB promotions because there is no PLAB.  We leave promotions enabled because
1344           // this allows the surrounding infrastructure to retry alloc_plab_slow() with a smaller PLAB size.
1345           ShenandoahThreadLocalData::set_plab_preallocated_promoted(thread, 0);
1346         }
1347       }
1348       return result;
1349     }
1350     // else, try_smaller_lab_size is true so we fall through and recurse with a smaller lab size
1351   } // This closes the block that holds the heap lock.  This releases the lock.
1352 
1353   // We arrive here if the tlab allocation request can be resized to fit within young_available
1354   assert((req.affiliation() == YOUNG_GENERATION) && req.is_lab_alloc() && req.is_mutator_alloc() &&
1355          (smaller_lab_size < req.size()), "Only shrink allocation request size for TLAB allocations");
1356 
1357   // By convention, ShenandoahAllocRequest is primarily read-only.  The only mutable instance data is represented by
1358   // actual_size(), which is overwritten with the size of the allocation when the allocation request is satisfied.  We use a
1359   // recursive call here rather than introducing new methods to mutate the existing ShenandoahAllocRequest argument.
1360   // Mutation of the existing object might result in astonishing results if calling contexts assume the content of immutable
1361   // fields remain constant.  The original TLAB allocation request was for memory that exceeded the current capacity.  We'll
1362   // attempt to allocate a smaller TLAB.  If this is successful, we'll update actual_size() of our incoming
1363   // ShenandoahAllocRequest.  If the recursive request fails, we'll simply return nullptr.
1364 
1365   // Note that we've relinquished the HeapLock and some other thread may perform additional allocation before our recursive
1366   // call reacquires the lock.  If that happens, we will need another recursive call to further reduce the size of our request
1367   // for each time another thread allocates young memory during the brief intervals that the heap lock is available to
1368   // interfering threads.  We expect this interference to be rare.  The recursion bottoms out when young_available is
1369   // smaller than req.min_size().  The inner-nested call to allocate_memory_under_lock() uses the same min_size() value
1370   // as this call, but it uses a preferred size() that is smaller than our preferred size, and is no larger than what we most
1371   // recently saw as the memory currently available within the young generation.
1372 
1373   // TODO: At the expense of code clarity, we could rewrite this recursive solution to use iteration.  We need at most one
1374   // extra instance of the ShenandoahAllocRequest, which we can re-initialize multiple times inside a loop, with one iteration
1375   // of the loop required for each time the existing solution would recurse.  An iterative solution would be more efficient
1376   // in CPU time and stack memory utilization.  The expectation is that it is very rare that we would recurse more than once
1377   // so making this change is not currently seen as a high priority.
1378 
1379   ShenandoahAllocRequest smaller_req = ShenandoahAllocRequest::for_tlab(req.min_size(), smaller_lab_size);
1380 
1381   // Note that shrinking the preferred size gets us past the gatekeeper that checks whether there's available memory to
1382   // satisfy the allocation request.  The reality is the actual TLAB size is likely to be even smaller, because it will
1383   // depend on how much memory is available within mutator regions that are not yet fully used.
1384   HeapWord* result = allocate_memory_under_lock(smaller_req, in_new_region, is_promotion);
1385   if (result != nullptr) {
1386     req.set_actual_size(smaller_req.actual_size());
1387   }
1388   return result;
1389 }
1390 
1391 HeapWord* ShenandoahHeap::mem_allocate(size_t size,
1392                                         bool*  gc_overhead_limit_was_exceeded) {
1393   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
1394   return allocate_memory(req, false);
1395 }
1396 
1397 MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
1398                                                              size_t size,
1399                                                              Metaspace::MetadataType mdtype) {
1400   MetaWord* result;
1401 
1402   // Inform metaspace OOM to GC heuristics if class unloading is possible.
1403   ShenandoahHeuristics* h = global_generation()->heuristics();
1404   if (h->can_unload_classes()) {
1405     h->record_metaspace_oom();
1406   }
1407 
1408   // Expand and retry allocation
1409   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1410   if (result != nullptr) {
1411     return result;
1412   }
1413 
1414   // Start full GC
1415   collect(GCCause::_metadata_GC_clear_soft_refs);
1416 
1417   // Retry allocation
1418   result = loader_data->metaspace_non_null()->allocate(size, mdtype);
1419   if (result != nullptr) {
1420     return result;
1421   }
1422 
1423   // Expand and retry allocation
1424   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);

1463 
1464   void work(uint worker_id) {
1465     if (_concurrent) {
1466       ShenandoahConcurrentWorkerSession worker_session(worker_id);
1467       ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
1468       ShenandoahEvacOOMScope oom_evac_scope;
1469       do_work();
1470     } else {
1471       ShenandoahParallelWorkerSession worker_session(worker_id);
1472       ShenandoahEvacOOMScope oom_evac_scope;
1473       do_work();
1474     }
1475   }
1476 
1477 private:
1478   void do_work() {
1479     ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
1480     ShenandoahHeapRegion* r;
1481     while ((r = _cs->claim_next()) != nullptr) {
1482       assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
1483 
1484       _sh->marked_object_iterate(r, &cl);
1485 
1486       if (ShenandoahPacing) {
1487         _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
1488       }
1489       if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
1490         break;
1491       }
1492     }
1493   }
1494 };
1495 
1496 // Unlike ShenandoahEvacuationTask, this iterates over all regions rather than just the collection set.
1497 // This is needed in order to promote humongous start regions if age() >= tenure threshold.
1498 class ShenandoahGenerationalEvacuationTask : public WorkerTask {
1499 private:
1500   ShenandoahHeap* const _sh;
1501   ShenandoahRegionIterator *_regions;
1502   bool _concurrent;
1503 public:
1504   ShenandoahGenerationalEvacuationTask(ShenandoahHeap* sh,
1505                                        ShenandoahRegionIterator* iterator,
1506                                        bool concurrent) :
1507     WorkerTask("Shenandoah Evacuation"),
1508     _sh(sh),
1509     _regions(iterator),
1510     _concurrent(concurrent)
1511   {}
1512 
1513   void work(uint worker_id) {
1514     if (_concurrent) {
1515       ShenandoahConcurrentWorkerSession worker_session(worker_id);
1516       ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
1517       ShenandoahEvacOOMScope oom_evac_scope;
1518       do_work();
1519     } else {
1520       ShenandoahParallelWorkerSession worker_session(worker_id);
1521       ShenandoahEvacOOMScope oom_evac_scope;
1522       do_work();
1523     }
1524   }
1525 
1526 private:
1527   void do_work() {
1528     ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
1529     ShenandoahHeapRegion* r;
1530     while ((r = _regions->next()) != nullptr) {
1531       log_debug(gc)("GenerationalEvacuationTask do_work(), looking at %s region " SIZE_FORMAT ", (age: %d) [%s, %s]",
1532                     r->is_old()? "old": r->is_young()? "young": "free", r->index(), r->age(),
1533                     r->is_active()? "active": "inactive",
1534                     r->is_humongous()? (r->is_humongous_start()? "humongous_start": "humongous_continuation"): "regular");
1535       if (r->is_cset()) {
1536         assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
1537         _sh->marked_object_iterate(r, &cl);
1538         if (ShenandoahPacing) {
1539           _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
1540         }
1541       } else if (r->is_young() && r->is_active() && r->is_humongous_start() && (r->age() > InitialTenuringThreshold)) {
1542         // We promote humongous_start regions along with their affiliated continuations during evacuation rather than
1543         // doing this work during a safepoint.  We cannot put humongous regions into the collection set because that
1544         // triggers the load-reference barrier (LRB) to copy on reference fetch.
1545         if (r->promote_humongous() == 0) {
1546           // We chose not to promote because old-gen is out of memory.  Report and handle the promotion failure because
1547           // this suggests a need to expand old-gen and/or perform a collection of old-gen.
1548           ShenandoahHeap* heap = ShenandoahHeap::heap();
1549           oop obj = cast_to_oop(r->bottom());
1550           size_t size = obj->size();
1551           Thread* thread = Thread::current();
1552           heap->report_promotion_failure(thread, size);
1553           heap->handle_promotion_failure();
1554         }
1555       }
1556       // else, region is free, or OLD, or not in collection set, or humongous_continuation,
1557       // or is young humongous_start that is too young to be promoted
1558 
1559       if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
1560         break;
1561       }
1562     }
1563   }
1564 };
1565 
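// In generational mode, evacuation visits every region (see ShenandoahGenerationalEvacuationTask above) so that
// sufficiently aged humongous start regions can be promoted; otherwise, only the collection set is evacuated.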
1566 void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
1567   if (ShenandoahHeap::heap()->mode()->is_generational()) {
1568     ShenandoahRegionIterator regions;
1569     ShenandoahGenerationalEvacuationTask task(this, &regions, concurrent);
1570     workers()->run_task(&task);
1571   } else {
1572     ShenandoahEvacuationTask task(this, _collection_set, concurrent);
1573     workers()->run_task(&task);
1574   }
1575 }
1576 
1577 void ShenandoahHeap::trash_cset_regions() {
1578   ShenandoahHeapLocker locker(lock());
1579 
1580   ShenandoahCollectionSet* set = collection_set();
1581   ShenandoahHeapRegion* r;
1582   set->clear_current_index();
1583   while ((r = set->next()) != nullptr) {
1584     r->make_trash();
1585   }
1586   collection_set()->clear();
1587 }
1588 
1589 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
1590   st->print_cr("Heap Regions:");
1591   st->print_cr("EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HC=humongous continuation, CS=collection set, T=trash, P=pinned");
1592   st->print_cr("BTE=bottom/top/end, U=used, T=TLAB allocs, G=GCLAB allocs, S=shared allocs, L=live data");
1593   st->print_cr("R=root, CP=critical pins, TAMS=top-at-mark-start, UWM=update watermark");
1594   st->print_cr("SN=alloc sequence number");
1595 
1596   for (size_t i = 0; i < num_regions(); i++) {
1597     get_region(i)->print_on(st);
1598   }
1599 }
1600 
1601 size_t ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
1602   assert(start->is_humongous_start(), "reclaim regions starting with the first one");
1603 
1604   oop humongous_obj = cast_to_oop(start->bottom());
1605   size_t size = humongous_obj->size();
1606   size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
1607   size_t index = start->index() + required_regions - 1;
1608 
1609   assert(!start->has_live(), "liveness must be zero");
1610 
1611   for(size_t i = 0; i < required_regions; i++) {
1612     // Reclaim from tail. Otherwise, assertion fails when printing region to trace log,
1613     // as it expects that every region belongs to a humongous region starting with a humongous start region.
1614     ShenandoahHeapRegion* region = get_region(index--);
1615 
1616     assert(region->is_humongous(), "expect correct humongous start or continuation");
1617     assert(!region->is_cset(), "Humongous region should not be in collection set");
1618 
1619     region->make_trash_immediate();
1620   }
1621   return required_regions;
1622 }
1623 
1624 class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
1625 public:
1626   ShenandoahCheckCleanGCLABClosure() {}
1627   void do_thread(Thread* thread) {
1628     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1629     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1630     assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
1631 
1632     PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1633     assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
1634     assert(plab->words_remaining() == 0, "PLAB should not need retirement");
1635   }
1636 };
1637 
1638 class ShenandoahRetireGCLABClosure : public ThreadClosure {
1639 private:
1640   bool const _resize;
1641 public:
1642   ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
1643   void do_thread(Thread* thread) {
1644     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1645     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1646     gclab->retire();
1647     if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1648       ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1649     }
1650 
1651     PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1652     assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
1653 
1654     // There are two reasons to retire all plabs between old-gen evacuation passes.
1655     //  1. We need to make the plab memory parseable by remembered-set scanning.
1656     //  2. We need to establish a trustworthy UpdateWaterMark value within each old-gen heap region
1657     ShenandoahHeap::heap()->retire_plab(plab, thread);
1658     if (_resize && ShenandoahThreadLocalData::plab_size(thread) > 0) {
1659       ShenandoahThreadLocalData::set_plab_size(thread, 0);
1660     }
1661   }
1662 };
1663 
1664 void ShenandoahHeap::labs_make_parsable() {
1665   assert(UseTLAB, "Only call with UseTLAB");
1666 
1667   ShenandoahRetireGCLABClosure cl(false);
1668 
1669   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1670     ThreadLocalAllocBuffer& tlab = t->tlab();
1671     tlab.make_parsable();
1672     cl.do_thread(t);
1673   }
1674 
1675   workers()->threads_do(&cl);
1676 }
1677 
1678 void ShenandoahHeap::tlabs_retire(bool resize) {
1679   assert(UseTLAB, "Only call with UseTLAB");
1680   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");

1698   }
1699   workers()->threads_do(&cl);
1700 #endif
1701 }
1702 
1703 void ShenandoahHeap::gclabs_retire(bool resize) {
1704   assert(UseTLAB, "Only call with UseTLAB");
1705   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1706 
1707   ShenandoahRetireGCLABClosure cl(resize);
1708   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1709     cl.do_thread(t);
1710   }
1711   workers()->threads_do(&cl);
1712 
1713   if (safepoint_workers() != nullptr) {
1714     safepoint_workers()->threads_do(&cl);
1715   }
1716 }
1717 
1718 class ShenandoahTagGCLABClosure : public ThreadClosure {
1719 public:
1720   void do_thread(Thread* thread) {
1721     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1722     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1723     if (gclab->words_remaining() > 0) {
1724       ShenandoahHeapRegion* r = ShenandoahHeap::heap()->heap_region_containing(gclab->allocate(0));
1725       r->set_young_lab_flag();
1726     }
1727   }
1728 };
1729 
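// Clear the young-LAB flags on all regions, then tag every region that currently backs an active GCLAB or mutator TLAB.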
1730 void ShenandoahHeap::set_young_lab_region_flags() {
1731   if (!UseTLAB) {
1732     return;
1733   }
1734   for (size_t i = 0; i < _num_regions; i++) {
1735     _regions[i]->clear_young_lab_flags();
1736   }
1737   ShenandoahTagGCLABClosure cl;
1738   workers()->threads_do(&cl);
1739   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1740     cl.do_thread(t);
1741     ThreadLocalAllocBuffer& tlab = t->tlab();
1742     if (tlab.end() != nullptr) {
1743       ShenandoahHeapRegion* r = heap_region_containing(tlab.start());
1744       r->set_young_lab_flag();
1745     }
1746   }
1747 }
1748 
1749 // Returns size in bytes
1750 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1751   if (ShenandoahElasticTLAB) {
1752     if (mode()->is_generational()) {
1753       return MIN2(ShenandoahHeapRegion::max_tlab_size_bytes(), young_generation()->adjusted_available());
1754     } else {
1755       // With Elastic TLABs, return the max allowed size, and let the allocation path
1756       // figure out the safe size for current allocation.
1757       return ShenandoahHeapRegion::max_tlab_size_bytes();
1758     }
1759   } else {
1760     return MIN2(_free_set->unsafe_peek_free(), ShenandoahHeapRegion::max_tlab_size_bytes());
1761   }
1762 }
1763 
1764 size_t ShenandoahHeap::max_tlab_size() const {
1765   // Returns size in words
1766   return ShenandoahHeapRegion::max_tlab_size_words();
1767 }
1768 
1769 void ShenandoahHeap::collect(GCCause::Cause cause) {
1770   control_thread()->request_gc(cause);
1771 }
1772 
1773 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
1774   //assert(false, "Shouldn't need to do full collections");
1775 }
1776 
1777 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1778   ShenandoahHeapRegion* r = heap_region_containing(addr);

1781   }
1782   return nullptr;
1783 }
1784 
1785 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1786   ShenandoahHeapRegion* r = heap_region_containing(addr);
1787   return r->block_is_obj(addr);
1788 }
1789 
1790 bool ShenandoahHeap::print_location(outputStream* st, void* addr) const {
1791   return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr);
1792 }
1793 
1794 void ShenandoahHeap::prepare_for_verify() {
1795   if (SafepointSynchronize::is_at_safepoint() && UseTLAB) {
1796     labs_make_parsable();
1797   }
1798 }
1799 
1800 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1801   if (_shenandoah_policy->is_at_shutdown()) {
1802     return;
1803   }
1804 
1805   tcl->do_thread(_control_thread);
1806   tcl->do_thread(_regulator_thread);
1807   workers()->threads_do(tcl);
1808   if (_safepoint_workers != nullptr) {
1809     _safepoint_workers->threads_do(tcl);
1810   }
1811   if (ShenandoahStringDedup::is_enabled()) {
1812     ShenandoahStringDedup::threads_do(tcl);
1813   }
1814 }
1815 
1816 void ShenandoahHeap::print_tracing_info() const {
1817   LogTarget(Info, gc, stats) lt;
1818   if (lt.is_enabled()) {
1819     ResourceMark rm;
1820     LogStream ls(lt);
1821 
1822     phase_timings()->print_global_on(&ls);
1823 
1824     ls.cr();
1825     ls.cr();
1826 
1827     shenandoah_policy()->print_gc_stats(&ls);
1828 
1829     ls.cr();
1830 
1831     evac_tracker()->print_global_on(&ls);
1832 
1833     ls.cr();
1834     ls.cr();
1835   }
1836 }
1837 
1838 void ShenandoahHeap::on_cycle_start(GCCause::Cause cause, ShenandoahGeneration* generation) {
1839   set_gc_cause(cause);
1840   set_gc_generation(generation);
1841 
1842   shenandoah_policy()->record_cycle_start();
1843   generation->heuristics()->record_cycle_start();
1844 
1845   // When a cycle starts, attribute any thread activity that occurred while the collector
1846   // was idle to the global generation.
1847   _mmu_tracker.record(global_generation());
1848 }
1849 
1850 void ShenandoahHeap::on_cycle_end(ShenandoahGeneration* generation) {
1851   generation->heuristics()->record_cycle_end();
1852 
1853   if (mode()->is_generational() &&
1854       ((generation->generation_mode() == GLOBAL) || upgraded_to_full())) {
1855     // If we just completed a GLOBAL GC, claim credit for completion of young-gen and old-gen GC as well
1856     young_generation()->heuristics()->record_cycle_end();
1857     old_generation()->heuristics()->record_cycle_end();
1858   }
1859   set_gc_cause(GCCause::_no_gc);
1860 
1861   // When a cycle ends, the thread activity is attributed to the respective generation
1862   _mmu_tracker.record(generation);
1863 }
1864 
1865 void ShenandoahHeap::verify(VerifyOption vo) {
1866   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1867     if (ShenandoahVerify) {
1868       verifier()->verify_generic(vo);
1869     } else {
1870       // TODO: Consider allocating verification bitmaps on demand,
1871       // and turn this on unconditionally.
1872     }
1873   }
1874 }
1875 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1876   return _free_set->capacity();
1877 }
1878 
1879 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1880 private:
1881   MarkBitMap* _bitmap;
1882   ShenandoahScanObjectStack* _oop_stack;
1883   ShenandoahHeap* const _heap;
1884   ShenandoahMarkingContext* const _marking_context;

2166       if (start >= max) break;
2167 
2168       for (size_t i = cur; i < end; i++) {
2169         ShenandoahHeapRegion* current = _heap->get_region(i);
2170         _blk->heap_region_do(current);
2171       }
2172     }
2173   }
2174 };
2175 
2176 void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
2177   assert(blk->is_thread_safe(), "Only thread-safe closures here");
2178   if (num_regions() > ShenandoahParallelRegionStride) {
2179     ShenandoahParallelHeapRegionTask task(blk);
2180     workers()->run_task(&task);
2181   } else {
2182     heap_region_iterate(blk);
2183   }
2184 }
2185 























2186 class ShenandoahRendezvousClosure : public HandshakeClosure {
2187 public:
2188   inline ShenandoahRendezvousClosure() : HandshakeClosure("ShenandoahRendezvous") {}
2189   inline void do_thread(Thread* thread) {}
2190 };
2191 
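// Handshake all Java threads with an empty closure.  When this returns, every Java thread has executed the
// (no-op) handshake, which acts as a rendezvous point for the concurrent GC.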
2192 void ShenandoahHeap::rendezvous_threads() {
2193   ShenandoahRendezvousClosure cl;
2194   Handshake::execute(&cl);
2195 }
2196 
2197 void ShenandoahHeap::recycle_trash() {
2198   free_set()->recycle_trash();
2199 }
2200 
2201 void ShenandoahHeap::do_class_unloading() {
2202   _unloader.unload();
2203 }
2204 
2205 void ShenandoahHeap::stw_weak_refs(bool full_gc) {
2206   // Weak refs processing
2207   ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
2208                                                 : ShenandoahPhaseTimings::degen_gc_weakrefs;
2209   ShenandoahTimingsTracker t(phase);
2210   ShenandoahGCWorkerPhase worker_phase(phase);
2211   active_generation()->ref_processor()->process_references(phase, workers(), false /* concurrent */);
2212 }
2213 
2214 void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
2215   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2216 
2217   // Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to
2218   // make them parsable for update code to work correctly. Plus, we can compute new sizes
2219   // for future GCLABs here.
2220   if (UseTLAB) {
2221     ShenandoahGCPhase phase(concurrent ?
2222                             ShenandoahPhaseTimings::init_update_refs_manage_gclabs :
2223                             ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
2224     gclabs_retire(ResizeTLAB);
2225   }
2226 
2227   _update_refs_iterator.reset();
2228 }
2229 
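// Propagate the canonical gc-state bits into each Java thread's thread-local copy.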
2230 void ShenandoahHeap::set_gc_state_all_threads(char state) {
2231   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
2232     ShenandoahThreadLocalData::set_gc_state(t, state);
2233   }
2234 }
2235 
2236 void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) {
2237   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should really be Shenandoah safepoint");
2238   _gc_state.set_cond(mask, value);
2239   set_gc_state_all_threads(_gc_state.raw_value());
2240 }
2241 
2242 void ShenandoahHeap::set_concurrent_young_mark_in_progress(bool in_progress) {
2243   if (has_forwarded_objects()) {
2244     set_gc_state_mask(YOUNG_MARKING | UPDATEREFS, in_progress);
2245   } else {
2246     set_gc_state_mask(YOUNG_MARKING, in_progress);
2247   }
2248 
2249   manage_satb_barrier(in_progress);
2250 }
2251 
2252 void ShenandoahHeap::set_concurrent_old_mark_in_progress(bool in_progress) {
2253   if (has_forwarded_objects()) {
2254     set_gc_state_mask(OLD_MARKING | UPDATEREFS, in_progress);
2255   } else {
2256     set_gc_state_mask(OLD_MARKING, in_progress);
2257   }
2258 
2259   manage_satb_barrier(in_progress);
2260 }
2261 
2262 void ShenandoahHeap::set_prepare_for_old_mark_in_progress(bool in_progress) {
2263   // Unlike other set-gc-state functions, this may happen outside safepoint.
2264   // Is only set and queried by control thread, so no coherence issues.
2265   _prepare_for_old_mark = in_progress;
2266 }
2267 
2268 void ShenandoahHeap::set_aging_cycle(bool in_progress) {
2269   _is_aging_cycle.set_cond(in_progress);
2270 }
2271 
2272 void ShenandoahHeap::manage_satb_barrier(bool active) {
2273   if (is_concurrent_mark_in_progress()) {
2274     // Ignore request to deactivate barrier while concurrent mark is in progress.
2275     // Do not attempt to re-activate the barrier if it is already active.
2276     if (active && !ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
2277       ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
2278     }
2279   } else {
2280     // No concurrent marking is in progress so honor request to deactivate,
2281     // but only if the barrier is already active.
2282     if (!active && ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
2283       ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
2284     }
2285   }
2286 }
2287 
2288 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
2289   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
2290   set_gc_state_mask(EVACUATION, in_progress);
2291 }
2292 
2293 void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
2294   if (in_progress) {
2295     _concurrent_strong_root_in_progress.set();
2296   } else {
2297     _concurrent_strong_root_in_progress.unset();
2298   }
2299 }
2300 
2301 void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
2302   set_gc_state_mask(WEAK_ROOTS, cond);
2303 }
2304 
2305 GCTracer* ShenandoahHeap::tracer() {

2310   return _free_set->used();
2311 }
2312 
2313 bool ShenandoahHeap::try_cancel_gc() {
2314   while (true) {
2315     jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
2316     if (prev == CANCELLABLE) return true;
2317     else if (prev == CANCELLED) return false;
2318     assert(ShenandoahSuspendibleWorkers, "should not get here when not using suspendible workers");
2319     assert(prev == NOT_CANCELLED, "must be NOT_CANCELLED");
2320     Thread* thread = Thread::current();
2321     if (thread->is_Java_thread()) {
2322       // We need to provide a safepoint here, otherwise we might
2323       // spin forever if a SP is pending.
2324       ThreadBlockInVM sp(JavaThread::cast(thread));
2325       SpinPause();
2326     }
2327   }
2328 }
2329 
2330 void ShenandoahHeap::cancel_concurrent_mark() {
2331   _young_generation->cancel_marking();
2332   _old_generation->cancel_marking();
2333   _global_generation->cancel_marking();
2334 
2335   ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
2336 }
2337 
2338 void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
2339   if (try_cancel_gc()) {
2340     FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
2341     log_info(gc)("%s", msg.buffer());
2342     Events::log(Thread::current(), "%s", msg.buffer());
2343     _cancel_requested_time = os::elapsedTime();
2344     if (cause == GCCause::_shenandoah_upgrade_to_full_gc) {
2345       _upgraded_to_full = true;
2346     }
2347   }
2348 }
2349 
2350 uint ShenandoahHeap::max_workers() {
2351   return _max_workers;
2352 }
2353 
2354 void ShenandoahHeap::stop() {
2355   // The shutdown sequence should be able to terminate when GC is running.
2356 
2357   // Step 1. Notify policy to disable event recording and prevent visiting gc threads during shutdown
2358   _shenandoah_policy->record_shutdown();
2359 
2360   // Step 2. Stop requesting collections.
2361   regulator_thread()->stop();
2362 
2363   // Step 3. Notify control thread that we are in shutdown.
2364   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
2365   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
2366   control_thread()->prepare_for_graceful_shutdown();
2367 
2368   // Step 4. Notify GC workers that we are cancelling GC.
2369   cancel_gc(GCCause::_shenandoah_stop_vm);
2370 
2371   // Step 5. Wait until GC worker exits normally.
2372   control_thread()->stop();
2373 }
2374 
2375 void ShenandoahHeap::stw_unload_classes(bool full_gc) {
2376   if (!unload_classes()) return;
2377   // Unload classes and purge SystemDictionary.
2378   {
2379     ShenandoahPhaseTimings::Phase phase = full_gc ?
2380                                           ShenandoahPhaseTimings::full_gc_purge_class_unload :
2381                                           ShenandoahPhaseTimings::degen_gc_purge_class_unload;
2382     ShenandoahIsAliveSelector is_alive;
2383     CodeCache::UnloadingScope scope(is_alive.is_alive_closure());
2384     ShenandoahGCPhase gc_phase(phase);
2385     ShenandoahGCWorkerPhase worker_phase(phase);
2386     bool purged_class = SystemDictionary::do_unloading(gc_timer());
2387 
2388     uint num_workers = _workers->active_workers();
2389     ShenandoahClassUnloadingTask unlink_task(phase, num_workers, purged_class);
2390     _workers->run_task(&unlink_task);
2391   }

2445 }
2446 
2447 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
2448   set_gc_state_mask(HAS_FORWARDED, cond);
2449 }
2450 
2451 void ShenandoahHeap::set_unload_classes(bool uc) {
2452   _unload_classes.set_cond(uc);
2453 }
2454 
2455 bool ShenandoahHeap::unload_classes() const {
2456   return _unload_classes.is_set();
2457 }
2458 
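// Address of the biased collection-set membership map; as the name suggests, it is used for fast
// in-collection-set tests.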
2459 address ShenandoahHeap::in_cset_fast_test_addr() {
2460   ShenandoahHeap* heap = ShenandoahHeap::heap();
2461   assert(heap->collection_set() != nullptr, "Sanity");
2462   return (address) heap->collection_set()->biased_map_address();
2463 }
2464 




2465 address ShenandoahHeap::gc_state_addr() {
2466   return (address) ShenandoahHeap::heap()->_gc_state.addr_of();
2467 }
2468 




2469 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
2470   if (mode()->is_generational()) {
2471     young_generation()->reset_bytes_allocated_since_gc_start();
2472     old_generation()->reset_bytes_allocated_since_gc_start();
2473   }
2474 
2475   global_generation()->reset_bytes_allocated_since_gc_start();
2476 }
2477 
2478 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
2479   _degenerated_gc_in_progress.set_cond(in_progress);
2480 }
2481 
2482 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2483   _full_gc_in_progress.set_cond(in_progress);
2484 }
2485 
2486 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2487   assert (is_full_gc_in_progress(), "should be");
2488   _full_gc_move_in_progress.set_cond(in_progress);
2489 }
2490 
2491 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2492   set_gc_state_mask(UPDATEREFS, in_progress);
2493 }
2494 
2495 void ShenandoahHeap::register_nmethod(nmethod* nm) {

2519     if (r->is_active()) {
2520       if (r->is_pinned()) {
2521         if (r->pin_count() == 0) {
2522           r->make_unpinned();
2523         }
2524       } else {
2525         if (r->pin_count() > 0) {
2526           r->make_pinned();
2527         }
2528       }
2529     }
2530   }
2531 
2532   assert_pinned_region_status();
2533 }
2534 
2535 #ifdef ASSERT
2536 void ShenandoahHeap::assert_pinned_region_status() {
2537   for (size_t i = 0; i < num_regions(); i++) {
2538     ShenandoahHeapRegion* r = get_region(i);
2539     if (active_generation()->contains(r)) {
2540       assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
2541              "Region " SIZE_FORMAT " pinning status is inconsistent", i);
2542     }
2543   }
2544 }
2545 #endif
2546 
2547 ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
2548   return _gc_timer;
2549 }
2550 
2551 void ShenandoahHeap::prepare_concurrent_roots() {
2552   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2553   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2554   set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
2555   set_concurrent_weak_root_in_progress(true);
2556   if (unload_classes()) {
2557     _unloader.prepare();
2558   }
2559 }
2560 
2561 void ShenandoahHeap::finish_concurrent_roots() {
2562   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

2582       assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
2583     } else {
2584       // Use ConcGCThreads outside safepoints
2585       assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
2586     }
2587   }
2588 }
2589 #endif
2590 
2591 ShenandoahVerifier* ShenandoahHeap::verifier() {
2592   guarantee(ShenandoahVerify, "Should be enabled");
2593   assert (_verifier != nullptr, "sanity");
2594   return _verifier;
2595 }
2596 
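// Updates references in active regions that are not in the collection set.  In generational mode, old regions
// outside a GLOBAL cycle are processed in a second phase that walks remembered-set chunks, which balances the
// work more evenly across workers.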
2597 template<bool CONCURRENT>
2598 class ShenandoahUpdateHeapRefsTask : public WorkerTask {
2599 private:
2600   ShenandoahHeap* _heap;
2601   ShenandoahRegionIterator* _regions;
2602   ShenandoahRegionChunkIterator* _work_chunks;
2603 
2604 public:
2605   explicit ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions,
2606                                         ShenandoahRegionChunkIterator* work_chunks) :
2607     WorkerTask("Shenandoah Update References"),
2608     _heap(ShenandoahHeap::heap()),
2609     _regions(regions),
2610     _work_chunks(work_chunks)
2611   {
2612   }
2613 
2614   void work(uint worker_id) {
2615     if (CONCURRENT) {
2616       ShenandoahConcurrentWorkerSession worker_session(worker_id);
2617       ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
2618       do_work<ShenandoahConcUpdateRefsClosure>(worker_id);
2619     } else {
2620       ShenandoahParallelWorkerSession worker_session(worker_id);
2621       do_work<ShenandoahSTWUpdateRefsClosure>(worker_id);
2622     }
2623   }
2624 
2625 private:
2626   template<class T>
2627   void do_work(uint worker_id) {
2628     T cl;
2629     ShenandoahHeapRegion* r = _regions->next();
2630     // We update references for global, old, and young collections.
2631     assert(_heap->active_generation()->is_mark_complete(), "Expected complete marking");
2632     ShenandoahMarkingContext* const ctx = _heap->marking_context();
2633     bool is_mixed = _heap->collection_set()->has_old_regions();
2634     while (r != nullptr) {
2635       HeapWord* update_watermark = r->get_update_watermark();
2636       assert (update_watermark >= r->bottom(), "sanity");
2637 
2638       log_debug(gc)("ShenandoahUpdateHeapRefsTask::do_work(%u) looking at region " SIZE_FORMAT, worker_id, r->index());
2639       bool region_progress = false;
2640       if (r->is_active() && !r->is_cset()) {
2641         if (!_heap->mode()->is_generational() || (r->affiliation() == ShenandoahRegionAffiliation::YOUNG_GENERATION)) {
2642           _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2643           region_progress = true;
2644         } else if (r->affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION) {
2645           if (_heap->active_generation()->generation_mode() == GLOBAL) {
2646             // Note that GLOBAL collection is not as effectively balanced as young and mixed cycles.  This is because
2647             // concurrent GC threads are handed entire heap regions of work at a time and there
2648             // is no "catchup phase" consisting of remembered set scanning, during which parcels of work are smaller
2649             // and more easily distributed fairly across threads.
2650 
2651             // TODO: Consider an improvement to load balance GLOBAL GC.
2652             _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2653             region_progress = true;
2654           }
2655           // Otherwise, this is an old region in a young or mixed cycle.  Process it during a second phase, below.
2656           // Don't bother to report pacing progress in this case.
2657         } else {
2658           // Because updating of references runs concurrently, it is possible that a FREE inactive region transitions
2659           // to a non-free active region while this loop is executing.  Whenever this happens, the changing of a region's
2660           // active status may propagate at a different speed than the changing of the region's affiliation.
2661 
2662           // When we reach this control point, it is because a race has allowed a region's is_active() status to be seen
2663           // by this thread before the region's affiliation() is seen by this thread.
2664 
2665           // It's ok for this race to occur because the newly transformed region does not have any references to be
2666           // updated.
2667 
2668           assert(r->get_update_watermark() == r->bottom(),
2669                  "%s Region " SIZE_FORMAT " is_active but not recognized as YOUNG or OLD so must be newly transitioned from FREE",
2670                  affiliation_name(r->affiliation()), r->index());
2671         }
2672       }
2673       if (region_progress && ShenandoahPacing) {
2674         _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
2675       }
2676       if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
2677         return;
2678       }
2679       r = _regions->next();
2680     }
2681 
2682     if (_heap->mode()->is_generational() && (_heap->active_generation()->generation_mode() != GLOBAL)) {
2683       // Since this is generational and not GLOBAL, we have to process the remembered set.  There is no remembered
2684       // set processing when not in generational mode, or when running a GLOBAL cycle.
2685 
2686       // After this thread has exhausted its traditional update-refs work, it continues with updating refs within remembered set.
2687       // The remembered set workload is better balanced between threads, so threads that are "behind" can catch up with other
2688       // threads during this phase, allowing all threads to work more effectively in parallel.
2689       struct ShenandoahRegionChunk assignment;
2690       RememberedScanner* scanner = _heap->card_scan();
2691 
2692       while (!_heap->check_cancelled_gc_and_yield(CONCURRENT) && _work_chunks->next(&assignment)) {
2693         // Keep grabbing next work chunk to process until finished, or asked to yield
2694         ShenandoahHeapRegion* r = assignment._r;
2695         if (r->is_active() && !r->is_cset() && (r->affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION)) {
2696           HeapWord* start_of_range = r->bottom() + assignment._chunk_offset;
2697           HeapWord* end_of_range = r->get_update_watermark();
2698           if (end_of_range > start_of_range + assignment._chunk_size) {
2699             end_of_range = start_of_range + assignment._chunk_size;
2700           }
2701 
2702           // Old region in a young cycle or mixed cycle.
2703           if (is_mixed) {
2704             // TODO: For mixed evac, consider building an old-gen remembered set that allows restricted updating
2705             // within old-gen HeapRegions.  This remembered set can be constructed by old-gen concurrent marking
2706             // and augmented by card marking.  For example, old-gen concurrent marking can remember for each old-gen
2707             // card which other old-gen regions it refers to: none, one-other specifically, multiple-other non-specific.
2708             // During update-references in a mixed evacuation, process each old-gen memory range that has a
2709             // traditional DIRTY card, or whose "old-gen remembered set" entry indicates that the card holds
2710             // pointers specifically to an old-gen region in the most recent collection set, or pointers to
2711             // other non-specific old-gen heap regions.
2712 
2713             if (r->is_humongous()) {
2714               if (start_of_range < end_of_range) {
2715                 // Need to examine both dirty and clean cards during mixed evac.
2716                 r->oop_iterate_humongous_slice(&cl, false, start_of_range, assignment._chunk_size, true);
2717               }
2718             } else {
2719               // Since this is mixed evacuation, old regions that are candidates for collection have not been coalesced
2720               // and filled.  Use mark bits to find objects that need to be updated.
2721               //
2722               // Future TODO: establish a second remembered set to identify which old-gen regions point to other old-gen
2723               // regions which are in the collection set for a particular mixed evacuation.
2724               if (start_of_range < end_of_range) {
2725                 HeapWord* p = nullptr;
2726                 size_t card_index = scanner->card_index_for_addr(start_of_range);
2727                 // In case last object in my range spans boundary of my chunk, I may need to scan all the way to top()
2728                 ShenandoahObjectToOopBoundedClosure<T> objs(&cl, start_of_range, r->top());
2729 
2730                 // Any object that begins in a previous range is part of a different scanning assignment.  Any object that
2731                 // starts after end_of_range is also not my responsibility.  (Either allocated during evacuation, so does
2732                 // not hold pointers to from-space, or is beyond the range of my assigned work chunk.)
2733 
2734                 // Find the first object that begins in my range, if there is one.
2735                 p = start_of_range;
2736                 oop obj = cast_to_oop(p);
2737                 HeapWord* tams = ctx->top_at_mark_start(r);
2738                 if (p >= tams) {
2739                   // We cannot use ctx->is_marked(obj) to test whether an object begins at this address.  Instead,
2740                   // we need to use the remembered set crossing map to advance p to the first object that starts
2741                   // within the enclosing card.
2742 
2743                   while (true) {
2744                     HeapWord* first_object = scanner->first_object_in_card(card_index);
2745                     if (first_object != nullptr) {
2746                       p = first_object;
2747                       break;
2748                     } else if (scanner->addr_for_card_index(card_index + 1) < end_of_range) {
2749                       card_index++;
2750                     } else {
2751                       // Force the loop that follows to immediately terminate.
2752                       p = end_of_range;
2753                       break;
2754                     }
2755                   }
2756                   obj = cast_to_oop(p);
2757                   // Note: p may be >= end_of_range
2758                 } else if (!ctx->is_marked(obj)) {
2759                   p = ctx->get_next_marked_addr(p, tams);
2760                   obj = cast_to_oop(p);
2761                   // If there are no more marked objects before tams, this returns tams.
2762                   // Note that tams is either >= end_of_range, or tams is the start of an object that is marked.
2763                 }
2764                 while (p < end_of_range) {
2765                   // p is known to point to the beginning of marked object obj
2766                   objs.do_object(obj);
2767                   HeapWord* prev_p = p;
2768                   p += obj->size();
2769                   if (p < tams) {
2770                     p = ctx->get_next_marked_addr(p, tams);
2771                     // If there are no more marked objects before tams, this returns tams.  Note that tams is
2772                     // either >= end_of_range, or tams is the start of an object that is marked.
2773                   }
2774                   assert(p != prev_p, "Lack of forward progress");
2775                   obj = cast_to_oop(p);
2776                 }
2777               }
2778             }
2779           } else {
2780             // This is a young evacuation.
2781             if (start_of_range < end_of_range) {
2782               size_t cluster_size =
2783                 CardTable::card_size_in_words() * ShenandoahCardCluster<ShenandoahDirectCardMarkRememberedSet>::CardsPerCluster;
2784               size_t clusters = assignment._chunk_size / cluster_size;
2785               assert(clusters * cluster_size == assignment._chunk_size, "Chunk assignment must align on cluster boundaries");
2786               scanner->process_region_slice(r, assignment._chunk_offset, clusters, end_of_range, &cl, true, worker_id);
2787             }
2788           }
2789           if (ShenandoahPacing && (start_of_range < end_of_range)) {
2790             _heap->pacer()->report_updaterefs(pointer_delta(end_of_range, start_of_range));
2791           }
2792         }
2793       }
2794     }
2795   }
2796 };
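
// For illustration (a minimal arithmetic sketch of the invariant asserted in the young-evacuation
// branch above; nothing here is new behavior): each work chunk must cover a whole number of
// remembered-set card clusters,
//
//   cluster_size = CardTable::card_size_in_words()
//                  * ShenandoahCardCluster<ShenandoahDirectCardMarkRememberedSet>::CardsPerCluster;
//   clusters     = assignment._chunk_size / cluster_size;    // exact division, per the assert
//
// which is what lets process_region_slice() be driven by a cluster count together with the
// cluster-aligned chunk offset.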
2797 
2798 void ShenandoahHeap::update_heap_references(bool concurrent) {
2799   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2800   uint nworkers = workers()->active_workers();
2801   ShenandoahRegionChunkIterator work_list(nworkers);
2802 
2803   if (concurrent) {
2804     ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator, &work_list);
2805     workers()->run_task(&task);
2806   } else {
2807     ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator, &work_list);
2808     workers()->run_task(&task);
2809   }
2810   if (ShenandoahEnableCardStats && card_scan() != nullptr) { // card_scan() is non-null only in generational mode
2811     card_scan()->log_card_stats(nworkers, CARD_STAT_UPDATE_REFS);
2812   }
2813 }
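
// For illustration (a minimal sketch with hypothetical names; the real task is
// ShenandoahUpdateHeapRefsTask above): CONCURRENT is a template parameter, so each instantiation is
// specialized at compile time instead of testing a runtime flag on every updated oop.
//
//   template<bool CONCURRENT>
//   class UpdateTask : public WorkerTask {
//   public:
//     UpdateTask() : WorkerTask("Hypothetical Update Refs Task") {}
//     void work(uint worker_id) {
//       if (CONCURRENT) {
//         // cooperate with safepoints while updating references
//       } else {
//         // degenerated case: already at a safepoint, run straight through
//       }
//     }
//   };
//
//   // update_heap_references() picks the instantiation once, exactly as above:
//   //   concurrent ? run UpdateTask<true> : run UpdateTask<false>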
2814 

2815 class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
2816 private:
2817   ShenandoahMarkingContext* _ctx;
2818   ShenandoahHeapLock* const _lock;
2819   bool _is_generational;
2820 
2821 public:
2822   ShenandoahFinalUpdateRefsUpdateRegionStateClosure(
2823     ShenandoahMarkingContext* ctx) : _ctx(ctx), _lock(ShenandoahHeap::heap()->lock()),
2824                                      _is_generational(ShenandoahHeap::heap()->mode()->is_generational()) { }
2825 
2826   void heap_region_do(ShenandoahHeapRegion* r) {
2827 
2828     // Maintenance of region age must follow evacuation in order to account for evacuation allocations within survivor
2829     // regions.  We consult region age during the subsequent evacuation to determine whether certain objects need to
2830     // be promoted.
2831     if (_is_generational && r->is_young()) {
2832       HeapWord *tams = _ctx->top_at_mark_start(r);
2833       HeapWord *top = r->top();
2834 
2835       // Allocations move the watermark when top moves.  However, compacting
2836       // objects will sometimes lower top beneath the watermark, after which
2837       // attempts to read the watermark will assert (the watermark should never
2838       // be higher than top).
2839       if (top > tams) {
2840         // There have been allocations in this region since the start of the cycle.
2841         // Any objects new to this region must not assimilate elevated age.
2842         r->reset_age();
2843       } else if (ShenandoahHeap::heap()->is_aging_cycle()) {
2844         r->increment_age();
2845       }
2846     }
2847 
2848     // Drop unnecessary "pinned" state from regions that do not have CP marks
2849     // anymore, as this allows trashing them.

2850     if (r->is_active()) {
2851       if (r->is_pinned()) {
2852         if (r->pin_count() == 0) {
2853           ShenandoahHeapLocker locker(_lock);
2854           r->make_unpinned();
2855         }
2856       } else {
2857         if (r->pin_count() > 0) {
2858           ShenandoahHeapLocker locker(_lock);
2859           r->make_pinned();
2860         }
2861       }
2862     }
2863   }
2864 
2865   bool is_thread_safe() { return true; }
2866 };
2867 
2868 void ShenandoahHeap::update_heap_region_states(bool concurrent) {
2869   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2870   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2871 
2872   {
2873     ShenandoahGCPhase phase(concurrent ?
2874                             ShenandoahPhaseTimings::final_update_refs_update_region_states :
2875                             ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
2876     ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl(active_generation()->complete_marking_context());
2877     parallel_heap_region_iterate(&cl);
2878 
2879     assert_pinned_region_status();
2880   }
2881 
2882   {
2883     ShenandoahGCPhase phase(concurrent ?
2884                             ShenandoahPhaseTimings::final_update_refs_trash_cset :
2885                             ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
2886     trash_cset_regions();
2887   }
2888 }
2889 
2890 void ShenandoahHeap::rebuild_free_set(bool concurrent) {
2891   {
2892     ShenandoahGCPhase phase(concurrent ?
2893                             ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
2894                             ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
2895     ShenandoahHeapLocker locker(lock());
2896     _free_set->rebuild();

2990   EventMark em("%s", msg);
2991 
2992   op_uncommit(shrink_before, shrink_until);
2993 }
2994 
2995 void ShenandoahHeap::try_inject_alloc_failure() {
2996   if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2997     _inject_alloc_failure.set();
2998     os::naked_short_sleep(1);
2999     if (cancelled_gc()) {
3000       log_info(gc)("Allocation failure was successfully injected");
3001     }
3002   }
3003 }
3004 
3005 bool ShenandoahHeap::should_inject_alloc_failure() {
3006   return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
3007 }
3008 
3009 void ShenandoahHeap::initialize_serviceability() {
3010   if (mode()->is_generational()) {
3011     _young_gen_memory_pool = new ShenandoahYoungGenMemoryPool(this);
3012     _old_gen_memory_pool = new ShenandoahOldGenMemoryPool(this);
3013     _cycle_memory_manager.add_pool(_young_gen_memory_pool);
3014     _cycle_memory_manager.add_pool(_old_gen_memory_pool);
3015     _stw_memory_manager.add_pool(_young_gen_memory_pool);
3016     _stw_memory_manager.add_pool(_old_gen_memory_pool);
3017   } else {
3018     _memory_pool = new ShenandoahMemoryPool(this);
3019     _cycle_memory_manager.add_pool(_memory_pool);
3020     _stw_memory_manager.add_pool(_memory_pool);
3021   }
3022 }
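
// Note (descriptive, not new behavior): in generational mode both pools are registered with both
// managers, so monitoring clients see young- and old-generation usage for concurrent cycles as well
// as for STW collections; memory_managers() and memory_pools() below simply hand these registrations
// back to the serviceability layer.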
3023 
3024 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
3025   GrowableArray<GCMemoryManager*> memory_managers(2);
3026   memory_managers.append(&_cycle_memory_manager);
3027   memory_managers.append(&_stw_memory_manager);
3028   return memory_managers;
3029 }
3030 
3031 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
3032   GrowableArray<MemoryPool*> memory_pools(1);
3033   if (mode()->is_generational()) {
3034     memory_pools.append(_young_gen_memory_pool);
3035     memory_pools.append(_old_gen_memory_pool);
3036   } else {
3037     memory_pools.append(_memory_pool);
3038   }
3039   return memory_pools;
3040 }
3041 
3042 MemoryUsage ShenandoahHeap::memory_usage() {
3043   return MemoryUsage(_initial_size, used(), committed(), max_capacity());
3044 }
3045 
3046 ShenandoahRegionIterator::ShenandoahRegionIterator() :
3047   _heap(ShenandoahHeap::heap()),
3048   _index(0) {}
3049 
3050 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
3051   _heap(heap),
3052   _index(0) {}
3053 
3054 void ShenandoahRegionIterator::reset() {
3055   _index = 0;
3056 }
3057 
3058 bool ShenandoahRegionIterator::has_next() const {
3059   return _index < _heap->num_regions();
3060 }
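
// For illustration (a minimal usage sketch; the remembered-set verifiers later in this file follow
// the same shape): ShenandoahRegionIterator is consumed in the usual has_next()/next() style.
//
//   ShenandoahRegionIterator iterator;
//   while (iterator.has_next()) {
//     ShenandoahHeapRegion* r = iterator.next();
//     if (r == nullptr) break;    // defensive: next() may return null once all regions are claimed
//     // ... per-region work ...
//   }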
3061 
3062 char ShenandoahHeap::gc_state() const {
3063   return _gc_state.raw_value();
3064 }
3065 
3066 ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
3067 #ifdef ASSERT
3068   assert(_liveness_cache != nullptr, "sanity");
3069   assert(worker_id < _max_workers, "sanity");
3070   for (uint i = 0; i < num_regions(); i++) {
3071     assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
3072   }
3073 #endif
3074   return _liveness_cache[worker_id];
3075 }
3076 
3077 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
3078   assert(worker_id < _max_workers, "sanity");
3079   assert(_liveness_cache != nullptr, "sanity");
3080   ShenandoahLiveData* ld = _liveness_cache[worker_id];
3081 
3082   for (uint i = 0; i < num_regions(); i++) {
3083     ShenandoahLiveData live = ld[i];
3084     if (live > 0) {
3085       ShenandoahHeapRegion* r = get_region(i);
3086       r->increase_live_data_gc_words(live);
3087       ld[i] = 0;
3088     }
3089   }
3090 }
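
// For illustration (a minimal sketch; region_index and live_words are hypothetical placeholders for
// whatever a marking closure computes): the liveness cache is a per-worker array indexed by region,
// letting workers accumulate live data locally before it is folded into the regions.
//
//   ShenandoahLiveData* ld = heap->get_liveness_cache(worker_id);   // asserted to be all zeroes
//   ld[region_index] += live_words;                                 // per-worker accumulation
//   // ...
//   heap->flush_liveness_cache(worker_id);   // folds non-zero entries into the regions and re-zeroes them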
3091 
3092 bool ShenandoahHeap::requires_barriers(stackChunkOop obj) const {
3093   if (is_idle()) return false;
3094 
3095   // Objects allocated after marking start are implicitly alive and don't need any barriers during
3096   // the marking phase.
3097   if (is_concurrent_mark_in_progress() &&
3098      !marking_context()->allocated_after_mark_start(obj)) {
3099     return true;
3100   }
3101 
3102   // Cannot guarantee that obj is deeply good.
3103   if (has_forwarded_objects()) {
3104     return true;
3105   }
3106 
3107   return false;
3108 }
3109 
3110 void ShenandoahHeap::transfer_old_pointers_from_satb() {
3111   _old_generation->transfer_pointers_from_satb();
3112 }
3113 
3114 template<>
3115 void ShenandoahGenerationRegionClosure<YOUNG>::heap_region_do(ShenandoahHeapRegion* region) {
3116   // Visit young and free regions
3117   if (region->affiliation() != OLD_GENERATION) {
3118     _cl->heap_region_do(region);
3119   }
3120 }
3121 
3122 template<>
3123 void ShenandoahGenerationRegionClosure<OLD>::heap_region_do(ShenandoahHeapRegion* region) {
3124   // Visit old and free regions
3125   if (region->affiliation() != YOUNG_GENERATION) {
3126     _cl->heap_region_do(region);
3127   }
3128 }
3129 
3130 template<>
3131 void ShenandoahGenerationRegionClosure<GLOBAL>::heap_region_do(ShenandoahHeapRegion* region) {
3132   _cl->heap_region_do(region);
3133 }
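
// For illustration (a hypothetical caller, assuming the wrapper's constructor takes the inner closure
// as the _cl member suggests): these specializations filter a plain region closure by generation, and
// free (unaffiliated) regions are visited by both the YOUNG and OLD flavors.
//
//   SomeRegionClosure cl;                                           // any ShenandoahHeapRegionClosure
//   ShenandoahGenerationRegionClosure<YOUNG> young_regions(&cl);    // skips OLD_GENERATION regions
//   parallel_heap_region_iterate(&young_regions);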
3134 
3135 // Ensure that the remembered set has a dirty card everywhere there is an interesting pointer.
3136 // This examines the read_card_table between bottom() and top() since all PLABs are retired
3137 // before the safepoint for init_mark.  (In fact, we retire them before update-references and don't
3138 // restore them until the start of evacuation.)
3139 void ShenandoahHeap::verify_rem_set_at_mark() {
3140   shenandoah_assert_safepoint();
3141   assert(mode()->is_generational(), "Only verify remembered set for generational operational modes");
3142 
3143   ShenandoahRegionIterator iterator;
3144   RememberedScanner* scanner = card_scan();
3145   ShenandoahVerifyRemSetClosure check_interesting_pointers(true);
3146   ShenandoahMarkingContext* ctx;
3147 
3148   log_debug(gc)("Verifying remembered set at %s mark", doing_mixed_evacuations() ? "mixed" : "young");
3149 
3150   if (is_old_bitmap_stable() || active_generation()->generation_mode() == GLOBAL) {
3151     ctx = complete_marking_context();
3152   } else {
3153     ctx = nullptr;
3154   }
3155 
3156   while (iterator.has_next()) {
3157     ShenandoahHeapRegion* r = iterator.next();
3158     if (r == nullptr)
3159       break;
3160     HeapWord* tams = ctx ? ctx->top_at_mark_start(r) : nullptr;
3161     if (r->is_old() && r->is_active()) {
3162       HeapWord* obj_addr = r->bottom();
3163       if (r->is_humongous_start()) {
3164         oop obj = cast_to_oop(obj_addr);
3165         if (!ctx || ctx->is_marked(obj)) {
3166           // For humongous objects, the typical object is an array, so the following checks may be overkill.
3167           // For regular objects (not object arrays), if the card holding the start of the object is dirty,
3168           // we do not need to verify that cards spanning interesting pointers within this object are dirty.
3169           if (!scanner->is_card_dirty(obj_addr) || obj->is_objArray()) {
3170             obj->oop_iterate(&check_interesting_pointers);
3171           }
3172           // else, object's start is marked dirty and obj is not an objArray, so any interesting pointers are covered
3173         }
3174         // else, this humongous object is not marked so no need to verify its internal pointers
3175         if (!scanner->verify_registration(obj_addr, ctx)) {
3176           ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, obj_addr, nullptr,
3177                                           "Verify init-mark remembered set violation", "object not properly registered", __FILE__, __LINE__);
3178         }
3179       } else if (!r->is_humongous()) {
3180         HeapWord* top = r->top();
3181         while (obj_addr < top) {
3182           oop obj = cast_to_oop(obj_addr);
3183           // ctx->is_marked() returns true if mark bit set (TAMS not relevant during init mark)
3184           if (!ctx || ctx->is_marked(obj)) {
3185             // For regular objects (not object arrays), if the card holding the start of the object is dirty,
3186             // we do not need to verify that cards spanning interesting pointers within this object are dirty.
3187             if (!scanner->is_card_dirty(obj_addr) || obj->is_objArray()) {
3188               obj->oop_iterate(&check_interesting_pointers);
3189             }
3190             // else, object's start is marked dirty and obj is not an objArray, so any interesting pointers are covered
3191             if (!scanner->verify_registration(obj_addr, ctx)) {
3192               ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, obj_addr, nullptr,
3193                                                "Verify init-mark remembered set violation", "object not properly registered", __FILE__, __LINE__);
3194             }
3195             obj_addr += obj->size();
3196           } else {
3197             // This object is not live so we don't verify dirty cards contained therein
3198             assert(tams != nullptr, "If object is not live, ctx and tams should be non-null");
3199             obj_addr = ctx->get_next_marked_addr(obj_addr, tams);
3200           }
3201         }
3202       } // else, we ignore humongous continuation region
3203     } // else, this is not an OLD region so we ignore it
3204   } // all regions have been processed
3205 }
3206 
3207 void ShenandoahHeap::help_verify_region_rem_set(ShenandoahHeapRegion* r, ShenandoahMarkingContext* ctx, HeapWord* from,
3208                                                 HeapWord* top, HeapWord* registration_watermark, const char* message) {
3209   RememberedScanner* scanner = card_scan();
3210   ShenandoahVerifyRemSetClosure check_interesting_pointers(false);
3211 
3212   HeapWord* obj_addr = from;
3213   if (r->is_humongous_start()) {
3214     oop obj = cast_to_oop(obj_addr);
3215     if (!ctx || ctx->is_marked(obj)) {
3216       size_t card_index = scanner->card_index_for_addr(obj_addr);
3217       // For humongous objects, the typical object is an array, so the following checks may be overkill.
3218       // For regular objects (not object arrays), if the card holding the start of the object is dirty,
3219       // we do not need to verify that cards spanning interesting pointers within this object are dirty.
3220       if (!scanner->is_write_card_dirty(card_index) || obj->is_objArray()) {
3221         obj->oop_iterate(&check_interesting_pointers);
3222       }
3223       // else, object's start is marked dirty and obj is not an objArray, so any interesting pointers are covered
3224     }
3225     // else, this humongous object is not live so no need to verify its internal pointers
3226 
3227     if ((obj_addr < registration_watermark) && !scanner->verify_registration(obj_addr, ctx)) {
3228       ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, obj_addr, nullptr, message,
3229                                        "object not properly registered", __FILE__, __LINE__);
3230     }
3231   } else if (!r->is_humongous()) {
3232     while (obj_addr < top) {
3233       oop obj = cast_to_oop(obj_addr);
3234       // ctx->is_marked() returns true if mark bit set or if obj above TAMS.
3235       if (!ctx || ctx->is_marked(obj)) {
3236         size_t card_index = scanner->card_index_for_addr(obj_addr);
3237         // For regular objects (not object arrays), if the card holding the start of the object is dirty,
3238         // we do not need to verify that cards spanning interesting pointers within this object are dirty.
3239         if (!scanner->is_write_card_dirty(card_index) || obj->is_objArray()) {
3240           obj->oop_iterate(&check_interesting_pointers);
3241         }
3242         // else, object's start is marked dirty and obj is not an objArray, so any interesting pointers are covered
3243 
3244         if ((obj_addr < registration_watermark) && !scanner->verify_registration(obj_addr, ctx)) {
3245           ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, obj_addr, nullptr, message,
3246                                            "object not properly registered", __FILE__, __LINE__);
3247         }
3248         obj_addr += obj->size();
3249       } else {
3250         // This object is not live so we don't verify dirty cards contained therein
3251         HeapWord* tams = ctx->top_at_mark_start(r);
3252         obj_addr = ctx->get_next_marked_addr(obj_addr, tams);
3253       }
3254     }
3255   }
3256 }
3257 
3258 void ShenandoahHeap::verify_rem_set_after_full_gc() {
3259   shenandoah_assert_safepoint();
3260   assert(mode()->is_generational(), "Only verify remembered set for generational operational modes");
3261 
3262   ShenandoahRegionIterator iterator;
3263 
3264   while (iterator.has_next()) {
3265     ShenandoahHeapRegion* r = iterator.next();
3266     if (r == nullptr)
3267       break;
3268     if (r->is_old() && !r->is_cset()) {
3269       help_verify_region_rem_set(r, nullptr, r->bottom(), r->top(), r->top(), "Remembered set violation at end of Full GC");
3270     }
3271   }
3272 }
3273 
3274 // Ensure that the remembered set has a dirty card everywhere there is an interesting pointer.  Even though
3275 // the update-references scan of the remembered set only examines cards up to update_watermark, the remembered
3276 // set should be valid through top.  This examines the write_card_table between bottom() and top() because
3277 // all PLABs are retired immediately before the start of update refs.
3278 void ShenandoahHeap::verify_rem_set_at_update_ref() {
3279   shenandoah_assert_safepoint();
3280   assert(mode()->is_generational(), "Only verify remembered set for generational operational modes");
3281 
3282   ShenandoahRegionIterator iterator;
3283   ShenandoahMarkingContext* ctx;
3284 
3285   if (is_old_bitmap_stable() || active_generation()->generation_mode() == GLOBAL) {
3286     ctx = complete_marking_context();
3287   } else {
3288     ctx = nullptr;
3289   }
3290 
3291   while (iterator.has_next()) {
3292     ShenandoahHeapRegion* r = iterator.next();
3293     if (r == nullptr)
3294       break;
3295     if (r->is_old() && !r->is_cset()) {
3296       help_verify_region_rem_set(r, ctx, r->bottom(), r->top(), r->get_update_watermark(),
3297                                  "Remembered set violation at init-update-references");
3298     }
3299   }
3300 }
3301 
3302 ShenandoahGeneration* ShenandoahHeap::generation_for(ShenandoahRegionAffiliation affiliation) const {
3303   if (!mode()->is_generational()) {
3304     return global_generation();
3305   } else if (affiliation == YOUNG_GENERATION) {
3306     return young_generation();
3307   } else if (affiliation == OLD_GENERATION) {
3308     return old_generation();
3309   }
3310 
3311   ShouldNotReachHere();
3312   return nullptr;
3313 }
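
// For illustration (a hypothetical usage; region and heap stand in for whatever the caller has in
// scope): generation_for() lets region-oriented code charge work to the proper generation.
//
//   ShenandoahGeneration* gen = heap->generation_for(region->affiliation());
//   gen->log_status("example");   // any per-generation operation; log_status() appears just below
//
// In non-generational mode every affiliation resolves to the single global generation.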
3314 
3315 void ShenandoahHeap::log_heap_status(const char* msg) const {
3316   if (mode()->is_generational()) {
3317     young_generation()->log_status(msg);
3318     old_generation()->log_status(msg);
3319   } else {
3320     global_generation()->log_status(msg);
3321   }
3322 }
3323 