src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp

  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "memory/allocation.hpp"
  27 #include "memory/universe.hpp"
  28 
  29 #include "gc/shared/gcArguments.hpp"
  30 #include "gc/shared/gcTimer.hpp"
  31 #include "gc/shared/gcTraceTime.inline.hpp"
  32 #include "gc/shared/locationPrinter.inline.hpp"
  33 #include "gc/shared/memAllocator.hpp"
  34 #include "gc/shared/plab.hpp"
  35 #include "gc/shared/tlab_globals.hpp"
  36 
  37 #include "gc/shenandoah/shenandoahBarrierSet.hpp"

  38 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  39 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  40 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  41 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
  43 #include "gc/shenandoah/shenandoahControlThread.hpp"

  44 #include "gc/shenandoah/shenandoahFreeSet.hpp"

  45 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  46 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  47 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  48 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  49 #include "gc/shenandoah/shenandoahInitLogger.hpp"
  50 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  51 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
  52 #include "gc/shenandoah/shenandoahMetrics.hpp"
  53 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"

  54 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  55 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
  56 #include "gc/shenandoah/shenandoahPadding.hpp"
  57 #include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
  58 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  59 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"

  60 #include "gc/shenandoah/shenandoahStringDedup.hpp"
  61 #include "gc/shenandoah/shenandoahSTWMark.hpp"
  62 #include "gc/shenandoah/shenandoahUtils.hpp"
  63 #include "gc/shenandoah/shenandoahVerifier.hpp"
  64 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  65 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  66 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  67 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"


  68 #include "gc/shenandoah/mode/shenandoahIUMode.hpp"
  69 #include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
  70 #include "gc/shenandoah/mode/shenandoahSATBMode.hpp"

  71 #if INCLUDE_JFR
  72 #include "gc/shenandoah/shenandoahJfrSupport.hpp"
  73 #endif
  74 


  75 #include "classfile/systemDictionary.hpp"
  76 #include "code/codeCache.hpp"
  77 #include "memory/classLoaderMetaspace.hpp"
  78 #include "memory/metaspaceUtils.hpp"
  79 #include "oops/compressedOops.inline.hpp"
  80 #include "prims/jvmtiTagMap.hpp"
  81 #include "runtime/atomic.hpp"
  82 #include "runtime/globals.hpp"
  83 #include "runtime/interfaceSupport.inline.hpp"
  84 #include "runtime/java.hpp"
  85 #include "runtime/orderAccess.hpp"
  86 #include "runtime/safepointMechanism.hpp"
  87 #include "runtime/vmThread.hpp"
  88 #include "services/mallocTracker.hpp"
  89 #include "services/memTracker.hpp"
  90 #include "utilities/events.hpp"
  91 #include "utilities/powerOfTwo.hpp"
  92 
  93 class ShenandoahPretouchHeapTask : public WorkerTask {
  94 private:

 142 jint ShenandoahHeap::initialize() {
 143   //
 144   // Figure out heap sizing
 145   //
 146 
 147   size_t init_byte_size = InitialHeapSize;
 148   size_t min_byte_size  = MinHeapSize;
 149   size_t max_byte_size  = MaxHeapSize;
 150   size_t heap_alignment = HeapAlignment;
 151 
 152   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 153 
 154   Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
 155   Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
 156 
 157   _num_regions = ShenandoahHeapRegion::region_count();
 158   assert(_num_regions == (max_byte_size / reg_size_bytes),
 159          "Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT,
 160          _num_regions, max_byte_size, reg_size_bytes);
 161 
 162   // Now we know the number of regions, initialize the heuristics.
 163   initialize_heuristics();
 164 
 165   size_t num_committed_regions = init_byte_size / reg_size_bytes;
 166   num_committed_regions = MIN2(num_committed_regions, _num_regions);
 167   assert(num_committed_regions <= _num_regions, "sanity");
 168   _initial_size = num_committed_regions * reg_size_bytes;
 169 
 170   size_t num_min_regions = min_byte_size / reg_size_bytes;
 171   num_min_regions = MIN2(num_min_regions, _num_regions);
 172   assert(num_min_regions <= _num_regions, "sanity");
 173   _minimum_size = num_min_regions * reg_size_bytes;
 174 
 175   // Default to max heap size.
 176   _soft_max_size = _num_regions * reg_size_bytes;
 177 
 178   _committed = _initial_size;
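       // Worked example (illustrative numbers only): with a 4g max heap and 4m regions,
       // _num_regions is 1024; an initial heap size of 1g commits 256 regions up front,
       // so both _initial_size and the starting _committed value are 1g.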
 179 
 180   size_t heap_page_size   = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 181   size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 182   size_t region_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 183 
 184   //
 185   // Reserve and commit memory for heap
 186   //
 187 
 188   ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
 189   initialize_reserved_region(heap_rs);
 190   _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
 191   _heap_region_special = heap_rs.special();
 192 
 193   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
 194          "Misaligned heap: " PTR_FORMAT, p2i(base()));
 195 
 196 #if SHENANDOAH_OPTIMIZED_MARKTASK
 197   // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
 198   // Fail if we ever attempt to address more than we can.
 199   if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
 200     FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
 201                           "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
 202                           "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
 203                 p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
 204     vm_exit_during_initialization("Fatal Error", buf);
 205   }
 206 #endif
 207 
 208   ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
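       // A "special" reservation is already committed by the OS at reservation time
       // (for example, pinned large pages), so the explicit commit below is skipped for it.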
 209   if (!_heap_region_special) {
 210     os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
 211                               "Cannot commit heap memory");
 212   }
 213 
 214   //
 215   // Reserve and commit memory for bitmap(s)
 216   //
 217 
 218   _bitmap_size = ShenandoahMarkBitMap::compute_size(heap_rs.size());
 219   _bitmap_size = align_up(_bitmap_size, bitmap_page_size);
 220 
 221   size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();
 222 
 223   guarantee(bitmap_bytes_per_region != 0,
 224             "Bitmap bytes per region should not be zero");
 225   guarantee(is_power_of_2(bitmap_bytes_per_region),
 226             "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
 227 
 228   if (bitmap_page_size > bitmap_bytes_per_region) {
 229     _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
 230     _bitmap_bytes_per_slice = bitmap_page_size;
 231   } else {
 232     _bitmap_regions_per_slice = 1;
 233     _bitmap_bytes_per_slice = bitmap_bytes_per_region;
 234   }
 235 
 236   guarantee(_bitmap_regions_per_slice >= 1,
 237             "Should have at least one region per slice: " SIZE_FORMAT,
 238             _bitmap_regions_per_slice);
 239 
 240   guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
 241             "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
 242             _bitmap_bytes_per_slice, bitmap_page_size);
 243 
 244   ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
 245   MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
 246   _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
 247   _bitmap_region_special = bitmap.special();
 248 
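       // Commit only the part of the bitmap that covers the initially committed regions,
       // rounded up to whole slices: a slice is one bitmap page when a single page spans
       // several regions, otherwise it is one region's worth of bitmap.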
 249   size_t bitmap_init_commit = _bitmap_bytes_per_slice *
 250                               align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
 251   bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
 252   if (!_bitmap_region_special) {
 253     os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
 254                               "Cannot commit bitmap memory");
 255   }
 256 
 257   _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions, _max_workers);
 258 
 259   if (ShenandoahVerify) {
 260     ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
 261     if (!verify_bitmap.special()) {
 262       os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
 263                                 "Cannot commit verification bitmap memory");
 264     }
 265     MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
 266     MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
 267     _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
 268     _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
 269   }
 270 
 271   // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
 272   ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
 273   MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
 274   _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
 275   _aux_bitmap_region_special = aux_bitmap.special();
 276   _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
 277 

 383   // There should probably be Shenandoah-specific options for these,
 384   // just as there are G1-specific options.
 385   {
 386     ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
 387     satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
 388     satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
 389   }
 390 
 391   _monitoring_support = new ShenandoahMonitoringSupport(this);
 392   _phase_timings = new ShenandoahPhaseTimings(max_workers());
 393   ShenandoahCodeRoots::initialize();
 394 
 395   if (ShenandoahPacing) {
 396     _pacer = new ShenandoahPacer(this);
 397     _pacer->setup_for_idle();
 398   } else {
 399     _pacer = NULL;
 400   }
 401 
 402   _control_thread = new ShenandoahControlThread();

 403 
 404   ShenandoahInitLogger::print();
 405 
 406   return JNI_OK;
 407 }
 408 
 409 void ShenandoahHeap::initialize_mode() {
 410   if (ShenandoahGCMode != NULL) {
 411     if (strcmp(ShenandoahGCMode, "satb") == 0) {
 412       _gc_mode = new ShenandoahSATBMode();
 413     } else if (strcmp(ShenandoahGCMode, "iu") == 0) {
 414       _gc_mode = new ShenandoahIUMode();
 415     } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
 416       _gc_mode = new ShenandoahPassiveMode();


 417     } else {
 418       vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
 419     }
 420   } else {
 421     vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
 422   }
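       // The mode is selected with -XX:ShenandoahGCMode=<satb|iu|passive>; "satb" is the
       // default. Diagnostic and experimental modes are additionally gated by the
       // corresponding Unlock flags, checked below.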
 423   _gc_mode->initialize_flags();
 424   if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 425     vm_exit_during_initialization(
 426             err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 427                     _gc_mode->name()));
 428   }
 429   if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
 430     vm_exit_during_initialization(
 431             err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 432                     _gc_mode->name()));
 433   }
 434 }
 435 
 436 void ShenandoahHeap::initialize_heuristics() {
 437   assert(_gc_mode != NULL, "Must be initialized");
 438   _heuristics = _gc_mode->initialize_heuristics();

 439 
 440   if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 441     vm_exit_during_initialization(
 442             err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 443                     _heuristics->name()));
 444   }
 445   if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
 446     vm_exit_during_initialization(
 447             err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 448                     _heuristics->name()));
 449   }
 450 }
 451 
 452 #ifdef _MSC_VER
 453 #pragma warning( push )
 454 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 455 #endif
 456 
 457 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 458   CollectedHeap(),


 459   _initial_size(0),
 460   _used(0),
 461   _committed(0),
 462   _bytes_allocated_since_gc_start(0),
 463   _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
 464   _workers(NULL),
 465   _safepoint_workers(NULL),
 466   _heap_region_special(false),
 467   _num_regions(0),
 468   _regions(NULL),
 469   _update_refs_iterator(this),
 470   _control_thread(NULL),

 471   _shenandoah_policy(policy),
 472   _gc_mode(NULL),
 473   _heuristics(NULL),
 474   _free_set(NULL),
 475   _pacer(NULL),
 476   _verifier(NULL),
 477   _phase_timings(NULL),
 478   _monitoring_support(NULL),
 479   _memory_pool(NULL),


 480   _stw_memory_manager("Shenandoah Pauses", "end of GC pause"),
 481   _cycle_memory_manager("Shenandoah Cycles", "end of GC cycle"),
 482   _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 483   _soft_ref_policy(),
 484   _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
 485   _ref_processor(new ShenandoahReferenceProcessor(MAX2(_max_workers, 1U))),
 486   _marking_context(NULL),
 487   _bitmap_size(0),
 488   _bitmap_regions_per_slice(0),
 489   _bitmap_bytes_per_slice(0),
 490   _bitmap_region_special(false),
 491   _aux_bitmap_region_special(false),
 492   _liveness_cache(NULL),
 493   _collection_set(NULL)

 494 {
 495   // Initialize GC mode early, so we can adjust barrier support
 496   initialize_mode();
 497   BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));
 498 
 499   _max_workers = MAX2(_max_workers, 1U);
 500   _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
 501   if (_workers == NULL) {
 502     vm_exit_during_initialization("Failed necessary allocation.");
 503   } else {
 504     _workers->initialize_workers();
 505   }
 506 
 507   if (ParallelGCThreads > 1) {
 508     _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread",
 509                                                 ParallelGCThreads);
 510     _safepoint_workers->initialize_workers();
 511   }
 512 }
 513 
 514 #ifdef _MSC_VER
 515 #pragma warning( pop )
 516 #endif
 517 
 518 class ShenandoahResetBitmapTask : public WorkerTask {
 519 private:
 520   ShenandoahRegionIterator _regions;
 521 
 522 public:
 523   ShenandoahResetBitmapTask() :
 524     WorkerTask("Shenandoah Reset Bitmap") {}
 525 
 526   void work(uint worker_id) {
 527     ShenandoahHeapRegion* region = _regions.next();
 528     ShenandoahHeap* heap = ShenandoahHeap::heap();
 529     ShenandoahMarkingContext* const ctx = heap->marking_context();
 530     while (region != NULL) {
 531       if (heap->is_bitmap_slice_committed(region)) {
 532         ctx->clear_bitmap(region);
 533       }
 534       region = _regions.next();
 535     }
 536   }
 537 };
 538 
 539 void ShenandoahHeap::reset_mark_bitmap() {
 540   assert_gc_workers(_workers->active_workers());
 541   mark_incomplete_marking_context();
 542 
 543   ShenandoahResetBitmapTask task;
 544   _workers->run_task(&task);
 545 }
 546 
 547 void ShenandoahHeap::print_on(outputStream* st) const {
 548   st->print_cr("Shenandoah Heap");
 549   st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
 550                byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
 551                byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
 552                byte_size_in_proper_unit(committed()),    proper_unit_for_byte_size(committed()),
 553                byte_size_in_proper_unit(used()),         proper_unit_for_byte_size(used()));
 554   st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions",
 555                num_regions(),
 556                byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
 557                proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));
 558 
 559   st->print("Status: ");
 560   if (has_forwarded_objects())                 st->print("has forwarded objects, ");
 561   if (is_concurrent_mark_in_progress())        st->print("marking, ");

 562   if (is_evacuation_in_progress())             st->print("evacuating, ");
 563   if (is_update_refs_in_progress())            st->print("updating refs, ");
 564   if (is_degenerated_gc_in_progress())         st->print("degenerated gc, ");
 565   if (is_full_gc_in_progress())                st->print("full gc, ");
 566   if (is_full_gc_move_in_progress())           st->print("full gc move, ");
 567   if (is_concurrent_weak_root_in_progress())   st->print("concurrent weak roots, ");
 568   if (is_concurrent_strong_root_in_progress() &&
 569       !is_concurrent_weak_root_in_progress())  st->print("concurrent strong roots, ");
 570 
 571   if (cancelled_gc()) {
 572     st->print("cancelled");
 573   } else {
 574     st->print("not cancelled");
 575   }
 576   st->cr();
 577 
 578   st->print_cr("Reserved region:");
 579   st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
 580                p2i(reserved_region().start()),
 581                p2i(reserved_region().end()));

 604     assert(thread->is_Worker_thread(), "Only worker thread expected");
 605     ShenandoahThreadLocalData::initialize_gclab(thread);
 606   }
 607 };
 608 
 609 void ShenandoahHeap::post_initialize() {
 610   CollectedHeap::post_initialize();
 611   MutexLocker ml(Threads_lock);
 612 
 613   ShenandoahInitWorkerGCLABClosure init_gclabs;
 614   _workers->threads_do(&init_gclabs);
 615 
 616   // The gclab cannot be initialized early during VM startup, because it cannot determine its max_size.
 617   // Instead, we let WorkerThreads initialize the gclab when a new worker is created.
 618   _workers->set_initialize_gclab();
 619   if (_safepoint_workers != NULL) {
 620     _safepoint_workers->threads_do(&init_gclabs);
 621     _safepoint_workers->set_initialize_gclab();
 622   }
 623 
 624   _heuristics->initialize();
 625 
 626   JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers());
 627 }
 628 
 629 size_t ShenandoahHeap::used() const {
 630   return Atomic::load(&_used);
 631 }
 632 
 633 size_t ShenandoahHeap::committed() const {
 634   return Atomic::load(&_committed);
 635 }
 636 
 637 void ShenandoahHeap::increase_committed(size_t bytes) {
 638   shenandoah_assert_heaplocked_or_safepoint();
 639   _committed += bytes;
 640 }
 641 
 642 void ShenandoahHeap::decrease_committed(size_t bytes) {
 643   shenandoah_assert_heaplocked_or_safepoint();
 644   _committed -= bytes;
 645 }
 646 
 647 void ShenandoahHeap::increase_used(size_t bytes) {
 648   Atomic::add(&_used, bytes, memory_order_relaxed);
 649 }
 650 
 651 void ShenandoahHeap::set_used(size_t bytes) {
 652   Atomic::store(&_used, bytes);
 653 }
 654 
 655 void ShenandoahHeap::decrease_used(size_t bytes) {
 656   assert(used() >= bytes, "never decrease heap size by more than we've left");
 657   Atomic::sub(&_used, bytes, memory_order_relaxed);
 658 }
 659 
 660 void ShenandoahHeap::increase_allocated(size_t bytes) {
 661   Atomic::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed);
 662 }
 663 
 664 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
 665   size_t bytes = words * HeapWordSize;
 666   if (!waste) {
 667     increase_used(bytes);
 668   }
 669   increase_allocated(bytes);
 670   if (ShenandoahPacing) {
 671     control_thread()->pacing_notify_alloc(words);
 672     if (waste) {
 673       pacer()->claim_for_alloc(words, true);
 674     }
 675   }
 676 }
 677 
 678 size_t ShenandoahHeap::capacity() const {
 679   return committed();
 680 }
 681 
 682 size_t ShenandoahHeap::max_capacity() const {
 683   return _num_regions * ShenandoahHeapRegion::region_size_bytes();
 684 }
 685 
 686 size_t ShenandoahHeap::soft_max_capacity() const {
 687   size_t v = Atomic::load(&_soft_max_size);
 688   assert(min_capacity() <= v && v <= max_capacity(),
 689          "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
 690          min_capacity(), v, max_capacity());
 691   return v;
 692 }
 693 
 694 void ShenandoahHeap::set_soft_max_capacity(size_t v) {
 695   assert(min_capacity() <= v && v <= max_capacity(),
 696          "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
 697          min_capacity(), v, max_capacity());
 698   Atomic::store(&_soft_max_size, v);
 699 }
 700 
 701 size_t ShenandoahHeap::min_capacity() const {
 702   return _minimum_size;
 703 }
 704 
 705 size_t ShenandoahHeap::initial_capacity() const {
 706   return _initial_size;
 707 }
 708 
 709 bool ShenandoahHeap::is_in(const void* p) const {
 710   HeapWord* heap_base = (HeapWord*) base();
 711   HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
 712   return p >= heap_base && p < last_region_end;
 713 }
 714 
 715 void ShenandoahHeap::op_uncommit(double shrink_before, size_t shrink_until) {
 716   assert (ShenandoahUncommit, "should be enabled");
 717 
 718   // Application allocates from the beginning of the heap, and GC allocates at
 719   // the end of it. It is more efficient to uncommit from the end, so that applications
 720   // could enjoy the near committed regions. GC allocations are much less frequent,
 721   // and therefore can accept the committing costs.
 722 
 723   size_t count = 0;
 724   for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow
 725     ShenandoahHeapRegion* r = get_region(i - 1);
 726     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
 727       ShenandoahHeapLocker locker(lock());
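           // Re-check under the heap lock: an allocator may have grabbed this region between
           // the unlocked check above and acquiring the lock.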
 728       if (r->is_empty_committed()) {
 729         if (committed() < shrink_until + ShenandoahHeapRegion::region_size_bytes()) {
 730           break;
 731         }
 732 
 733         r->make_uncommitted();
 734         count++;
 735       }
 736     }
 737     SpinPause(); // allow allocators to take the lock
 738   }
 739 
 740   if (count > 0) {
 741     control_thread()->notify_heap_changed();
 742   }
 743 }
 744 
 745 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
 746   // New object should fit the GCLAB size
 747   size_t min_size = MAX2(size, PLAB::min_size());
 748 
 749   // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
 750   size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
 751   new_size = MIN2(new_size, PLAB::max_size());
 752   new_size = MAX2(new_size, PLAB::min_size());
 753 
 754   // Record new heuristic value even if we take any shortcut. This captures
 755   // the case when moderately-sized objects always take a shortcut. At some point,
 756   // heuristics should catch up with them.
 757   ShenandoahThreadLocalData::set_gclab_size(thread, new_size);
 758 
 759   if (new_size < size) {
 760     // New size still does not fit the object. Fall back to shared allocation.
 761     // This avoids retiring perfectly good GCLABs, when we encounter a large object.
 762     return NULL;
 763   }
 764 
 765   // Retire current GCLAB, and allocate a new one.
 766   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
 767   gclab->retire();
 768 
 769   size_t actual_size = 0;
 770   HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
 771   if (gclab_buf == NULL) {
 772     return NULL;
 773   }
 774 
 775   assert (size <= actual_size, "allocation should fit");
 776 
 777   if (ZeroTLAB) {
 778     // ..and clear it.
 779     Copy::zero_to_words(gclab_buf, actual_size);
 780   } else {
 781     // ...and zap just allocated object.
 782 #ifdef ASSERT
 783     // Skip mangling the space corresponding to the object header to
 784     // ensure that the returned space is not considered parsable by
 785     // any concurrent GC thread.
 786     size_t hdr_size = oopDesc::header_size();
 787     Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
 788 #endif // ASSERT
 789   }
 790   gclab->set_buf(gclab_buf, actual_size);
 791   return gclab->allocate(size);
 792 }
 793 
 794 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
 795                                             size_t requested_size,
 796                                             size_t* actual_size) {
 797   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
 798   HeapWord* res = allocate_memory(req);
 799   if (res != NULL) {
 800     *actual_size = req.actual_size();
 801   } else {
 802     *actual_size = 0;
 803   }
 804   return res;
 805 }
 806 
 807 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
 808                                              size_t word_size,
 809                                              size_t* actual_size) {
 810   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
 811   HeapWord* res = allocate_memory(req);
 812   if (res != NULL) {
 813     *actual_size = req.actual_size();
 814   } else {
 815     *actual_size = 0;
 816   }
 817   return res;
 818 }
 819 
 820 HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
 821   intptr_t pacer_epoch = 0;
 822   bool in_new_region = false;
 823   HeapWord* result = NULL;
 824 
 825   if (req.is_mutator_alloc()) {
 826     if (ShenandoahPacing) {
 827       pacer()->pace_for_alloc(req.size());
 828       pacer_epoch = pacer()->epoch();
 829     }
 830 
 831     if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
 832       result = allocate_memory_under_lock(req, in_new_region);
 833     }
 834 
 835     // Allocation failed, block until control thread reacted, then retry allocation.
 836     //
 837     // It might happen that one of the threads requesting allocation would unblock
 838     // way later after GC happened, only to fail the second allocation, because
 839     // other threads have already depleted the free storage. In this case, a better
 840     // strategy is to try again, as long as GC makes progress.
 841     //
 842     // Then, we need to make sure the allocation was retried after at least one
 843     // Full GC, which means we want to try more than ShenandoahFullGCThreshold times.
 844 
 845     size_t tries = 0;
 846 
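         // First, retry while the previous GC keeps making progress; then allow the attempt
         // count to exceed ShenandoahFullGCThreshold, which guarantees the request has been
         // retried after at least one Full GC before we finally give up.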
 847     while (result == NULL && _progress_last_gc.is_set()) {
 848       tries++;
 849       control_thread()->handle_alloc_failure(req);
 850       result = allocate_memory_under_lock(req, in_new_region);
 851     }
 852 
 853     while (result == NULL && tries <= ShenandoahFullGCThreshold) {
 854       tries++;
 855       control_thread()->handle_alloc_failure(req);
 856       result = allocate_memory_under_lock(req, in_new_region);
 857     }
 858 
 859   } else {
 860     assert(req.is_gc_alloc(), "Can only accept GC allocs here");
 861     result = allocate_memory_under_lock(req, in_new_region);
 862     // Do not call handle_alloc_failure() here, because we cannot block.
 863     // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
 864   }
 865 
 866   if (in_new_region) {
 867     control_thread()->notify_heap_changed();

 868   }
 869 
 870   if (result != NULL) {

 871     size_t requested = req.size();
 872     size_t actual = req.actual_size();

 873 
 874     assert (req.is_lab_alloc() || (requested == actual),
 875             "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
 876             ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);
 877 
 878     if (req.is_mutator_alloc()) {
 879       notify_mutator_alloc_words(actual, false);

 880 
 881       // If we requested more than we were granted, give the rest back to pacer.
 882       // This only matters if we are in the same pacing epoch: do not try to unpace
 883       // over the budget for the other phase.
 884       if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
 885         pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
 886       }
 887     } else {
 888       increase_used(actual*HeapWordSize);
 889     }
 890   }
 891 
 892   return result;
 893 }
 894 
 895 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
 896   ShenandoahHeapLocker locker(lock());
 897   return _free_set->allocate(req, in_new_region);
 898 }
 899 
 900 HeapWord* ShenandoahHeap::mem_allocate(size_t size,
 901                                         bool*  gc_overhead_limit_was_exceeded) {
 902   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
 903   return allocate_memory(req);
 904 }
 905 
 906 MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
 907                                                              size_t size,
 908                                                              Metaspace::MetadataType mdtype) {
 909   MetaWord* result;
 910 
 911   // Inform metaspace OOM to GC heuristics if class unloading is possible.
 912   if (heuristics()->can_unload_classes()) {
 913     ShenandoahHeuristics* h = heuristics();
 914     h->record_metaspace_oom();
 915   }
 916 
 917   // Expand and retry allocation
 918   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
 919   if (result != NULL) {
 920     return result;
 921   }
 922 
 923   // Start full GC
 924   collect(GCCause::_metadata_GC_clear_soft_refs);
 925 
 926   // Retry allocation
 927   result = loader_data->metaspace_non_null()->allocate(size, mdtype);
 928   if (result != NULL) {
 929     return result;
 930   }
 931 
 932   // Expand and retry allocation
 933   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);

 972 
 973   void work(uint worker_id) {
 974     if (_concurrent) {
 975       ShenandoahConcurrentWorkerSession worker_session(worker_id);
 976       ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
 977       ShenandoahEvacOOMScope oom_evac_scope;
 978       do_work();
 979     } else {
 980       ShenandoahParallelWorkerSession worker_session(worker_id);
 981       ShenandoahEvacOOMScope oom_evac_scope;
 982       do_work();
 983     }
 984   }
 985 
 986 private:
 987   void do_work() {
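         // Claim collection set regions one at a time, evacuate the live objects in each
         // claimed region, report progress to the pacer, and stop early if GC is cancelled.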
 988     ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
 989     ShenandoahHeapRegion* r;
 990     while ((r =_cs->claim_next()) != NULL) {
 991       assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());

 992       _sh->marked_object_iterate(r, &cl);
 993 
 994       if (ShenandoahPacing) {
 995         _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
 996       }
 997 
 998       if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
 999         break;
1000       }
1001     }
1002   }
1003 };
1004 
1005 void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
1006   ShenandoahEvacuationTask task(this, _collection_set, concurrent);
1007   workers()->run_task(&task);
1008 }
1009 
1010 void ShenandoahHeap::trash_cset_regions() {
1011   ShenandoahHeapLocker locker(lock());
1012 
1013   ShenandoahCollectionSet* set = collection_set();
1014   ShenandoahHeapRegion* r;
1015   set->clear_current_index();
1016   while ((r = set->next()) != NULL) {
1017     r->make_trash();
1018   }
1019   collection_set()->clear();
1020 }
1021 
1022 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
1023   st->print_cr("Heap Regions:");
1024   st->print_cr("EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HC=humongous continuation, CS=collection set, T=trash, P=pinned");
1025   st->print_cr("BTE=bottom/top/end, U=used, T=TLAB allocs, G=GCLAB allocs, S=shared allocs, L=live data");
1026   st->print_cr("R=root, CP=critical pins, TAMS=top-at-mark-start, UWM=update watermark");
1027   st->print_cr("SN=alloc sequence number");
1028 
1029   for (size_t i = 0; i < num_regions(); i++) {
1030     get_region(i)->print_on(st);
1031   }
1032 }
1033 
1034 void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
1035   assert(start->is_humongous_start(), "reclaim regions starting with the first one");
1036 
1037   oop humongous_obj = cast_to_oop(start->bottom());
1038   size_t size = humongous_obj->size();
1039   size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
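       // Illustrative example: a 10m humongous object with 4m regions needs
       // required_regions = 3 (one humongous start region plus two continuations).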
1040   size_t index = start->index() + required_regions - 1;
1041 
1042   assert(!start->has_live(), "liveness must be zero");
1043 
1044   for (size_t i = 0; i < required_regions; i++) {
1045     // Reclaim from tail. Otherwise, assertion fails when printing region to trace log,
1046     // as it expects that every region belongs to a humongous region starting with a humongous start region.
1047     ShenandoahHeapRegion* region = get_region(index--);
1048 
1049     assert(region->is_humongous(), "expect correct humongous start or continuation");
1050     assert(!region->is_cset(), "Humongous region should not be in collection set");
1051 
1052     region->make_trash_immediate();
1053   }

1054 }
1055 
1056 class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
1057 public:
1058   ShenandoahCheckCleanGCLABClosure() {}
1059   void do_thread(Thread* thread) {
1060     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1061     assert(gclab != NULL, "GCLAB should be initialized for %s", thread->name());
1062     assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
1063   }
1064 };
1065 
1066 class ShenandoahRetireGCLABClosure : public ThreadClosure {
1067 private:
1068   bool const _resize;
1069 public:
1070   ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
1071   void do_thread(Thread* thread) {
1072     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1073     assert(gclab != NULL, "GCLAB should be initialized for %s", thread->name());
1074     gclab->retire();
1075     if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1076       ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1077     }
1078   }
1079 };
1080 
1081 void ShenandoahHeap::labs_make_parsable() {
1082   assert(UseTLAB, "Only call with UseTLAB");
1083 
1084   ShenandoahRetireGCLABClosure cl(false);
1085 
1086   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1087     ThreadLocalAllocBuffer& tlab = t->tlab();
1088     tlab.make_parsable();
1089     cl.do_thread(t);
1090   }
1091 
1092   workers()->threads_do(&cl);
1093 }
1094 
1095 void ShenandoahHeap::tlabs_retire(bool resize) {
1096   assert(UseTLAB, "Only call with UseTLAB");
1097   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");

1115   }
1116   workers()->threads_do(&cl);
1117 #endif
1118 }
1119 
1120 void ShenandoahHeap::gclabs_retire(bool resize) {
1121   assert(UseTLAB, "Only call with UseTLAB");
1122   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1123 
1124   ShenandoahRetireGCLABClosure cl(resize);
1125   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1126     cl.do_thread(t);
1127   }
1128   workers()->threads_do(&cl);
1129 
1130   if (safepoint_workers() != NULL) {
1131     safepoint_workers()->threads_do(&cl);
1132   }
1133 }
1134 
1135 // Returns size in bytes
1136 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1137   if (ShenandoahElasticTLAB) {
1138     // With Elastic TLABs, return the max allowed size, and let the allocation path
1139     // figure out the safe size for current allocation.
1140     return ShenandoahHeapRegion::max_tlab_size_bytes();
1141   } else {
1142     return MIN2(_free_set->unsafe_peek_free(), ShenandoahHeapRegion::max_tlab_size_bytes());
1143   }
1144 }
1145 
1146 size_t ShenandoahHeap::max_tlab_size() const {
1147   // Returns size in words
1148   return ShenandoahHeapRegion::max_tlab_size_words();
1149 }
1150 
1151 void ShenandoahHeap::collect(GCCause::Cause cause) {
1152   control_thread()->request_gc(cause);
1153 }
1154 

1511       if (start >= max) break;
1512 
1513       for (size_t i = cur; i < end; i++) {
1514         ShenandoahHeapRegion* current = _heap->get_region(i);
1515         _blk->heap_region_do(current);
1516       }
1517     }
1518   }
1519 };
1520 
1521 void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1522   assert(blk->is_thread_safe(), "Only thread-safe closures here");
1523   if (num_regions() > ShenandoahParallelRegionStride) {
1524     ShenandoahParallelHeapRegionTask task(blk);
1525     workers()->run_task(&task);
1526   } else {
1527     heap_region_iterate(blk);
1528   }
1529 }
1530 
1531 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1532 private:
1533   ShenandoahMarkingContext* const _ctx;
1534 public:
1535   ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
1536 
1537   void heap_region_do(ShenandoahHeapRegion* r) {
1538     assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
1539     if (r->is_active()) {
1540       // Check if region needs updating its TAMS. We have updated it already during concurrent
1541       // reset, so it is very likely we don't need to do another write here.
1542       if (_ctx->top_at_mark_start(r) != r->top()) {
1543         _ctx->capture_top_at_mark_start(r);
1544       }
1545     } else {
1546       assert(_ctx->top_at_mark_start(r) == r->top(),
1547              "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
1548     }
1549   }
1550 
1551   bool is_thread_safe() { return true; }
1552 };
1553 
1554 class ShenandoahRendezvousClosure : public HandshakeClosure {
1555 public:
1556   inline ShenandoahRendezvousClosure() : HandshakeClosure("ShenandoahRendezvous") {}
1557   inline void do_thread(Thread* thread) {}
1558 };
1559 
1560 void ShenandoahHeap::rendezvous_threads() {
1561   ShenandoahRendezvousClosure cl;
1562   Handshake::execute(&cl);
1563 }
1564 
1565 void ShenandoahHeap::recycle_trash() {
1566   free_set()->recycle_trash();
1567 }
1568 
1569 class ShenandoahResetUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1570 private:
1571   ShenandoahMarkingContext* const _ctx;
1572 public:
1573   ShenandoahResetUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
1574 
1575   void heap_region_do(ShenandoahHeapRegion* r) {
1576     if (r->is_active()) {
1577       // Reset live data and set TAMS optimistically. We would recheck these under the pause
1578       // anyway to capture any updates that happened since now.
1579       r->clear_live_data();
1580       _ctx->capture_top_at_mark_start(r);
1581     }
1582   }
1583 
1584   bool is_thread_safe() { return true; }
1585 };
1586 
1587 void ShenandoahHeap::prepare_gc() {
1588   reset_mark_bitmap();
1589 
1590   ShenandoahResetUpdateRegionStateClosure cl;
1591   parallel_heap_region_iterate(&cl);
1592 }
1593 
1594 class ShenandoahFinalMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1595 private:
1596   ShenandoahMarkingContext* const _ctx;
1597   ShenandoahHeapLock* const _lock;
1598 
1599 public:
1600   ShenandoahFinalMarkUpdateRegionStateClosure() :
1601     _ctx(ShenandoahHeap::heap()->complete_marking_context()), _lock(ShenandoahHeap::heap()->lock()) {}
1602 
1603   void heap_region_do(ShenandoahHeapRegion* r) {
1604     if (r->is_active()) {
1605       // All allocations past TAMS are implicitly live, adjust the region data.
1606       // Bitmaps/TAMS are swapped at this point, so we need to poll complete bitmap.
1607       HeapWord *tams = _ctx->top_at_mark_start(r);
1608       HeapWord *top = r->top();
1609       if (top > tams) {
1610         r->increase_live_data_alloc_words(pointer_delta(top, tams));
1611       }
1612 
1613       // We are about to select the collection set, make sure it knows about
1614       // current pinning status. Also, this allows trashing more regions that
1615       // now have their pinning status dropped.
1616       if (r->is_pinned()) {
1617         if (r->pin_count() == 0) {
1618           ShenandoahHeapLocker locker(_lock);
1619           r->make_unpinned();
1620         }
1621       } else {
1622         if (r->pin_count() > 0) {
1623           ShenandoahHeapLocker locker(_lock);
1624           r->make_pinned();
1625         }
1626       }
1627 
1628       // Remember limit for updating refs. It's guaranteed that we get no
1629       // from-space-refs written from here on.
1630       r->set_update_watermark_at_safepoint(r->top());
1631     } else {
1632       assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
1633       assert(_ctx->top_at_mark_start(r) == r->top(),
1634              "Region " SIZE_FORMAT " should have correct TAMS", r->index());
1635     }
1636   }
1637 
1638   bool is_thread_safe() { return true; }
1639 };
1640 
1641 void ShenandoahHeap::prepare_regions_and_collection_set(bool concurrent) {
1642   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
1643   {
1644     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_update_region_states :
1645                                          ShenandoahPhaseTimings::degen_gc_final_update_region_states);
1646     ShenandoahFinalMarkUpdateRegionStateClosure cl;
1647     parallel_heap_region_iterate(&cl);
1648 
1649     assert_pinned_region_status();
1650   }
1651 
1652   {
1653     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::choose_cset :
1654                                          ShenandoahPhaseTimings::degen_gc_choose_cset);
1655     ShenandoahHeapLocker locker(lock());
1656     _collection_set->clear();
1657     heuristics()->choose_collection_set(_collection_set);
1658   }
1659 
1660   {
1661     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset :
1662                                          ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
1663     ShenandoahHeapLocker locker(lock());
1664     _free_set->rebuild();
1665   }
1666 }
1667 
1668 void ShenandoahHeap::do_class_unloading() {
1669   _unloader.unload();
1670 }
1671 
1672 void ShenandoahHeap::stw_weak_refs(bool full_gc) {
1673   // Weak refs processing
1674   ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
1675                                                 : ShenandoahPhaseTimings::degen_gc_weakrefs;
1676   ShenandoahTimingsTracker t(phase);
1677   ShenandoahGCWorkerPhase worker_phase(phase);
1678   ref_processor()->process_references(phase, workers(), false /* concurrent */);
1679 }
1680 
1681 void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
1682   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
1683 
1684   // Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to
1685   // make them parsable for update code to work correctly. Plus, we can compute new sizes
1686   // for future GCLABs here.
1687   if (UseTLAB) {
1688     ShenandoahGCPhase phase(concurrent ?
1689                             ShenandoahPhaseTimings::init_update_refs_manage_gclabs :
1690                             ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
1691     gclabs_retire(ResizeTLAB);
1692   }
1693 
1694   _update_refs_iterator.reset();
1695 }
1696 
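     // The packed gc-state byte is mirrored into every Java thread's ShenandoahThreadLocalData,
     // so barrier fast paths can test it thread-locally; it only changes at a Shenandoah
     // safepoint (asserted in set_gc_state_mask below).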
1697 void ShenandoahHeap::set_gc_state_all_threads(char state) {
1698   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1699     ShenandoahThreadLocalData::set_gc_state(t, state);
1700   }
1701 }
1702 
1703 void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) {
1704   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should really be Shenandoah safepoint");
1705   _gc_state.set_cond(mask, value);
1706   set_gc_state_all_threads(_gc_state.raw_value());
1707 }
1708 
1709 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1710   assert(!has_forwarded_objects(), "Not expected before/after mark phase");
1711   set_gc_state_mask(MARKING, in_progress);
1712   ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1713 }
1714 
1715 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1716   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
1717   set_gc_state_mask(EVACUATION, in_progress);
1718 }
1719 
1720 void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
1721   if (in_progress) {
1722     _concurrent_strong_root_in_progress.set();
1723   } else {
1724     _concurrent_strong_root_in_progress.unset();
1725   }
1726 }
1727 
1728 void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
1729   set_gc_state_mask(WEAK_ROOTS, cond);
1730 }
1731 
1732 GCTracer* ShenandoahHeap::tracer() {

1737   return _free_set->used();
1738 }
1739 
1740 bool ShenandoahHeap::try_cancel_gc() {
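       // Atomically move CANCELLABLE -> CANCELLED: return true only to the caller that performed
       // the transition, false if GC was already cancelled. NOT_CANCELLED (only possible with
       // suspendible workers) means cancellation is temporarily blocked, so spin until the state
       // settles, yielding to a pending safepoint when called from a Java thread.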
1741   while (true) {
1742     jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
1743     if (prev == CANCELLABLE) return true;
1744     else if (prev == CANCELLED) return false;
1745     assert(ShenandoahSuspendibleWorkers, "should not get here when not using suspendible workers");
1746     assert(prev == NOT_CANCELLED, "must be NOT_CANCELLED");
1747     Thread* thread = Thread::current();
1748     if (thread->is_Java_thread()) {
1749       // We need to provide a safepoint here, otherwise we might
1750       // spin forever if a SP is pending.
1751       ThreadBlockInVM sp(JavaThread::cast(thread));
1752       SpinPause();
1753     }
1754   }
1755 }
1756 
1757 void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
1758   if (try_cancel_gc()) {
1759     FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
1760     log_info(gc)("%s", msg.buffer());
1761     Events::log(Thread::current(), "%s", msg.buffer());
1762   }
1763 }
1764 
1765 uint ShenandoahHeap::max_workers() {
1766   return _max_workers;
1767 }
1768 
1769 void ShenandoahHeap::stop() {
1770   // The shutdown sequence should be able to terminate when GC is running.
1771 
1772   // Step 0. Notify policy to disable event recording.
1773   _shenandoah_policy->record_shutdown();
1774 
1775   // Step 1. Notify control thread that we are in shutdown.
1776   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
1777   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
1778   control_thread()->prepare_for_graceful_shutdown();
1779 
1780   // Step 2. Notify GC workers that we are cancelling GC.
1781   cancel_gc(GCCause::_shenandoah_stop_vm);
1782 
1783   // Step 3. Wait until GC worker exits normally.
1784   control_thread()->stop();
1785 }
1786 
1787 void ShenandoahHeap::stw_unload_classes(bool full_gc) {
1788   if (!unload_classes()) return;
1789   // Unload classes and purge SystemDictionary.
1790   {
1791     ShenandoahPhaseTimings::Phase phase = full_gc ?

1857 }
1858 
1859 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
1860   set_gc_state_mask(HAS_FORWARDED, cond);
1861 }
1862 
1863 void ShenandoahHeap::set_unload_classes(bool uc) {
1864   _unload_classes.set_cond(uc);
1865 }
1866 
1867 bool ShenandoahHeap::unload_classes() const {
1868   return _unload_classes.is_set();
1869 }
1870 
1871 address ShenandoahHeap::in_cset_fast_test_addr() {
1872   ShenandoahHeap* heap = ShenandoahHeap::heap();
1873   assert(heap->collection_set() != NULL, "Sanity");
1874   return (address) heap->collection_set()->biased_map_address();
1875 }
1876 
1877 address ShenandoahHeap::cancelled_gc_addr() {
1878   return (address) ShenandoahHeap::heap()->_cancelled_gc.addr_of();
1879 }
1880 
1881 address ShenandoahHeap::gc_state_addr() {
1882   return (address) ShenandoahHeap::heap()->_gc_state.addr_of();
1883 }
1884 
1885 size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
1886   return Atomic::load(&_bytes_allocated_since_gc_start);
1887 }
1888 
1889 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
1890   Atomic::store(&_bytes_allocated_since_gc_start, (size_t)0);
1891 }
1892 
1893 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
1894   _degenerated_gc_in_progress.set_cond(in_progress);
1895 }
1896 
1897 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
1898   _full_gc_in_progress.set_cond(in_progress);
1899 }
1900 
1901 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
1902   assert (is_full_gc_in_progress(), "should be");
1903   _full_gc_move_in_progress.set_cond(in_progress);
1904 }
1905 
1906 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
1907   set_gc_state_mask(UPDATEREFS, in_progress);
1908 }
1909 
1910 void ShenandoahHeap::register_nmethod(nmethod* nm) {

1939     if (r->is_active()) {
1940       if (r->is_pinned()) {
1941         if (r->pin_count() == 0) {
1942           r->make_unpinned();
1943         }
1944       } else {
1945         if (r->pin_count() > 0) {
1946           r->make_pinned();
1947         }
1948       }
1949     }
1950   }
1951 
1952   assert_pinned_region_status();
1953 }
1954 
1955 #ifdef ASSERT
1956 void ShenandoahHeap::assert_pinned_region_status() {
1957   for (size_t i = 0; i < num_regions(); i++) {
1958     ShenandoahHeapRegion* r = get_region(i);
1959     assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
1960            "Region " SIZE_FORMAT " pinning status is inconsistent", i);


1961   }
1962 }
1963 #endif
1964 
1965 ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
1966   return _gc_timer;
1967 }
1968 
1969 void ShenandoahHeap::prepare_concurrent_roots() {
1970   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
1971   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
1972   set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
1973   set_concurrent_weak_root_in_progress(true);
1974   if (unload_classes()) {
1975     _unloader.prepare();
1976   }
1977 }
1978 
1979 void ShenandoahHeap::finish_concurrent_roots() {
1980   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

2000       assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
2001     } else {
2002       // Use ConcGCThreads outside safepoints
2003       assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
2004     }
2005   }
2006 }
2007 #endif
2008 
2009 ShenandoahVerifier* ShenandoahHeap::verifier() {
2010   guarantee(ShenandoahVerify, "Should be enabled");
2011   assert (_verifier != NULL, "sanity");
2012   return _verifier;
2013 }
2014 
2015 template<bool CONCURRENT>
2016 class ShenandoahUpdateHeapRefsTask : public WorkerTask {
2017 private:
2018   ShenandoahHeap* _heap;
2019   ShenandoahRegionIterator* _regions;


2020 public:
2021   ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions) :

2022     WorkerTask("Shenandoah Update References"),
2023     _heap(ShenandoahHeap::heap()),
2024     _regions(regions) {


2025   }
2026 
2027   void work(uint worker_id) {
2028     if (CONCURRENT) {
2029       ShenandoahConcurrentWorkerSession worker_session(worker_id);
2030       ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
2031       do_work<ShenandoahConcUpdateRefsClosure>();
2032     } else {
2033       ShenandoahParallelWorkerSession worker_session(worker_id);
2034       do_work<ShenandoahSTWUpdateRefsClosure>();
2035     }
2036   }
2037 
2038 private:
2039   template<class T>
2040   void do_work() {
2041     T cl;
2042     ShenandoahHeapRegion* r = _regions->next();
2043     ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
2044     while (r != NULL) {
2045       HeapWord* update_watermark = r->get_update_watermark();
2046       assert (update_watermark >= r->bottom(), "sanity");
2047       if (r->is_active() && !r->is_cset()) {
2048         _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2049       }
2050       if (ShenandoahPacing) {
2051         _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
2052       }
2053       if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
2054         return;
2055       }
2056       r = _regions->next();
2057     }
2058   }
2059 };
2060 
2061 void ShenandoahHeap::update_heap_references(bool concurrent) {
2062   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");

2063 
2064   if (concurrent) {
2065     ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator);
2066     workers()->run_task(&task);
2067   } else {
2068     ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator);
2069     workers()->run_task(&task);
2070   }
2071 }
2072 
2073 
2074 class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
2075 private:

2076   ShenandoahHeapLock* const _lock;

2077 
2078 public:
2079   ShenandoahFinalUpdateRefsUpdateRegionStateClosure() : _lock(ShenandoahHeap::heap()->lock()) {}


2080 
2081   void heap_region_do(ShenandoahHeapRegion* r) {
2082     // Drop unnecessary "pinned" state from regions that do not have CP marks
2083     // anymore, as this would allow trashing them.
2084 
2085     if (r->is_active()) {
2086       if (r->is_pinned()) {
2087         if (r->pin_count() == 0) {
2088           ShenandoahHeapLocker locker(_lock);
2089           r->make_unpinned();
2090         }
2091       } else {
2092         if (r->pin_count() > 0) {
2093           ShenandoahHeapLocker locker(_lock);
2094           r->make_pinned();
2095         }
2096       }
2097     }
2098   }
2099 
2100   bool is_thread_safe() { return true; }
2101 };
2102 
2103 void ShenandoahHeap::update_heap_region_states(bool concurrent) {
2104   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2105   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2106 
2107   {
2108     ShenandoahGCPhase phase(concurrent ?
2109                             ShenandoahPhaseTimings::final_update_refs_update_region_states :
2110                             ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
2111     ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl;
2112     parallel_heap_region_iterate(&cl);
2113 
2114     assert_pinned_region_status();
2115   }
2116 
2117   {
2118     ShenandoahGCPhase phase(concurrent ?
2119                             ShenandoahPhaseTimings::final_update_refs_trash_cset :
2120                             ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
2121     trash_cset_regions();
2122   }
2123 }
2124 
2125 void ShenandoahHeap::rebuild_free_set(bool concurrent) {
2126   {
2127     ShenandoahGCPhase phase(concurrent ?
2128                             ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
2129                             ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
2130     ShenandoahHeapLocker locker(lock());
2131     _free_set->rebuild();

2225   EventMark em("%s", msg);
2226 
2227   op_uncommit(shrink_before, shrink_until);
2228 }
2229 
2230 void ShenandoahHeap::try_inject_alloc_failure() {
2231   if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2232     _inject_alloc_failure.set();
2233     os::naked_short_sleep(1);
2234     if (cancelled_gc()) {
2235       log_info(gc)("Allocation failure was successfully injected");
2236     }
2237   }
2238 }
2239 
2240 bool ShenandoahHeap::should_inject_alloc_failure() {
2241   return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2242 }
2243 
2244 void ShenandoahHeap::initialize_serviceability() {
2245   _memory_pool = new ShenandoahMemoryPool(this);
2246   _cycle_memory_manager.add_pool(_memory_pool);
2247   _stw_memory_manager.add_pool(_memory_pool);









2248 }
2249 
2250 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2251   GrowableArray<GCMemoryManager*> memory_managers(2);
2252   memory_managers.append(&_cycle_memory_manager);
2253   memory_managers.append(&_stw_memory_manager);
2254   return memory_managers;
2255 }
2256 
2257 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2258   GrowableArray<MemoryPool*> memory_pools(1);
2259   memory_pools.append(_memory_pool);





2260   return memory_pools;
2261 }
2262 
2263 MemoryUsage ShenandoahHeap::memory_usage() {
2264   return _memory_pool->get_memory_usage();
2265 }
2266 
2267 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2268   _heap(ShenandoahHeap::heap()),
2269   _index(0) {}
2270 
2271 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2272   _heap(heap),
2273   _index(0) {}
2274 
2275 void ShenandoahRegionIterator::reset() {
2276   _index = 0;
2277 }
2278 
2279 bool ShenandoahRegionIterator::has_next() const {
2280   return _index < _heap->num_regions();
2281 }
2282 
2283 char ShenandoahHeap::gc_state() const {
2284   return _gc_state.raw_value();
2285 }
2286 
2287 ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
2288 #ifdef ASSERT
2289   assert(_liveness_cache != NULL, "sanity");
2290   assert(worker_id < _max_workers, "sanity");
2291   for (uint i = 0; i < num_regions(); i++) {
2292     assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
2293   }
2294 #endif
2295   return _liveness_cache[worker_id];
2296 }
2297 
2298 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
2299   assert(worker_id < _max_workers, "sanity");
2300   assert(_liveness_cache != NULL, "sanity");
2301   ShenandoahLiveData* ld = _liveness_cache[worker_id];

2302   for (uint i = 0; i < num_regions(); i++) {
2303     ShenandoahLiveData live = ld[i];
2304     if (live > 0) {
2305       ShenandoahHeapRegion* r = get_region(i);
2306       r->increase_live_data_gc_words(live);
2307       ld[i] = 0;
2308     }
2309   }
2310 }
2311 
2312 bool ShenandoahHeap::requires_barriers(stackChunkOop obj) const {
2313   if (is_idle()) return false;
2314 
2315   // Objects allocated after marking start are implicitly alive and don't need any barriers during
2316   // the marking phase.
2317   if (is_concurrent_mark_in_progress() &&
2318      !marking_context()->allocated_after_mark_start(obj)) {
2319     return true;
2320   }
2321 
2322   // Cannot guarantee obj is deeply good.
2323   if (has_forwarded_objects()) {
2324     return true;
2325   }
2326 
2327   return false;
2328 }













































































































































































































  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "memory/allocation.hpp"
  27 #include "memory/universe.hpp"
  28 
  29 #include "gc/shared/gcArguments.hpp"
  30 #include "gc/shared/gcTimer.hpp"
  31 #include "gc/shared/gcTraceTime.inline.hpp"
  32 #include "gc/shared/locationPrinter.inline.hpp"
  33 #include "gc/shared/memAllocator.hpp"
  34 #include "gc/shared/plab.hpp"
  35 #include "gc/shared/tlab_globals.hpp"
  36 
  37 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  38 #include "gc/shenandoah/shenandoahCardTable.hpp"
  39 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  40 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  41 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  42 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
  43 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  44 #include "gc/shenandoah/shenandoahControlThread.hpp"
  45 #include "gc/shenandoah/shenandoahRegulatorThread.hpp"
  46 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  47 #include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
  48 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  49 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  50 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  51 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  52 #include "gc/shenandoah/shenandoahInitLogger.hpp"
  53 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  54 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
  55 #include "gc/shenandoah/shenandoahMetrics.hpp"
  56 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  57 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  58 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  59 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
  60 #include "gc/shenandoah/shenandoahPadding.hpp"
  61 #include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
  62 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  63 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  64 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
  65 #include "gc/shenandoah/shenandoahStringDedup.hpp"
  66 #include "gc/shenandoah/shenandoahSTWMark.hpp"
  67 #include "gc/shenandoah/shenandoahUtils.hpp"
  68 #include "gc/shenandoah/shenandoahVerifier.hpp"
  69 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  70 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  71 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  72 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  73 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  74 #include "gc/shenandoah/mode/shenandoahGenerationalMode.hpp"
  75 #include "gc/shenandoah/mode/shenandoahIUMode.hpp"
  76 #include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
  77 #include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
  78 
  79 #if INCLUDE_JFR
  80 #include "gc/shenandoah/shenandoahJfrSupport.hpp"
  81 #endif
  82 
  83 #include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
  84 
  85 #include "classfile/systemDictionary.hpp"
  86 #include "code/codeCache.hpp"
  87 #include "memory/classLoaderMetaspace.hpp"
  88 #include "memory/metaspaceUtils.hpp"
  89 #include "oops/compressedOops.inline.hpp"
  90 #include "prims/jvmtiTagMap.hpp"
  91 #include "runtime/atomic.hpp"
  92 #include "runtime/globals.hpp"
  93 #include "runtime/interfaceSupport.inline.hpp"
  94 #include "runtime/java.hpp"
  95 #include "runtime/orderAccess.hpp"
  96 #include "runtime/safepointMechanism.hpp"
  97 #include "runtime/vmThread.hpp"
  98 #include "services/mallocTracker.hpp"
  99 #include "services/memTracker.hpp"
 100 #include "utilities/events.hpp"
 101 #include "utilities/powerOfTwo.hpp"
 102 
 103 class ShenandoahPretouchHeapTask : public WorkerTask {
 104 private:

 152 jint ShenandoahHeap::initialize() {
 153   //
 154   // Figure out heap sizing
 155   //
 156 
 157   size_t init_byte_size = InitialHeapSize;
 158   size_t min_byte_size  = MinHeapSize;
 159   size_t max_byte_size  = MaxHeapSize;
 160   size_t heap_alignment = HeapAlignment;
 161 
 162   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 163 
 164   Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
 165   Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
 166 
 167   _num_regions = ShenandoahHeapRegion::region_count();
 168   assert(_num_regions == (max_byte_size / reg_size_bytes),
 169          "Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT,
 170          _num_regions, max_byte_size, reg_size_bytes);
 171 



 172   size_t num_committed_regions = init_byte_size / reg_size_bytes;
 173   num_committed_regions = MIN2(num_committed_regions, _num_regions);
 174   assert(num_committed_regions <= _num_regions, "sanity");
 175   _initial_size = num_committed_regions * reg_size_bytes;
 176 
 177   size_t num_min_regions = min_byte_size / reg_size_bytes;
 178   num_min_regions = MIN2(num_min_regions, _num_regions);
 179   assert(num_min_regions <= _num_regions, "sanity");
 180   _minimum_size = num_min_regions * reg_size_bytes;
 181 
 182   // Default to max heap size.
 183   _soft_max_size = _num_regions * reg_size_bytes;
 184 
 185   _committed = _initial_size;
 186 
 187   // Now we know the number of regions and heap sizes, initialize the heuristics.
 188   initialize_generations();
 189   initialize_heuristics();
 190 
 191   size_t heap_page_size   = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 192   size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 193   size_t region_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 194 
 195   //
 196   // Reserve and commit memory for heap
 197   //
 198 
 199   ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
 200   initialize_reserved_region(heap_rs);
 201   _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
 202   _heap_region_special = heap_rs.special();
 203 
 204   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
 205          "Misaligned heap: " PTR_FORMAT, p2i(base()));
 206 
 207 #if SHENANDOAH_OPTIMIZED_MARKTASK
 208   // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
 209   // Fail if we ever attempt to address more than we can.
 210   if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
 211     FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
 212                           "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
 213                           "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
 214                 p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
 215     vm_exit_during_initialization("Fatal Error", buf);
 216   }
 217 #endif
 218 
 219   ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
 220   if (!_heap_region_special) {
 221     os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
 222                               "Cannot commit heap memory");
 223   }
 224 
 225   BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this, _heap_region));
 226 
 227   //
 228   // After reserving the Java heap, create the card table, barriers, and workers, in dependency order
 229   //
 230   if (mode()->is_generational()) {
 231     ShenandoahDirectCardMarkRememberedSet *rs;
 232     ShenandoahCardTable* card_table = ShenandoahBarrierSet::barrier_set()->card_table();
 233     size_t card_count = card_table->cards_required(heap_rs.size() / HeapWordSize) - 1;
 234     rs = new ShenandoahDirectCardMarkRememberedSet(ShenandoahBarrierSet::barrier_set()->card_table(), card_count);
 235     _card_scan = new ShenandoahScanRemembered<ShenandoahDirectCardMarkRememberedSet>(rs);
 236   }
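  // For the generational-mode block above: the remembered set is a direct card-mark table sized from
  // cards_required() over the reserved heap (in words) and wrapped in ShenandoahScanRemembered, which is
  // what later phases use to find and scan dirty cards.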
 237 
 238   _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
 239   if (_workers == NULL) {
 240     vm_exit_during_initialization("Failed necessary allocation.");
 241   } else {
 242     _workers->initialize_workers();
 243   }
 244 
 245   if (ParallelGCThreads > 1) {
 246     _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread", ParallelGCThreads);
 247     _safepoint_workers->initialize_workers();
 248   }
 249 
 250   //
 251   // Reserve and commit memory for bitmap(s)
 252   //
 253 
 254   _bitmap_size = ShenandoahMarkBitMap::compute_size(heap_rs.size());
 255   _bitmap_size = align_up(_bitmap_size, bitmap_page_size);
 256 
 257   size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();
 258 
 259   guarantee(bitmap_bytes_per_region != 0,
 260             "Bitmap bytes per region should not be zero");
 261   guarantee(is_power_of_2(bitmap_bytes_per_region),
 262             "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
 263 
 264   if (bitmap_page_size > bitmap_bytes_per_region) {
 265     _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
 266     _bitmap_bytes_per_slice = bitmap_page_size;
 267   } else {
 268     _bitmap_regions_per_slice = 1;
 269     _bitmap_bytes_per_slice = bitmap_bytes_per_region;

 273             "Should have at least one region per slice: " SIZE_FORMAT,
 274             _bitmap_regions_per_slice);
 275 
 276   guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
 277             "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
 278             _bitmap_bytes_per_slice, bitmap_page_size);
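  // Worked sizing example for the slice selection above (values are illustrative assumptions, not taken
  // from this change): with 2 MB regions and a mark bitmap that needs one bit per 8-byte heap word,
  // bitmap_bytes_per_region would be 2 MB / 64 = 32 KB. With 4 KB pages the else-branch applies
  // (one region per slice, 32 KB per slice, and 32 KB % 4 KB == 0 satisfies the guarantee); with 2 MB
  // large pages the if-branch applies (_bitmap_regions_per_slice = 64, _bitmap_bytes_per_slice = 2 MB).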
 279 
 280   ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
 281   MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
 282   _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
 283   _bitmap_region_special = bitmap.special();
 284 
 285   size_t bitmap_init_commit = _bitmap_bytes_per_slice *
 286                               align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
 287   bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
 288   if (!_bitmap_region_special) {
 289     os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
 290                               "Cannot commit bitmap memory");
 291   }
 292 
 293   _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);
 294 
 295   if (ShenandoahVerify) {
 296     ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
 297     if (!verify_bitmap.special()) {
 298       os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
 299                                 "Cannot commit verification bitmap memory");
 300     }
 301     MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
 302     MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
 303     _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
 304     _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
 305   }
 306 
 307   // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
 308   ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
 309   MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
 310   _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
 311   _aux_bitmap_region_special = aux_bitmap.special();
 312   _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
 313 

 419   // There should probably be Shenandoah-specific options for these,
 420   // just as there are G1-specific options.
 421   {
 422     ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
 423     satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
 424     satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
 425   }
 426 
 427   _monitoring_support = new ShenandoahMonitoringSupport(this);
 428   _phase_timings = new ShenandoahPhaseTimings(max_workers());
 429   ShenandoahCodeRoots::initialize();
 430 
 431   if (ShenandoahPacing) {
 432     _pacer = new ShenandoahPacer(this);
 433     _pacer->setup_for_idle();
 434   } else {
 435     _pacer = NULL;
 436   }
 437 
 438   _control_thread = new ShenandoahControlThread();
 439   _regulator_thread = new ShenandoahRegulatorThread(_control_thread);
 440 
 441   ShenandoahInitLogger::print();
 442 
 443   return JNI_OK;
 444 }
 445 
 446 void ShenandoahHeap::initialize_generations() {
 447   size_t max_capacity_new      = young_generation_capacity(max_capacity());
 448   size_t soft_max_capacity_new = young_generation_capacity(soft_max_capacity());
 449   size_t max_capacity_old      = max_capacity() - max_capacity_new;
 450   size_t soft_max_capacity_old = soft_max_capacity() - soft_max_capacity_new;
 451 
 452   _young_generation = new ShenandoahYoungGeneration(_max_workers, max_capacity_new, soft_max_capacity_new);
 453   _old_generation = new ShenandoahOldGeneration(_max_workers, max_capacity_old, soft_max_capacity_old);
 454   _global_generation = new ShenandoahGlobalGeneration(_max_workers);
 455 }
 456 
 457 void ShenandoahHeap::initialize_heuristics() {
 458   if (ShenandoahGCMode != NULL) {
 459     if (strcmp(ShenandoahGCMode, "satb") == 0) {
 460       _gc_mode = new ShenandoahSATBMode();
 461     } else if (strcmp(ShenandoahGCMode, "iu") == 0) {
 462       _gc_mode = new ShenandoahIUMode();
 463     } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
 464       _gc_mode = new ShenandoahPassiveMode();
 465     } else if (strcmp(ShenandoahGCMode, "generational") == 0) {
 466       _gc_mode = new ShenandoahGenerationalMode();
 467     } else {
 468       vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
 469     }
 470   } else {
 471     vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
 472   }
 473   _gc_mode->initialize_flags();
 474   if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 475     vm_exit_during_initialization(
 476             err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 477                     _gc_mode->name()));
 478   }
 479   if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
 480     vm_exit_during_initialization(
 481             err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 482                     _gc_mode->name()));
 483   }

 484 
 485   _global_generation->initialize_heuristics(_gc_mode);
 486   if (mode()->is_generational()) {
 487     _young_generation->initialize_heuristics(_gc_mode);
 488     _old_generation->initialize_heuristics(_gc_mode);
 489 
 490     ShenandoahEvacWaste = ShenandoahGenerationalEvacWaste;








 491   }
 492 }
 493 
 494 #ifdef _MSC_VER
 495 #pragma warning( push )
 496 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 497 #endif
 498 
 499 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 500   CollectedHeap(),
 501   _gc_generation(NULL),
 502   _prepare_for_old_mark(false),
 503   _initial_size(0),
 504   _used(0),
 505   _committed(0),
 506   _max_workers(MAX3(ConcGCThreads, ParallelGCThreads, 1U)),

 507   _workers(NULL),
 508   _safepoint_workers(NULL),
 509   _heap_region_special(false),
 510   _num_regions(0),
 511   _regions(NULL),
 512   _update_refs_iterator(this),
 513   _alloc_supplement_reserve(0),
 514   _promoted_reserve(0),
 515   _old_evac_reserve(0),
 516   _old_evac_expended(0),
 517   _young_evac_reserve(0),
 518   _captured_old_usage(0),
 519   _previous_promotion(0),
 520   _cancel_requested_time(0),
 521   _young_generation(NULL),
 522   _global_generation(NULL),
 523   _old_generation(NULL),
 524   _control_thread(NULL),
 525   _regulator_thread(NULL),
 526   _shenandoah_policy(policy),


 527   _free_set(NULL),
 528   _pacer(NULL),
 529   _verifier(NULL),
 530   _phase_timings(NULL),
 531   _monitoring_support(NULL),
 532   _memory_pool(NULL),
 533   _young_gen_memory_pool(NULL),
 534   _old_gen_memory_pool(NULL),
 535   _stw_memory_manager("Shenandoah Pauses", "end of GC pause"),
 536   _cycle_memory_manager("Shenandoah Cycles", "end of GC cycle"),
 537   _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 538   _soft_ref_policy(),
 539   _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),

 540   _marking_context(NULL),
 541   _bitmap_size(0),
 542   _bitmap_regions_per_slice(0),
 543   _bitmap_bytes_per_slice(0),
 544   _bitmap_region_special(false),
 545   _aux_bitmap_region_special(false),
 546   _liveness_cache(NULL),
 547   _collection_set(NULL),
 548   _card_scan(NULL)
 549 {

















 550 }
 551 
 552 #ifdef _MSC_VER
 553 #pragma warning( pop )
 554 #endif
 555 





























 556 void ShenandoahHeap::print_on(outputStream* st) const {
 557   st->print_cr("Shenandoah Heap");
 558   st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
 559                byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
 560                byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
 561                byte_size_in_proper_unit(committed()),    proper_unit_for_byte_size(committed()),
 562                byte_size_in_proper_unit(used()),         proper_unit_for_byte_size(used()));
 563   st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions",
 564                num_regions(),
 565                byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
 566                proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));
 567 
 568   st->print("Status: ");
 569   if (has_forwarded_objects())                 st->print("has forwarded objects, ");
 570   if (is_concurrent_old_mark_in_progress())    st->print("old marking, ");
 571   if (is_concurrent_young_mark_in_progress())  st->print("young marking, ");
 572   if (is_evacuation_in_progress())             st->print("evacuating, ");
 573   if (is_update_refs_in_progress())            st->print("updating refs, ");
 574   if (is_degenerated_gc_in_progress())         st->print("degenerated gc, ");
 575   if (is_full_gc_in_progress())                st->print("full gc, ");
 576   if (is_full_gc_move_in_progress())           st->print("full gc move, ");
 577   if (is_concurrent_weak_root_in_progress())   st->print("concurrent weak roots, ");
 578   if (is_concurrent_strong_root_in_progress() &&
 579       !is_concurrent_weak_root_in_progress())  st->print("concurrent strong roots, ");
 580 
 581   if (cancelled_gc()) {
 582     st->print("cancelled");
 583   } else {
 584     st->print("not cancelled");
 585   }
 586   st->cr();
 587 
 588   st->print_cr("Reserved region:");
 589   st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
 590                p2i(reserved_region().start()),
 591                p2i(reserved_region().end()));

 614     assert(thread->is_Worker_thread(), "Only worker thread expected");
 615     ShenandoahThreadLocalData::initialize_gclab(thread);
 616   }
 617 };
 618 
 619 void ShenandoahHeap::post_initialize() {
 620   CollectedHeap::post_initialize();
 621   MutexLocker ml(Threads_lock);
 622 
 623   ShenandoahInitWorkerGCLABClosure init_gclabs;
 624   _workers->threads_do(&init_gclabs);
 625 
 626   // The gclab cannot be initialized early during VM startup, as it cannot determine its max_size.
 627   // Now, we let WorkerThreads initialize the gclab when a new worker is created.
 628   _workers->set_initialize_gclab();
 629   if (_safepoint_workers != NULL) {
 630     _safepoint_workers->threads_do(&init_gclabs);
 631     _safepoint_workers->set_initialize_gclab();
 632   }
 633 


 634   JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers());
 635 }
 636 
 637 
 638 ShenandoahOldHeuristics* ShenandoahHeap::old_heuristics() {
 639   return (ShenandoahOldHeuristics*) _old_generation->heuristics();
 640 }
 641 
 642 bool ShenandoahHeap::doing_mixed_evacuations() {
 643   return old_heuristics()->unprocessed_old_collection_candidates() > 0;
 644 }
 645 
 646 bool ShenandoahHeap::is_old_bitmap_stable() const {
 647   ShenandoahOldGeneration::State state = _old_generation->state();
 648   return state != ShenandoahOldGeneration::MARKING
 649       && state != ShenandoahOldGeneration::BOOTSTRAPPING;
 650 }
 651 
 652 bool ShenandoahHeap::is_gc_generation_young() const {
 653   return _gc_generation != NULL && _gc_generation->generation_mode() == YOUNG;
 654 }
 655 
 656 // There are three JVM parameters for setting young gen capacity:
 657 //    NewSize, MaxNewSize, NewRatio.
 658 //
 659 // If only NewSize is set, it assigns a fixed size and the other two parameters are ignored.
 660 // Otherwise NewRatio applies.
 661 //
 662 // If NewSize is set in any combination, it provides a lower bound.
 663 //
 664 // If MaxNewSize is set it provides an upper bound.
 665 // If this bound is smaller than NewSize, it supersedes,
 666 // resulting in a fixed size given by MaxNewSize.
 667 size_t ShenandoahHeap::young_generation_capacity(size_t capacity) {
 668   if (strcmp(ShenandoahGCMode, "generational") == 0) {
 669     if (FLAG_IS_CMDLINE(NewSize) && !FLAG_IS_CMDLINE(MaxNewSize) && !FLAG_IS_CMDLINE(NewRatio)) {
 670       capacity = MIN2(NewSize, capacity);
 671     } else {
 672       capacity /= NewRatio + 1;
 673       if (FLAG_IS_CMDLINE(NewSize)) {
 674         capacity = MAX2(NewSize, capacity);
 675       }
 676       if (FLAG_IS_CMDLINE(MaxNewSize)) {
 677         capacity = MIN2(MaxNewSize, capacity);
 678       }
 679     }
 680   }
 681   // else, make no adjustment to global capacity
 682   return capacity;
 683 }
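// Worked example for young_generation_capacity() above (illustrative values): in generational mode with
// a 3 GB capacity and only -XX:NewRatio=2 given on the command line, capacity /= (NewRatio + 1) yields
// 1 GB of young capacity. Adding -XX:NewSize=1536m would raise that to 1.5 GB (NewSize acts as a lower
// bound here), while -XX:MaxNewSize=768m instead would cap it at 768 MB. With only -XX:NewSize set, the
// first branch pins young capacity at NewSize (bounded by the overall capacity).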
 684 
 685 size_t ShenandoahHeap::used() const {
 686   return Atomic::load(&_used);
 687 }
 688 
 689 size_t ShenandoahHeap::committed() const {
 690   return Atomic::load(&_committed);
 691 }
 692 
 693 void ShenandoahHeap::increase_committed(size_t bytes) {
 694   shenandoah_assert_heaplocked_or_safepoint();
 695   _committed += bytes;
 696 }
 697 
 698 void ShenandoahHeap::decrease_committed(size_t bytes) {
 699   shenandoah_assert_heaplocked_or_safepoint();
 700   _committed -= bytes;
 701 }
 702 
 703 void ShenandoahHeap::increase_used(size_t bytes) {
 704   Atomic::add(&_used, bytes, memory_order_relaxed);
 705 }
 706 
 707 void ShenandoahHeap::set_used(size_t bytes) {
 708   Atomic::store(&_used, bytes);
 709 }
 710 
 711 void ShenandoahHeap::decrease_used(size_t bytes) {
 712   assert(used() >= bytes, "never decrease heap size by more than we've left");
 713   Atomic::sub(&_used, bytes, memory_order_relaxed);
 714 }
 715 




 716 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
 717   size_t bytes = words * HeapWordSize;
 718   if (!waste) {
 719     increase_used(bytes);
 720   }
 721 
 722   if (ShenandoahPacing) {
 723     control_thread()->pacing_notify_alloc(words);
 724     if (waste) {
 725       pacer()->claim_for_alloc(words, true);
 726     }
 727   }
 728 }
 729 
 730 size_t ShenandoahHeap::capacity() const {
 731   return committed();
 732 }
 733 
 734 size_t ShenandoahHeap::max_capacity() const {
 735   return _num_regions * ShenandoahHeapRegion::region_size_bytes();
 736 }
 737 
 738 size_t ShenandoahHeap::soft_max_capacity() const {
 739   size_t v = Atomic::load(&_soft_max_size);
 740   assert(min_capacity() <= v && v <= max_capacity(),
 741          "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
 742          min_capacity(), v, max_capacity());
 743   return v;
 744 }
 745 
 746 void ShenandoahHeap::set_soft_max_capacity(size_t v) {
 747   assert(min_capacity() <= v && v <= max_capacity(),
 748          "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
 749          min_capacity(), v, max_capacity());
 750   Atomic::store(&_soft_max_size, v);
 751 
 752   if (mode()->is_generational()) {
 753     size_t soft_max_capacity_young = young_generation_capacity(_soft_max_size);
 754     size_t soft_max_capacity_old = _soft_max_size - soft_max_capacity_young;
 755     _young_generation->set_soft_max_capacity(soft_max_capacity_young);
 756     _old_generation->set_soft_max_capacity(soft_max_capacity_old);
 757   }
 758 }
 759 
 760 size_t ShenandoahHeap::min_capacity() const {
 761   return _minimum_size;
 762 }
 763 
 764 size_t ShenandoahHeap::initial_capacity() const {
 765   return _initial_size;
 766 }
 767 
 768 bool ShenandoahHeap::is_in(const void* p) const {
 769   HeapWord* heap_base = (HeapWord*) base();
 770   HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
 771   return p >= heap_base && p < last_region_end;
 772 }
 773 
 774 bool ShenandoahHeap::is_in_young(const void* p) const {
 775   return is_in(p) && heap_region_containing(p)->affiliation() == ShenandoahRegionAffiliation::YOUNG_GENERATION;
 776 }
 777 
 778 bool ShenandoahHeap::is_in_old(const void* p) const {
 779   return is_in(p) && heap_region_containing(p)->affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION;
 780 }
 781 
 782 bool ShenandoahHeap::is_in_active_generation(oop obj) const {
 783   if (!mode()->is_generational()) {
 784     // everything is the same single generation
 785     return true;
 786   }
 787 
 788   if (active_generation() == NULL) {
 789     // no collection is happening, only expect this to be called
 790     // when concurrent processing is active, but that could change
 791     return false;
 792   }
 793 
 794   return active_generation()->contains(obj);
 795 }
 796 
 797 void ShenandoahHeap::op_uncommit(double shrink_before, size_t shrink_until) {
 798   assert (ShenandoahUncommit, "should be enabled");
 799 
 800   // Application allocates from the beginning of the heap, and GC allocates at
 801   // the end of it. It is more efficient to uncommit from the end, so that the application
 802   // can keep using the committed regions near the bottom of the heap. GC allocations are
 803   // much less frequent, and can therefore accept the committing costs.
 804 
 805   size_t count = 0;
 806   for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow
 807     ShenandoahHeapRegion* r = get_region(i - 1);
 808     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
 809       ShenandoahHeapLocker locker(lock());
 810       if (r->is_empty_committed()) {
 811         if (committed() < shrink_until + ShenandoahHeapRegion::region_size_bytes()) {
 812           break;
 813         }
 814 
 815         r->make_uncommitted();
 816         count++;
 817       }
 818     }
 819     SpinPause(); // allow allocators to take the lock
 820   }
 821 
 822   if (count > 0) {
 823     control_thread()->notify_heap_changed();
 824     regulator_thread()->notify_heap_changed();
 825   }
 826 }
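// A note on the loop above: is_empty_committed() is checked twice on purpose. The first check runs
// without the heap lock as a cheap filter; the state is then re-checked under ShenandoahHeapLocker
// before uncommitting, since an allocator may have claimed the region in between. The SpinPause()
// between iterations deliberately yields the lock to allocators, and the loop stops once committed()
// would otherwise drop below shrink_until.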
 827 
 828 void ShenandoahHeap::handle_old_evacuation(HeapWord* obj, size_t words, bool promotion) {
 829   // Only register the copy of the object that won the evacuation race.
 830   card_scan()->register_object_wo_lock(obj);
 831 
 832   // Mark the entire range of the evacuated object as dirty.  At next remembered set scan,
 833   // we will clear dirty bits that do not hold interesting pointers.  It's more efficient to
 834   // do this in batch, in a background GC thread, than to try to carefully dirty only cards
 835   // that hold interesting pointers right now.
 836   card_scan()->mark_range_as_dirty(obj, words);
 837 
 838   if (promotion) {
 839     // This evacuation was a promotion, track this as allocation against old gen
 840     old_generation()->increase_allocated(words * HeapWordSize);
 841   }
 842 }
 843 
 844 void ShenandoahHeap::handle_old_evacuation_failure() {
 845   if (_old_gen_oom_evac.try_set()) {
 846     log_info(gc)("Old gen evac failure.");
 847   }
 848 }
 849 
 850 void ShenandoahHeap::handle_promotion_failure() {
 851   old_heuristics()->handle_promotion_failure();
 852 }
 853 
 854 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
 855   // New object should fit the GCLAB size
 856   size_t min_size = MAX2(size, PLAB::min_size());
 857 
 858   // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
 859   size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
 860 
 861   // Limit growth of GCLABs to ShenandoahMaxEvacLABRatio * the minimum size.  This enables more equitable distribution of the
 862   // available evacuation budget among the many threads that are coordinating in the evacuation effort.
 863   if (ShenandoahMaxEvacLABRatio > 0) {
 864     new_size = MIN2(new_size, PLAB::min_size() * ShenandoahMaxEvacLABRatio);
 865   }
 866   new_size = MIN2(new_size, PLAB::max_size());
 867   new_size = MAX2(new_size, PLAB::min_size());
 868 
 869   // Record new heuristic value even if we take any shortcut. This captures
 870   // the case when moderately-sized objects always take a shortcut. At some point,
 871   // heuristics should catch up with them.
 872   ShenandoahThreadLocalData::set_gclab_size(thread, new_size);
 873 
 874   if (new_size < size) {
 875     // New size still does not fit the object. Fall back to shared allocation.
 876     // This avoids retiring perfectly good GCLABs, when we encounter a large object.
 877     return NULL;
 878   }
 879 
 880   // Retire current GCLAB, and allocate a new one.
 881   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
 882   gclab->retire();
 883 
 884   size_t actual_size = 0;
 885   HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);

 889 
 890   assert (size <= actual_size, "allocation should fit");
 891 
 892   if (ZeroTLAB) {
 893     // ..and clear it.
 894     Copy::zero_to_words(gclab_buf, actual_size);
 895   } else {
 896     // ...and zap just allocated object.
 897 #ifdef ASSERT
 898     // Skip mangling the space corresponding to the object header to
 899     // ensure that the returned space is not considered parsable by
 900     // any concurrent GC thread.
 901     size_t hdr_size = oopDesc::header_size();
 902     Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
 903 #endif // ASSERT
 904   }
 905   gclab->set_buf(gclab_buf, actual_size);
 906   return gclab->allocate(size);
 907 }
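// Sizing sketch for allocate_from_gclab_slow() above (numbers are purely illustrative): if the thread's
// previous GCLAB size was 64K words and ShenandoahMaxEvacLABRatio * PLAB::min_size() happens to be
// 96K words, the doubling asks for 128K but is clamped to 96K (and always kept within PLAB's min/max).
// If the object being evacuated is still larger than the clamped size, the current GCLAB is left in
// place and the request falls back to a shared allocation instead.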
 908 
 909 // Establish a new PLAB and allocate size HeapWords within it.
 910 HeapWord* ShenandoahHeap::allocate_from_plab_slow(Thread* thread, size_t size, bool is_promotion) {
 911   // New object should fit the PLAB size
 912   size_t min_size = MAX2(size, PLAB::min_size());
 913 
 914   // Figure out size of new PLAB, looking back at heuristics. Expand aggressively.
 915   size_t cur_size = ShenandoahThreadLocalData::plab_size(thread);
 916   if (cur_size == 0) {
 917     cur_size = PLAB::min_size();
 918   }
 919   size_t future_size = cur_size * 2;
 920   // Limit growth of PLABs to ShenandoahMaxEvacLABRatio * the minimum size.  This enables more equitable distribution of the
 921   // available evacuation budget among the many threads that are coordinating in the evacuation effort.
 922   if (ShenandoahMaxEvacLABRatio > 0) {
 923     future_size = MIN2(future_size, PLAB::min_size() * ShenandoahMaxEvacLABRatio);
 924   }
 925   future_size = MIN2(future_size, PLAB::max_size());
 926   future_size = MAX2(future_size, PLAB::min_size());
 927 
 928   size_t unalignment = future_size % CardTable::card_size_in_words();
 929   if (unalignment != 0) {
 930     future_size = future_size - unalignment + CardTable::card_size_in_words();
 931   }
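  // Worked example of the rounding above (values are illustrative): with 512-byte cards and 8-byte
  // heap words, CardTable::card_size_in_words() is 64; a future_size of 1000 words has
  // unalignment 1000 % 64 = 40 and is rounded up to 1000 - 40 + 64 = 1024 words, keeping the PLAB a
  // whole number of cards, which the lock-free registration in retire_plab() below depends on.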
 932 
 933   // Record new heuristic value even if we take any shortcut. This captures
 934   // the case when moderately-sized objects always take a shortcut. At some point,
 935   // heuristics should catch up with them.  Note that the requested cur_size may
 936   // not be honored, but we remember that this is the preferred size.
 937   ShenandoahThreadLocalData::set_plab_size(thread, future_size);
 938   if (cur_size < size) {
 939     // The PLAB to be allocated is still not large enough to hold the object. Fall back to shared allocation.
 940     // This avoids retiring perfectly good PLABs in order to represent a single large object allocation.
 941     return nullptr;
 942   }
 943 
 944   // Retire current PLAB, and allocate a new one.
 945   PLAB* plab = ShenandoahThreadLocalData::plab(thread);
 946   if (plab->words_remaining() < PLAB::min_size()) {
 947     // Retire current PLAB, and allocate a new one.
 948     // CAUTION: retire_plab may register the remnant filler object with the remembered set scanner without a lock.  This
 949     // is safe iff it is assured that each PLAB is a whole-number multiple of card-mark memory size and each PLAB is
 950     // aligned with the start of a card's memory range.
 951 
 952     retire_plab(plab, thread);
 953 
 954     size_t actual_size = 0;
 955     // allocate_new_plab resets plab_evacuated and plab_promoted and disables promotions if old-gen available is
 956     // less than the remaining evacuation need.  It also adjusts plab_preallocated and expend_promoted if appropriate.
 957     HeapWord* plab_buf = allocate_new_plab(min_size, cur_size, &actual_size);
 958     if (plab_buf == NULL) {
 959       return NULL;
 960     } else {
 961       ShenandoahThreadLocalData::enable_plab_retries(thread);
 962     }
 963     assert (size <= actual_size, "allocation should fit");
 964     if (ZeroTLAB) {
 965       // ..and clear it.
 966       Copy::zero_to_words(plab_buf, actual_size);
 967     } else {
 968       // ...and zap just allocated object.
 969 #ifdef ASSERT
 970       // Skip mangling the space corresponding to the object header to
 971       // ensure that the returned space is not considered parsable by
 972       // any concurrent GC thread.
 973       size_t hdr_size = oopDesc::header_size();
 974       Copy::fill_to_words(plab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
 975 #endif // ASSERT
 976     }
 977     plab->set_buf(plab_buf, actual_size);
 978 
 979     if (is_promotion && !ShenandoahThreadLocalData::allow_plab_promotions(thread)) {
 980       return nullptr;
 981     }
 982     return plab->allocate(size);
 983   } else {
 984     // If there's still at least min_size() words available within the current plab, don't retire it.  Let's gnaw
 985     // away on this plab as long as we can.  Meanwhile, return nullptr to force this particular allocation request
 986     // to be satisfied with a shared allocation.  By packing more promotions into the previously allocated PLAB, we
 987     // reduce the likelihood of evacuation failures, and we reduce the need for downsizing our PLABs.
 988     return nullptr;
 989   }
 990 }
 991 
 992 // TODO: It is probably most efficient to register all objects (both promotions and evacuations) that were allocated within
 993 // this plab at the time we retire the plab.  A tight registration loop will run within both code and data caches.  This change
 994 // would allow smaller and faster in-line implementation of alloc_from_plab().  Since plabs are aligned on card-table boundaries,
 995 // this object registration loop can be performed without acquiring a lock.
 996 void ShenandoahHeap::retire_plab(PLAB* plab, Thread* thread) {
 997   // We don't enforce limits on plab_evacuated.  We let it consume all available old-gen memory in order to reduce the
 998   // probability of an evacuation failure.  We do enforce limits on promotion, to make sure that excessive promotion
 999   // does not result in an old-gen evacuation failure.  Note that a failed promotion is relatively harmless.  Any
1000   // object that fails to promote in the current cycle will be eligible for promotion in a subsequent cycle.
1001 
1002   // When the plab was instantiated, its entirety was treated as if the entire buffer was going to be dedicated to
1003   // promotions.  Now that we are retiring the buffer, we adjust for the reality that the plab is not entirely promotions.
1004   //  1. Some of the plab may have been dedicated to evacuations.
1005   //  2. Some of the plab may have been abandoned due to waste (at the end of the plab).
1006   size_t not_promoted =
1007     ShenandoahThreadLocalData::get_plab_preallocated_promoted(thread) - ShenandoahThreadLocalData::get_plab_promoted(thread);
1008   ShenandoahThreadLocalData::reset_plab_promoted(thread);
1009   ShenandoahThreadLocalData::reset_plab_evacuated(thread);
1010   ShenandoahThreadLocalData::set_plab_preallocated_promoted(thread, 0);
1011   if (not_promoted > 0) {
1012     unexpend_promoted(not_promoted);
1013   }
1014   size_t waste = plab->waste();
1015   HeapWord* top = plab->top();
1016   plab->retire();
1017   if (top != NULL && plab->waste() > waste && is_in_old(top)) {
1018     // If retiring the plab created a filler object, then we
1019     // need to register it with our card scanner so it can
1020     // safely walk the region backing the plab.
1021     log_debug(gc)("retire_plab() is registering remnant of size " SIZE_FORMAT " at " PTR_FORMAT,
1022                   plab->waste() - waste, p2i(top));
1023     card_scan()->register_object_wo_lock(top);
1024   }
1025 }
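// Accounting sketch for retire_plab() above (proportions are hypothetical): if the entire PLAB was
// pre-counted as promotion when it was allocated but the thread ended up promoting only a quarter of it,
// not_promoted covers the remaining three quarters and is handed back via unexpend_promoted(), so, on
// balance, evacuated space and end-of-buffer waste are not charged against the promotion budget. If
// retiring left a filler object in old gen, that remnant is registered with the card scanner so the
// region stays safely walkable.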
1026 
1027 void ShenandoahHeap::retire_plab(PLAB* plab) {
1028   Thread* thread = Thread::current();
1029   retire_plab(plab, thread);
1030 }
1031 
1032 void ShenandoahHeap::cancel_old_gc() {
1033   shenandoah_assert_safepoint();
1034   assert(_old_generation != NULL, "Should only have mixed collections in generation mode.");
1035   log_info(gc)("Terminating old gc cycle.");
1036 
1037   // Stop marking
1038   old_generation()->cancel_marking();
1039   // Stop coalescing undead objects
1040   set_prepare_for_old_mark_in_progress(false);
1041   // Stop tracking old regions
1042   old_heuristics()->abandon_collection_candidates();
1043   // Remove old generation access to young generation mark queues
1044   young_generation()->set_old_gen_task_queues(nullptr);
1045   // Transition to IDLE now.
1046   _old_generation->transition_to(ShenandoahOldGeneration::IDLE);
1047 }
1048 
1049 bool ShenandoahHeap::is_old_gc_active() {
1050   return is_concurrent_old_mark_in_progress()
1051          || is_prepare_for_old_mark_in_progress()
1052          || old_heuristics()->unprocessed_old_collection_candidates() > 0
1053          || young_generation()->old_gen_task_queues() != nullptr;
1054 }
1055 
1056 void ShenandoahHeap::coalesce_and_fill_old_regions() {
1057   class ShenandoahGlobalCoalesceAndFill : public ShenandoahHeapRegionClosure {
1058    public:
1059     virtual void heap_region_do(ShenandoahHeapRegion* region) override {
1060       // old region is not in the collection set and was not immediately trashed
1061       if (region->is_old() && region->is_active() && !region->is_humongous()) {
1062         // Reset the coalesce and fill boundary because this is a global collect
1063         // and cannot be preempted by young collects. We want to be sure the entire
1064         // region is coalesced here and does not resume from a previously interrupted
1065         // or completed coalescing.
1066         region->begin_preemptible_coalesce_and_fill();
1067         region->oop_fill_and_coalesce();
1068       }
1069     }
1070 
1071     virtual bool is_thread_safe() override {
1072       return true;
1073     }
1074   };
1075   ShenandoahGlobalCoalesceAndFill coalesce;
1076   parallel_heap_region_iterate(&coalesce);
1077 }
1078 
1079 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
1080                                             size_t requested_size,
1081                                             size_t* actual_size) {
1082   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
1083   HeapWord* res = allocate_memory(req, false);
1084   if (res != NULL) {
1085     *actual_size = req.actual_size();
1086   } else {
1087     *actual_size = 0;
1088   }
1089   return res;
1090 }
1091 
1092 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
1093                                              size_t word_size,
1094                                              size_t* actual_size) {
1095   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
1096   HeapWord* res = allocate_memory(req, false);
1097   if (res != NULL) {
1098     *actual_size = req.actual_size();
1099   } else {
1100     *actual_size = 0;
1101   }
1102   return res;
1103 }
1104 
1105 HeapWord* ShenandoahHeap::allocate_new_plab(size_t min_size,
1106                                             size_t word_size,
1107                                             size_t* actual_size) {
1108   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_plab(min_size, word_size);
1109   // Note that allocate_memory() sets a thread-local flag to prohibit further promotions by this thread
1110   // if we are at risk of exceeding the old-gen evacuation budget.
1111   HeapWord* res = allocate_memory(req, false);
1112   if (res != NULL) {
1113     *actual_size = req.actual_size();
1114   } else {
1115     *actual_size = 0;
1116   }
1117   return res;
1118 }
1119 
1120 // is_promotion is true iff this allocation is known for sure to hold the result of young-gen evacuation
1121 // to old-gen.  PLAB allocations are not known as such, since they may hold old-gen evacuations.
1122 HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req, bool is_promotion) {
1123   intptr_t pacer_epoch = 0;
1124   bool in_new_region = false;
1125   HeapWord* result = NULL;
1126 
1127   if (req.is_mutator_alloc()) {
1128     if (ShenandoahPacing) {
1129       pacer()->pace_for_alloc(req.size());
1130       pacer_epoch = pacer()->epoch();
1131     }
1132 
1133     if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
1134       result = allocate_memory_under_lock(req, in_new_region, is_promotion);
1135     }
1136 
1137     // Allocation failed, block until control thread reacted, then retry allocation.
1138     //
1139     // It might happen that one of the threads requesting allocation would unblock
1140     // way later after GC happened, only to fail the second allocation, because
1141     // other threads have already depleted the free storage. In this case, a better
1142     // strategy is to try again, as long as GC makes progress.
1143     //
1144     // Then, we need to make sure the allocation was retried after at least one
1145     // Full GC, which means we want to try more than ShenandoahFullGCThreshold times.
1146 
1147     size_t tries = 0;
1148 
1149     while (result == NULL && _progress_last_gc.is_set()) {
1150       tries++;
1151       control_thread()->handle_alloc_failure(req);
1152       result = allocate_memory_under_lock(req, in_new_region, is_promotion);
1153     }
1154 
1155     while (result == NULL && tries <= ShenandoahFullGCThreshold) {
1156       tries++;
1157       control_thread()->handle_alloc_failure(req);
1158       result = allocate_memory_under_lock(req, in_new_region, is_promotion);
1159     }
1160 
1161   } else {
1162     assert(req.is_gc_alloc(), "Can only accept GC allocs here");
1163     result = allocate_memory_under_lock(req, in_new_region, is_promotion);
1164     // Do not call handle_alloc_failure() here, because we cannot block.
1165     // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
1166   }
1167 
1168   if (in_new_region) {
1169     control_thread()->notify_heap_changed();
1170     regulator_thread()->notify_heap_changed();
1171   }
1172 
1173   if (result != NULL) {
1174     ShenandoahGeneration* alloc_generation = generation_for(req.affiliation());
1175     size_t requested = req.size();
1176     size_t actual = req.actual_size();
1177     size_t actual_bytes = actual * HeapWordSize;
1178 
1179     assert (req.is_lab_alloc() || (requested == actual),
1180             "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
1181             ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);
1182 
1183     if (req.is_mutator_alloc()) {
1184       notify_mutator_alloc_words(actual, false);
1185       alloc_generation->increase_allocated(actual_bytes);
1186 
1187       // If we requested more than we were granted, give the rest back to pacer.
1188       // This only matters if we are in the same pacing epoch: do not try to unpace
1189       // over the budget for the other phase.
1190       if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
1191         pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
1192       }
1193     } else {
1194       increase_used(actual_bytes);
1195     }
1196   }
1197 
1198   return result;
1199 }
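// Retry shape in allocate_memory() above: mutator allocations first retry for as long as the previous
// GC made progress (each attempt blocking in handle_alloc_failure() until the control thread reacts),
// then keep retrying until the total number of attempts exceeds ShenandoahFullGCThreshold, so the
// request has been retried across at least one Full GC (as the comment above notes) before it finally
// fails. GC allocations never block here; their failures are handled on the load-reference-barrier
// slow path instead.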
1200 
1201 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region, bool is_promotion) {
1202   // promotion_eligible pertains only to PLAB allocations, denoting that the PLAB is allowed to allocate for promotions.
1203   bool promotion_eligible = false;
1204   bool allow_allocation = true;
1205   bool plab_alloc = false;
1206   size_t requested_bytes = req.size() * HeapWordSize;
1207   HeapWord* result = nullptr;
1208   ShenandoahHeapLocker locker(lock());
1209   Thread* thread = Thread::current();
1210   if (mode()->is_generational()) {
1211     if (req.affiliation() == YOUNG_GENERATION) {
1212       if (req.is_mutator_alloc()) {
1213         if (requested_bytes >= young_generation()->adjusted_available()) {
1214           // We know this is not a GCLAB.  This must be a TLAB or a shared allocation.  Reject the allocation
1215           // request if it exceeds established capacity limits.
1216           return nullptr;
1217         }
1218       }
1219     } else {                    // req.affiliation() == OLD_GENERATION
1220       assert(req.type() != ShenandoahAllocRequest::_alloc_gclab, "GCLAB pertains only to young-gen memory");
1221       if (req.type() ==  ShenandoahAllocRequest::_alloc_plab) {
1222         plab_alloc = true;
1223         size_t promotion_avail = get_promoted_reserve();
1224         size_t promotion_expended = get_promoted_expended();
1225         if (promotion_expended + requested_bytes > promotion_avail) {
1226           promotion_avail = 0;
1227           if (get_old_evac_reserve() == 0) {
1228             // There are no old-gen evacuations in this pass.  There's no value in creating a plab that cannot
1229             // be used for promotions.
1230             allow_allocation = false;
1231           }
1232         } else {
1233           promotion_avail = promotion_avail - (promotion_expended + requested_bytes);
1234           promotion_eligible = true;
1235         }
1236       } else if (is_promotion) {
1237         // This is a shared alloc for promotion
1238         size_t promotion_avail = get_promoted_reserve();
1239         size_t promotion_expended = get_promoted_expended();
1240         if (promotion_expended + requested_bytes > promotion_avail) {
1241           promotion_avail = 0;
1242         } else {
1243           promotion_avail = promotion_avail - (promotion_expended + requested_bytes);
1244         }
1245 
1246         if (promotion_avail == 0) {
1247           // We need to reserve the remaining memory for evacuation.  Reject this allocation.  The object will be
1248           // evacuated to young-gen memory and promoted during a future GC pass.
1249           return nullptr;
1250         }
1251         // Else, we'll allow the allocation to proceed.  (Since we hold heap lock, the tested condition remains true.)
1252       } else {
1253         // This is a shared allocation for evacuation.  Memory has already been reserved for this purpose.
1254       }
1255     }
1256   }
1257   result = (allow_allocation)? _free_set->allocate(req, in_new_region): nullptr;
1258   if (result != NULL) {
1259     if (req.affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION) {
1260       ShenandoahThreadLocalData::reset_plab_promoted(thread);
1261       if (req.is_gc_alloc()) {
1262         if (req.type() ==  ShenandoahAllocRequest::_alloc_plab) {
1263           if (promotion_eligible) {
1264             size_t actual_size = req.actual_size() * HeapWordSize;
1265             // Assume the entirety of this PLAB will be used for promotion.  This prevents promotion from overreaching.
1266             // When we retire this plab, we'll unexpend what we don't really use.
1267             ShenandoahThreadLocalData::enable_plab_promotions(thread);
1268             expend_promoted(actual_size);
1269             assert(get_promoted_expended() <= get_promoted_reserve(), "Do not expend more promotion than budgeted");
1270             ShenandoahThreadLocalData::set_plab_preallocated_promoted(thread, actual_size);
1271           } else {
1272             // Disable promotions in this thread because the entirety of this PLAB must be available to hold old-gen evacuations.
1273             ShenandoahThreadLocalData::disable_plab_promotions(thread);
1274             ShenandoahThreadLocalData::set_plab_preallocated_promoted(thread, 0);
1275           }
1276         } else if (is_promotion) {
1277           // Shared promotion.  Assume size is requested_bytes.
1278           expend_promoted(requested_bytes);
1279           assert(get_promoted_expended() <= get_promoted_reserve(), "Do not expend more promotion than budgeted");
1280         }
1281       }
1282 
1283       // Register the newly allocated object while we're holding the global lock since there's no synchronization
1284       // built into the implementation of register_object().  There are potential races when multiple independent
1285       // threads are allocating objects, some of which might span the same card region.  For example, consider
1286       // a card table's memory region within which three objects are being allocated by three different threads:
1287       //
1288       // objects being "concurrently" allocated:
1289       //    [-----a------][-----b-----][--------------c------------------]
1290       //            [---- card table memory range --------------]
1291       //
1292       // Before any objects are allocated, this card's memory range holds no objects.  Note that:
1293       //   allocation of object a wants to set the has-object, first-start, and last-start attributes of the preceding card region.
1294       //   allocation of object b wants to set the has-object, first-start, and last-start attributes of this card region.
1295       //   allocation of object c also wants to set the has-object, first-start, and last-start attributes of this card region.
1296       //
1297       // The thread allocating b and the thread allocating c can "race" in various ways, resulting in confusion, such as last-start
1298       // representing object b while first-start represents object c.  This is why we need to require all register_object()
1299       // invocations to be "mutually exclusive" with respect to each card's memory range.
1300       ShenandoahHeap::heap()->card_scan()->register_object(result);
1301     }
1302   } else {
1303     // The allocation failed.  If this was a PLAB allocation, we've already retired it and no longer have a PLAB.
1304     if ((req.affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION) && req.is_gc_alloc() &&
1305         (req.type() == ShenandoahAllocRequest::_alloc_plab)) {
1306       // We don't need to disable PLAB promotions because there is no PLAB.  We leave promotions enabled because
1307       // this allows the surrounding infrastructure to retry alloc_plab_slow() with a smaller PLAB size.
1308       ShenandoahThreadLocalData::set_plab_preallocated_promoted(thread, 0);
1309     }
1310   }
1311   return result;
1312 }
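//
// Editorial sketch: the comment above explains why register_object() calls must be mutually
// exclusive per card-table memory range.  The following minimal, standalone C++ sketch (not
// HotSpot code; all names here are illustrative assumptions) shows the kind of multi-field
// card update that racing registrations could corrupt, and how holding a lock across the whole
// update keeps has-object, first-start, and last-start consistent with one another.
//
#include <cstddef>
#include <mutex>

struct CardEntry {
  bool   has_object  = false;
  size_t first_start = 0;   // offset, within the card, of the first object that starts here
  size_t last_start  = 0;   // offset, within the card, of the last object that starts here
};

class CardRegistry {
  CardEntry  _entry;
  std::mutex _lock;         // stands in for the global heap lock held by the allocation path above
public:
  void register_object(size_t start_offset) {
    std::lock_guard<std::mutex> guard(_lock);
    if (!_entry.has_object || start_offset < _entry.first_start) {
      _entry.first_start = start_offset;
    }
    if (!_entry.has_object || start_offset > _entry.last_start) {
      _entry.last_start = start_offset;
    }
    _entry.has_object = true;
  }
};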
1313 
1314 HeapWord* ShenandoahHeap::mem_allocate(size_t size,
1315                                         bool*  gc_overhead_limit_was_exceeded) {
1316   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
1317   return allocate_memory(req, false);
1318 }
1319 
1320 MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
1321                                                              size_t size,
1322                                                              Metaspace::MetadataType mdtype) {
1323   MetaWord* result;
1324 
1325   // Inform metaspace OOM to GC heuristics if class unloading is possible.
1326   ShenandoahHeuristics* h = global_generation()->heuristics();
1327   if (h->can_unload_classes()) {
1328     h->record_metaspace_oom();
1329   }
1330 
1331   // Expand and retry allocation
1332   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1333   if (result != NULL) {
1334     return result;
1335   }
1336 
1337   // Start full GC
1338   collect(GCCause::_metadata_GC_clear_soft_refs);
1339 
1340   // Retry allocation
1341   result = loader_data->metaspace_non_null()->allocate(size, mdtype);
1342   if (result != NULL) {
1343     return result;
1344   }
1345 
1346   // Expand and retry allocation
1347   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);

1386 
1387   void work(uint worker_id) {
1388     if (_concurrent) {
1389       ShenandoahConcurrentWorkerSession worker_session(worker_id);
1390       ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
1391       ShenandoahEvacOOMScope oom_evac_scope;
1392       do_work();
1393     } else {
1394       ShenandoahParallelWorkerSession worker_session(worker_id);
1395       ShenandoahEvacOOMScope oom_evac_scope;
1396       do_work();
1397     }
1398   }
1399 
1400 private:
1401   void do_work() {
1402     ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
1403     ShenandoahHeapRegion* r;
1404     while ((r =_cs->claim_next()) != NULL) {
1405       assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
1406 
1407       _sh->marked_object_iterate(r, &cl);
1408 
1409       if (ShenandoahPacing) {
1410         _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
1411       }
1412       if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
1413         break;
1414       }
1415     }
1416   }
1417 };
1418 
1419 // Unlike ShenandoahEvacuationTask, this iterates over all regions rather than just the collection set.
1420 // This is needed in order to promote humongous start regions whose age() exceeds the tenuring threshold.
1421 class ShenandoahGenerationalEvacuationTask : public WorkerTask {
1422 private:
1423   ShenandoahHeap* const _sh;
1424   ShenandoahRegionIterator *_regions;
1425   bool _concurrent;
1426 public:
1427   ShenandoahGenerationalEvacuationTask(ShenandoahHeap* sh,
1428                                        ShenandoahRegionIterator* iterator,
1429                                        bool concurrent) :
1430     WorkerTask("Shenandoah Evacuation"),
1431     _sh(sh),
1432     _regions(iterator),
1433     _concurrent(concurrent)
1434   {}
1435 
1436   void work(uint worker_id) {
1437     if (_concurrent) {
1438       ShenandoahConcurrentWorkerSession worker_session(worker_id);
1439       ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
1440       ShenandoahEvacOOMScope oom_evac_scope;
1441       do_work();
1442     } else {
1443       ShenandoahParallelWorkerSession worker_session(worker_id);
1444       ShenandoahEvacOOMScope oom_evac_scope;
1445       do_work();
1446     }
1447   }
1448 
1449 private:
1450   void do_work() {
1451     ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
1452     ShenandoahHeapRegion* r;
1453     while ((r = _regions->next()) != nullptr) {
1454       log_debug(gc)("GenerationalEvacuationTask do_work(), looking at %s region " SIZE_FORMAT ", (age: %d) [%s, %s]",
1455                     r->is_old()? "old": r->is_young()? "young": "free", r->index(), r->age(),
1456                     r->is_active()? "active": "inactive",
1457                     r->is_humongous()? (r->is_humongous_start()? "humongous_start": "humongous_continuation"): "regular");
1458       if (r->is_cset()) {
1459         assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
1460         _sh->marked_object_iterate(r, &cl);
1461         if (ShenandoahPacing) {
1462           _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
1463         }
1464       } else if (r->is_young() && r->is_active() && r->is_humongous_start() && (r->age() > InitialTenuringThreshold)) {
1465         // We promote humongous_start regions along with their affiliated continuations during evacuation rather than
1466         // doing this work during a safepoint.  We cannot put humongous regions into the collection set because that
1467         // triggers the load-reference barrier (LRB) to copy on reference fetch.
1468         r->promote_humongous();
1469       }
1470       // else, the region is free, or OLD, or not in the collection set, or a humongous_continuation,
1471       // or a young humongous_start that is too young to be promoted.
1472 
1473       if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
1474         break;
1475       }
1476     }
1477   }
1478 };
1479 
1480 void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
1481   if (ShenandoahHeap::heap()->mode()->is_generational()) {
1482     ShenandoahRegionIterator regions;
1483     ShenandoahGenerationalEvacuationTask task(this, &regions, concurrent);
1484     workers()->run_task(&task);
1485   } else {
1486     ShenandoahEvacuationTask task(this, _collection_set, concurrent);
1487     workers()->run_task(&task);
1488   }
1489 }
1490 
1491 void ShenandoahHeap::trash_cset_regions() {
1492   ShenandoahHeapLocker locker(lock());
1493 
1494   ShenandoahCollectionSet* set = collection_set();
1495   ShenandoahHeapRegion* r;
1496   set->clear_current_index();
1497   while ((r = set->next()) != NULL) {
1498     r->make_trash();
1499   }
1500   collection_set()->clear();
1501 }
1502 
1503 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
1504   st->print_cr("Heap Regions:");
1505   st->print_cr("EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HC=humongous continuation, CS=collection set, T=trash, P=pinned");
1506   st->print_cr("BTE=bottom/top/end, U=used, T=TLAB allocs, G=GCLAB allocs, S=shared allocs, L=live data");
1507   st->print_cr("R=root, CP=critical pins, TAMS=top-at-mark-start, UWM=update watermark");
1508   st->print_cr("SN=alloc sequence number");
1509 
1510   for (size_t i = 0; i < num_regions(); i++) {
1511     get_region(i)->print_on(st);
1512   }
1513 }
1514 
1515 size_t ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
1516   assert(start->is_humongous_start(), "reclaim regions starting with the first one");
1517 
1518   oop humongous_obj = cast_to_oop(start->bottom());
1519   size_t size = humongous_obj->size();
1520   size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
1521   size_t index = start->index() + required_regions - 1;
1522 
1523   assert(!start->has_live(), "liveness must be zero");
1524 
1525   for(size_t i = 0; i < required_regions; i++) {
1526     // Reclaim from the tail. Otherwise, an assertion fails when printing the region to the trace log,
1527     // because it expects every humongous continuation to belong to a run that starts with a humongous start region.
1528     ShenandoahHeapRegion* region = get_region(index --);
1529 
1530     assert(region->is_humongous(), "expect correct humongous start or continuation");
1531     assert(!region->is_cset(), "Humongous region should not be in collection set");
1532 
1533     region->make_trash_immediate();
1534   }
1535   return required_regions;
1536 }
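//
// Editorial sketch: a small, standalone illustration (assumed values; not HotSpot code) of the
// arithmetic used above.  A humongous object needs ceil(object_bytes / region_bytes) contiguous
// regions, and they are reclaimed from the tail so that any region still present always sits in
// a run that begins with its humongous start region.
//
#include <cstddef>

static size_t humongous_required_regions(size_t object_bytes, size_t region_bytes) {
  return (object_bytes + region_bytes - 1) / region_bytes;   // ceiling division
}

// Example: with 4 MB regions, a 9 MB object needs humongous_required_regions(9 MB, 4 MB) == 3
// regions; they would be trashed in the order start+2, start+1, start.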
1537 
1538 class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
1539 public:
1540   ShenandoahCheckCleanGCLABClosure() {}
1541   void do_thread(Thread* thread) {
1542     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1543     assert(gclab != NULL, "GCLAB should be initialized for %s", thread->name());
1544     assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
1545 
1546     PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1547     assert(plab != NULL, "PLAB should be initialized for %s", thread->name());
1548     assert(plab->words_remaining() == 0, "PLAB should not need retirement");
1549   }
1550 };
1551 
1552 class ShenandoahRetireGCLABClosure : public ThreadClosure {
1553 private:
1554   bool const _resize;
1555 public:
1556   ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
1557   void do_thread(Thread* thread) {
1558     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1559     assert(gclab != NULL, "GCLAB should be initialized for %s", thread->name());
1560     gclab->retire();
1561     if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1562       ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1563     }
1564 
1565     PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1566     assert(plab != NULL, "PLAB should be initialized for %s", thread->name());
1567 
1568     // There are two reasons to retire all PLABs between old-gen evacuation passes:
1569     //  1. We need to make the PLAB memory parsable by remembered-set scanning.
1570     //  2. We need to establish a trustworthy UpdateWaterMark value within each old-gen heap region.
1571     ShenandoahHeap::heap()->retire_plab(plab, thread);
1572     if (_resize && ShenandoahThreadLocalData::plab_size(thread) > 0) {
1573       ShenandoahThreadLocalData::set_plab_size(thread, 0);
1574     }
1575   }
1576 };
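//
// Editorial sketch: the allocation path earlier expends the entire PLAB size against the
// promotion reserve up front, and retiring the PLAB is where the unused portion would be
// returned.  This standalone sketch (illustrative names only, not the ShenandoahHeap API)
// shows that expend/unexpend bookkeeping against a fixed reserve.
//
#include <atomic>
#include <cassert>
#include <cstddef>

class PromotionBudget {
  std::atomic<size_t> _expended{0};
  const size_t        _reserve;
public:
  explicit PromotionBudget(size_t reserve_bytes) : _reserve(reserve_bytes) {}

  // At PLAB allocation: charge the whole buffer so promotion cannot overreach the reserve.
  bool expend(size_t bytes) {
    size_t cur = _expended.load();
    do {
      if (cur + bytes > _reserve) {
        return false;                       // budget exhausted; caller disables promotions
      }
    } while (!_expended.compare_exchange_weak(cur, cur + bytes));
    return true;
  }

  // At PLAB retirement: give back whatever part of the buffer was never used for promotion.
  void unexpend(size_t unused_bytes) {
    assert(unused_bytes <= _expended.load() && "cannot unexpend more than was expended");
    _expended.fetch_sub(unused_bytes);
  }
};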
1577 
1578 void ShenandoahHeap::labs_make_parsable() {
1579   assert(UseTLAB, "Only call with UseTLAB");
1580 
1581   ShenandoahRetireGCLABClosure cl(false);
1582 
1583   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1584     ThreadLocalAllocBuffer& tlab = t->tlab();
1585     tlab.make_parsable();
1586     cl.do_thread(t);
1587   }
1588 
1589   workers()->threads_do(&cl);
1590 }
1591 
1592 void ShenandoahHeap::tlabs_retire(bool resize) {
1593   assert(UseTLAB, "Only call with UseTLAB");
1594   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");

1612   }
1613   workers()->threads_do(&cl);
1614 #endif
1615 }
1616 
1617 void ShenandoahHeap::gclabs_retire(bool resize) {
1618   assert(UseTLAB, "Only call with UseTLAB");
1619   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1620 
1621   ShenandoahRetireGCLABClosure cl(resize);
1622   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1623     cl.do_thread(t);
1624   }
1625   workers()->threads_do(&cl);
1626 
1627   if (safepoint_workers() != NULL) {
1628     safepoint_workers()->threads_do(&cl);
1629   }
1630 }
1631 
1632 class ShenandoahTagGCLABClosure : public ThreadClosure {
1633 public:
1634   void do_thread(Thread* thread) {
1635     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1636     assert(gclab != NULL, "GCLAB should be initialized for %s", thread->name());
1637     if (gclab->words_remaining() > 0) {
1638       ShenandoahHeapRegion* r = ShenandoahHeap::heap()->heap_region_containing(gclab->allocate(0));
1639       r->set_young_lab_flag();
1640     }
1641   }
1642 };
1643 
1644 void ShenandoahHeap::set_young_lab_region_flags() {
1645   if (!UseTLAB) {
1646     return;
1647   }
1648   for (size_t i = 0; i < _num_regions; i++) {
1649     _regions[i]->clear_young_lab_flags();
1650   }
1651   ShenandoahTagGCLABClosure cl;
1652   workers()->threads_do(&cl);
1653   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1654     cl.do_thread(t);
1655     ThreadLocalAllocBuffer& tlab = t->tlab();
1656     if (tlab.end() != NULL) {
1657       ShenandoahHeapRegion* r = heap_region_containing(tlab.start());
1658       r->set_young_lab_flag();
1659     }
1660   }
1661 }
1662 
1663 // Returns size in bytes
1664 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1665   if (ShenandoahElasticTLAB) {
1666     // With Elastic TLABs, return the max allowed size, and let the allocation path
1667     // figure out the safe size for current allocation.
1668     return ShenandoahHeapRegion::max_tlab_size_bytes();
1669   } else {
1670     return MIN2(_free_set->unsafe_peek_free(), ShenandoahHeapRegion::max_tlab_size_bytes());
1671   }
1672 }
1673 
1674 size_t ShenandoahHeap::max_tlab_size() const {
1675   // Returns size in words
1676   return ShenandoahHeapRegion::max_tlab_size_words();
1677 }
1678 
1679 void ShenandoahHeap::collect(GCCause::Cause cause) {
1680   control_thread()->request_gc(cause);
1681 }
1682 

2039       if (start >= max) break;
2040 
2041       for (size_t i = cur; i < end; i++) {
2042         ShenandoahHeapRegion* current = _heap->get_region(i);
2043         _blk->heap_region_do(current);
2044       }
2045     }
2046   }
2047 };
2048 
2049 void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
2050   assert(blk->is_thread_safe(), "Only thread-safe closures here");
2051   if (num_regions() > ShenandoahParallelRegionStride) {
2052     ShenandoahParallelHeapRegionTask task(blk);
2053     workers()->run_task(&task);
2054   } else {
2055     heap_region_iterate(blk);
2056   }
2057 }
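//
// Editorial sketch: a standalone illustration (not HotSpot code) of the stride-based claiming
// used by the parallel region task above.  Workers atomically claim blocks of `stride` region
// indices and apply the closure to every region in their block; small heaps skip the task and
// iterate serially, just as parallel_heap_region_iterate() does.  Here a plain index stands in
// for the ShenandoahHeapRegion* that the real closure receives.
//
#include <algorithm>
#include <atomic>
#include <cstddef>

template <typename Closure>
void parallel_region_iterate_worker(Closure& blk, std::atomic<size_t>& claim_index,
                                    size_t num_regions, size_t stride) {
  while (true) {
    size_t start = claim_index.fetch_add(stride);      // claim the next block of regions
    if (start >= num_regions) {
      break;                                           // nothing left to claim
    }
    size_t end = std::min(start + stride, num_regions);
    for (size_t i = start; i < end; i++) {
      blk.heap_region_do(i);                           // stand-in: closure applied per region index
    }
  }
}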
2058 

2059 class ShenandoahRendezvousClosure : public HandshakeClosure {
2060 public:
2061   inline ShenandoahRendezvousClosure() : HandshakeClosure("ShenandoahRendezvous") {}
2062   inline void do_thread(Thread* thread) {}
2063 };
2064 
2065 void ShenandoahHeap::rendezvous_threads() {
2066   ShenandoahRendezvousClosure cl;
2067   Handshake::execute(&cl);
2068 }
2069 
2070 void ShenandoahHeap::recycle_trash() {
2071   free_set()->recycle_trash();
2072 }
2073 

2074 void ShenandoahHeap::do_class_unloading() {
2075   _unloader.unload();
2076 }
2077 
2078 void ShenandoahHeap::stw_weak_refs(bool full_gc) {
2079   // Weak refs processing
2080   ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
2081                                                 : ShenandoahPhaseTimings::degen_gc_weakrefs;
2082   ShenandoahTimingsTracker t(phase);
2083   ShenandoahGCWorkerPhase worker_phase(phase);
2084   active_generation()->ref_processor()->process_references(phase, workers(), false /* concurrent */);
2085 }
2086 
2087 void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
2088   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2089 
2090   // Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to
2091   // make them parsable for update code to work correctly. Plus, we can compute new sizes
2092   // for future GCLABs here.
2093   if (UseTLAB) {
2094     ShenandoahGCPhase phase(concurrent ?
2095                             ShenandoahPhaseTimings::init_update_refs_manage_gclabs :
2096                             ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
2097     gclabs_retire(ResizeTLAB);
2098   }
2099 
2100   _update_refs_iterator.reset();
2101 }
2102 
2103 void ShenandoahHeap::set_gc_state_all_threads(char state) {
2104   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
2105     ShenandoahThreadLocalData::set_gc_state(t, state);
2106   }
2107 }
2108 
2109 void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) {
2110   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should really be Shenandoah safepoint");
2111   _gc_state.set_cond(mask, value);
2112   set_gc_state_all_threads(_gc_state.raw_value());
2113 }
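//
// Editorial sketch: the state byte above is mirrored into every thread so fast-path barrier
// checks can test a single thread-local byte rather than a shared heap field.  A minimal
// standalone illustration follows; the bit values and names are assumptions for the sketch,
// not the real ShenandoahHeap encoding.
//
#include <cstdint>

namespace gc_state_sketch {

enum StateBits : uint8_t {
  HAS_FORWARDED = 1u << 0,
  MARKING       = 1u << 1,
  EVACUATION    = 1u << 2,
  UPDATEREFS    = 1u << 3
};

struct ThreadLocalGCState {
  uint8_t gc_state = 0;                                  // refreshed at each safepoint

  bool evacuation_in_progress() const { return (gc_state & EVACUATION) != 0; }
  bool has_forwarded_objects()  const { return (gc_state & HAS_FORWARDED) != 0; }
};

} // namespace gc_state_sketch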
2114 
2115 void ShenandoahHeap::set_concurrent_young_mark_in_progress(bool in_progress) {
2116   if (has_forwarded_objects()) {
2117     set_gc_state_mask(YOUNG_MARKING | UPDATEREFS, in_progress);
2118   } else {
2119     set_gc_state_mask(YOUNG_MARKING, in_progress);
2120   }
2121 
2122   manage_satb_barrier(in_progress);
2123 }
2124 
2125 void ShenandoahHeap::set_concurrent_old_mark_in_progress(bool in_progress) {
2126   if (has_forwarded_objects()) {
2127     set_gc_state_mask(OLD_MARKING | UPDATEREFS, in_progress);
2128   } else {
2129     set_gc_state_mask(OLD_MARKING, in_progress);
2130   }
2131 
2132   manage_satb_barrier(in_progress);
2133 }
2134 
2135 void ShenandoahHeap::set_prepare_for_old_mark_in_progress(bool in_progress) {
2136   // Unlike other set-gc-state functions, this may happen outside safepoint.
2137   // Is only set and queried by control thread, so no coherence issues.
2138   _prepare_for_old_mark = in_progress;
2139 }
2140 
2141 void ShenandoahHeap::set_aging_cycle(bool in_progress) {
2142   _is_aging_cycle.set_cond(in_progress);
2143 }
2144 
2145 void ShenandoahHeap::manage_satb_barrier(bool active) {
2146   if (is_concurrent_mark_in_progress()) {
2147     // Ignore request to deactivate barrier while concurrent mark is in progress.
2148     // Do not attempt to re-activate the barrier if it is already active.
2149     if (active && !ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
2150       ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
2151     }
2152   } else {
2153     // No concurrent marking is in progress so honor request to deactivate,
2154     // but only if the barrier is already active.
2155     if (!active && ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
2156       ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
2157     }
2158   }
2159 }
2160 
2161 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
2162   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
2163   set_gc_state_mask(EVACUATION, in_progress);
2164 }
2165 
2166 void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
2167   if (in_progress) {
2168     _concurrent_strong_root_in_progress.set();
2169   } else {
2170     _concurrent_strong_root_in_progress.unset();
2171   }
2172 }
2173 
2174 void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
2175   set_gc_state_mask(WEAK_ROOTS, cond);
2176 }
2177 
2178 GCTracer* ShenandoahHeap::tracer() {

2183   return _free_set->used();
2184 }
2185 
2186 bool ShenandoahHeap::try_cancel_gc() {
2187   while (true) {
2188     jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
2189     if (prev == CANCELLABLE) return true;
2190     else if (prev == CANCELLED) return false;
2191     assert(ShenandoahSuspendibleWorkers, "should not get here when not using suspendible workers");
2192     assert(prev == NOT_CANCELLED, "must be NOT_CANCELLED");
2193     Thread* thread = Thread::current();
2194     if (thread->is_Java_thread()) {
2195       // We need to provide a safepoint here, otherwise we might
2196       // spin forever if a SP is pending.
2197       ThreadBlockInVM sp(JavaThread::cast(thread));
2198       SpinPause();
2199     }
2200   }
2201 }
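//
// Editorial sketch: a standalone illustration (not HotSpot code) of the three-state protocol
// above.  CANCELLABLE flips to CANCELLED via compare-and-swap; NOT_CANCELLED is a transient
// third state, and when it is observed the requester simply retries (the real code also yields
// to a pending safepoint while it spins).
//
#include <atomic>
#include <cstdint>

enum CancelState : int8_t { STATE_CANCELLABLE = 0, STATE_CANCELLED = 1, STATE_NOT_CANCELLED = 2 };

static bool try_cancel(std::atomic<int8_t>& state) {
  while (true) {
    int8_t expected = STATE_CANCELLABLE;
    if (state.compare_exchange_strong(expected, STATE_CANCELLED)) {
      return true;                        // we performed the cancellation
    }
    if (expected == STATE_CANCELLED) {
      return false;                       // someone else already cancelled
    }
    // expected == STATE_NOT_CANCELLED: state is still settling; spin and retry
  }
}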
2202 
2203 void ShenandoahHeap::cancel_concurrent_mark() {
2204   _young_generation->cancel_marking();
2205   _old_generation->cancel_marking();
2206   _global_generation->cancel_marking();
2207 
2208   ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
2209 }
2210 
2211 void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
2212   if (try_cancel_gc()) {
2213     FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
2214     log_info(gc)("%s", msg.buffer());
2215     Events::log(Thread::current(), "%s", msg.buffer());
2216     _cancel_requested_time = os::elapsedTime();
2217     if (cause == GCCause::_shenandoah_upgrade_to_full_gc) {
2218       _upgraded_to_full = true;
2219     }
2220   }
2221 }
2222 
2223 uint ShenandoahHeap::max_workers() {
2224   return _max_workers;
2225 }
2226 
2227 void ShenandoahHeap::stop() {
2228   // The shutdown sequence should be able to terminate when GC is running.
2229 
2230   // Step 0a. Stop requesting collections.
2231   regulator_thread()->stop();
2232 
2233   // Step 0b. Notify policy to disable event recording.
2234   _shenandoah_policy->record_shutdown();
2235 
2236   // Step 1. Notify control thread that we are in shutdown.
2237   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
2238   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
2239   control_thread()->prepare_for_graceful_shutdown();
2240 
2241   // Step 2. Notify GC workers that we are cancelling GC.
2242   cancel_gc(GCCause::_shenandoah_stop_vm);
2243 
2244   // Step 3. Wait until GC worker exits normally.
2245   control_thread()->stop();
2246 }
2247 
2248 void ShenandoahHeap::stw_unload_classes(bool full_gc) {
2249   if (!unload_classes()) return;
2250   // Unload classes and purge SystemDictionary.
2251   {
2252     ShenandoahPhaseTimings::Phase phase = full_gc ?

2318 }
2319 
2320 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
2321   set_gc_state_mask(HAS_FORWARDED, cond);
2322 }
2323 
2324 void ShenandoahHeap::set_unload_classes(bool uc) {
2325   _unload_classes.set_cond(uc);
2326 }
2327 
2328 bool ShenandoahHeap::unload_classes() const {
2329   return _unload_classes.is_set();
2330 }
2331 
2332 address ShenandoahHeap::in_cset_fast_test_addr() {
2333   ShenandoahHeap* heap = ShenandoahHeap::heap();
2334   assert(heap->collection_set() != NULL, "Sanity");
2335   return (address) heap->collection_set()->biased_map_address();
2336 }
2337 




2338 address ShenandoahHeap::gc_state_addr() {
2339   return (address) ShenandoahHeap::heap()->_gc_state.addr_of();
2340 }
2341 




2342 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
2343   if (mode()->is_generational()) {
2344     young_generation()->reset_bytes_allocated_since_gc_start();
2345     old_generation()->reset_bytes_allocated_since_gc_start();
2346   }
2347 
2348   global_generation()->reset_bytes_allocated_since_gc_start();
2349 }
2350 
2351 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
2352   _degenerated_gc_in_progress.set_cond(in_progress);
2353 }
2354 
2355 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2356   _full_gc_in_progress.set_cond(in_progress);
2357 }
2358 
2359 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2360   assert (is_full_gc_in_progress(), "should be");
2361   _full_gc_move_in_progress.set_cond(in_progress);
2362 }
2363 
2364 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2365   set_gc_state_mask(UPDATEREFS, in_progress);
2366 }
2367 
2368 void ShenandoahHeap::register_nmethod(nmethod* nm) {

2397     if (r->is_active()) {
2398       if (r->is_pinned()) {
2399         if (r->pin_count() == 0) {
2400           r->make_unpinned();
2401         }
2402       } else {
2403         if (r->pin_count() > 0) {
2404           r->make_pinned();
2405         }
2406       }
2407     }
2408   }
2409 
2410   assert_pinned_region_status();
2411 }
2412 
2413 #ifdef ASSERT
2414 void ShenandoahHeap::assert_pinned_region_status() {
2415   for (size_t i = 0; i < num_regions(); i++) {
2416     ShenandoahHeapRegion* r = get_region(i);
2417     if (active_generation()->contains(r)) {
2418       assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
2419              "Region " SIZE_FORMAT " pinning status is inconsistent", i);
2420     }
2421   }
2422 }
2423 #endif
2424 
2425 ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
2426   return _gc_timer;
2427 }
2428 
2429 void ShenandoahHeap::prepare_concurrent_roots() {
2430   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2431   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2432   set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
2433   set_concurrent_weak_root_in_progress(true);
2434   if (unload_classes()) {
2435     _unloader.prepare();
2436   }
2437 }
2438 
2439 void ShenandoahHeap::finish_concurrent_roots() {
2440   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

2460       assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
2461     } else {
2462       // Use ConcGCThreads outside safepoints
2463       assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
2464     }
2465   }
2466 }
2467 #endif
2468 
2469 ShenandoahVerifier* ShenandoahHeap::verifier() {
2470   guarantee(ShenandoahVerify, "Should be enabled");
2471   assert (_verifier != NULL, "sanity");
2472   return _verifier;
2473 }
2474 
2475 template<bool CONCURRENT>
2476 class ShenandoahUpdateHeapRefsTask : public WorkerTask {
2477 private:
2478   ShenandoahHeap* _heap;
2479   ShenandoahRegionIterator* _regions;
2480   ShenandoahRegionChunkIterator* _work_chunks;
2481 
2482 public:
2483   explicit ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions,
2484                                         ShenandoahRegionChunkIterator* work_chunks) :
2485     WorkerTask("Shenandoah Update References"),
2486     _heap(ShenandoahHeap::heap()),
2487     _regions(regions),
2488     _work_chunks(work_chunks)
2489   {
2490   }
2491 
2492   void work(uint worker_id) {
2493     if (CONCURRENT) {
2494       ShenandoahConcurrentWorkerSession worker_session(worker_id);
2495       ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
2496       do_work<ShenandoahConcUpdateRefsClosure>(worker_id);
2497     } else {
2498       ShenandoahParallelWorkerSession worker_session(worker_id);
2499       do_work<ShenandoahSTWUpdateRefsClosure>(worker_id);
2500     }
2501   }
2502 
2503 private:
2504   template<class T>
2505   void do_work(uint worker_id) {
2506     T cl;
2507     ShenandoahHeapRegion* r = _regions->next();
2508     // We update references for global, old, and young collections.
2509     assert(_heap->active_generation()->is_mark_complete(), "Expected complete marking");
2510     ShenandoahMarkingContext* const ctx = _heap->marking_context();
2511     bool is_mixed = _heap->collection_set()->has_old_regions();
2512     while (r != NULL) {
2513       HeapWord* update_watermark = r->get_update_watermark();
2514       assert (update_watermark >= r->bottom(), "sanity");
2515 
2516       log_debug(gc)("ShenandoahUpdateHeapRefsTask::do_work(%u) looking at region " SIZE_FORMAT, worker_id, r->index());
2517       bool region_progress = false;
2518       if (r->is_active() && !r->is_cset()) {
2519         if (!_heap->mode()->is_generational() || (r->affiliation() == ShenandoahRegionAffiliation::YOUNG_GENERATION)) {
2520           _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2521           region_progress = true;
2522         } else if (r->affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION) {
2523           if (_heap->active_generation()->generation_mode() == GLOBAL) {
2524             // Note that a GLOBAL collection is not as effectively balanced as young and mixed cycles.  This is because
2525             // concurrent GC threads are parceled out entire heap regions of work at a time and there
2526             // is no "catchup phase" of remembered-set scanning, during which the smaller parcels of work
2527             // can be distributed more evenly across threads.
2528 
2529             // TODO: Consider an improvement to load balance GLOBAL GC.
2530             _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2531             region_progress = true;
2532           }
2533           // Otherwise, this is an old region in a young or mixed cycle.  Process it during a second phase, below.
2534           // Don't bother to report pacing progress in this case.
2535         } else {
2536           // Because updating of references runs concurrently, it is possible that a FREE inactive region transitions
2537           // to a non-free active region while this loop is executing.  Whenever this happens, the changing of a region's
2538           // active status may propagate at a different speed than the changing of the region's affiliation.
2539 
2540           // When we reach this control point, it is because a race has allowed a region's is_active() status to be seen
2541           // by this thread before the region's affiliation() is seen by this thread.
2542 
2543           // It's ok for this race to occur because the newly transformed region does not have any references to be
2544           // updated.
2545 
2546           assert(r->get_update_watermark() == r->bottom(),
2547                  "%s Region " SIZE_FORMAT " is_active but not recognized as YOUNG or OLD so must be newly transitioned from FREE",
2548                  affiliation_name(r->affiliation()), r->index());
2549         }
2550       }
2551       if (region_progress && ShenandoahPacing) {
2552         _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
2553       }
2554       if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
2555         return;
2556       }
2557       r = _regions->next();
2558     }
2559     if (_heap->mode()->is_generational() && (_heap->active_generation()->generation_mode() != GLOBAL)) {
2560       // Since this is generational and not GLOBAL, we have to process the remembered set.  There is no
2561       // remembered-set processing outside generational mode or during a GLOBAL cycle.
2562 
2563       // After this thread has exhausted its traditional update-refs work, it continues with updating refs within the remembered set.
2564       // The remembered-set workload is better balanced between threads, so threads that are "behind" can catch up with other
2565       // threads during this phase, allowing all threads to work more effectively in parallel.
2566       struct ShenandoahRegionChunk assignment;
2567       bool have_work = _work_chunks->next(&assignment);
2568       RememberedScanner* scanner = _heap->card_scan();
2569       while (have_work) {
2570         ShenandoahHeapRegion* r = assignment._r;
2571         if (r->is_active() && !r->is_cset() && (r->affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION)) {
2572           HeapWord* start_of_range = r->bottom() + assignment._chunk_offset;
2573           HeapWord* end_of_range = r->get_update_watermark();
2574           if (end_of_range > start_of_range + assignment._chunk_size) {
2575             end_of_range = start_of_range + assignment._chunk_size;
2576           }
2577 
2578           // Old region in a young cycle or mixed cycle.
2579           if (is_mixed) {
2580             // TODO: For mixed evac, consider building an old-gen remembered set that allows restricted updating
2581             // within old-gen HeapRegions.  This remembered set can be constructed by old-gen concurrent marking
2582             // and augmented by card marking.  For example, old-gen concurrent marking can remember for each old-gen
2583             // card which other old-gen regions it refers to: none, one-other specifically, multiple-other non-specific.
2584             // Update references when _mixed_evac processes each old-gen memory range that has a traditional DIRTY
2585             // card, or if the "old-gen remembered set" indicates that this card holds pointers specifically to an
2586             // old-gen region in the most recent collection set, or if this card holds pointers to other non-specific
2587             // old-gen heap regions.
2588 
2589             if (r->is_humongous()) {
2590               if (start_of_range < end_of_range) {
2591                 // Need to examine both dirty and clean cards during mixed evac.
2592                 r->oop_iterate_humongous_slice(&cl, false, start_of_range, assignment._chunk_size, true, CONCURRENT);
2593               }
2594             } else {
2595               // Since this is mixed evacuation, old regions that are candidates for collection have not been coalesced
2596               // and filled.  Use mark bits to find objects that need to be updated.
2597               //
2598               // Future TODO: establish a second remembered set to identify which old-gen regions point to other old-gen
2599               // regions which are in the collection set for a particular mixed evacuation.
2600               if (start_of_range < end_of_range) {
2601                 HeapWord* p = nullptr;
2602                 size_t card_index = scanner->card_index_for_addr(start_of_range);
2603                 // In case the last object in my range spans the boundary of my chunk, I may need to scan all the way to top().
2604                 ShenandoahObjectToOopBoundedClosure<T> objs(&cl, start_of_range, r->top());
2605 
2606                 // Any object that begins in a previous range is part of a different scanning assignment.  Any object that
2607                 // starts after end_of_range is also not my responsibility.  (Either allocated during evacuation, so does
2608                 // not hold pointers to from-space, or is beyond the range of my assigned work chunk.)
2609 
2610                 // Find the first object that begins in my range, if there is one.
2611                 p = start_of_range;
2612                 oop obj = cast_to_oop(p);
2613                 HeapWord* tams = ctx->top_at_mark_start(r);
2614                 if (p >= tams) {
2615                   // We cannot use ctx->is_marked(obj) to test whether an object begins at this address.  Instead,
2616                   // we need to use the remembered set crossing map to advance p to the first object that starts
2617                   // within the enclosing card.
2618 
2619                   while (true) {
2620                     HeapWord* first_object = scanner->first_object_in_card(card_index);
2621                     if (first_object != nullptr) {
2622                       p = first_object;
2623                       break;
2624                     } else if (scanner->addr_for_card_index(card_index + 1) < end_of_range) {
2625                       card_index++;
2626                     } else {
2627                       // Force the loop that follows to immediately terminate.
2628                       p = end_of_range;
2629                       break;
2630                     }
2631                   }
2632                   obj = cast_to_oop(p);
2633                   // Note: p may be >= end_of_range
2634                 } else if (!ctx->is_marked(obj)) {
2635                   p = ctx->get_next_marked_addr(p, tams);
2636                   obj = cast_to_oop(p);
2637                   // If there are no more marked objects before tams, this returns tams.
2638                   // Note that tams is either >= end_of_range, or tams is the start of an object that is marked.
2639                 }
2640                 while (p < end_of_range) {
2641                   // p is known to point to the beginning of marked object obj
2642                   objs.do_object(obj);
2643                   HeapWord* prev_p = p;
2644                   p += obj->size();
2645                   if (p < tams) {
2646                     p = ctx->get_next_marked_addr(p, tams);
2647                     // If there are no more marked objects before tams, this returns tams.  Note that tams is
2648                     // either >= end_of_range, or tams is the start of an object that is marked.
2649                   }
2650                   assert(p != prev_p, "Lack of forward progress");
2651                   obj = cast_to_oop(p);
2652                 }
2653               }
2654             }
2655           } else {
2656             // This is a young evacuation.
2657             if (start_of_range < end_of_range) {
2658               size_t cluster_size =
2659                 CardTable::card_size_in_words() * ShenandoahCardCluster<ShenandoahDirectCardMarkRememberedSet>::CardsPerCluster;
2660               size_t clusters = assignment._chunk_size / cluster_size;
2661               assert(clusters * cluster_size == assignment._chunk_size, "Chunk assignment must align on cluster boundaries");
2662               scanner->process_region_slice(r, assignment._chunk_offset, clusters, end_of_range, &cl, true, CONCURRENT);
2663             }
2664           }
2665           if (ShenandoahPacing && (start_of_range < end_of_range)) {
2666             _heap->pacer()->report_updaterefs(pointer_delta(end_of_range, start_of_range));
2667           }
2668         }
2669         // Otherwise, this work chunk had nothing for me to do, so do not report pacer progress.
2670 
2671         // Before we take responsibility for another chunk of work, see if cancellation is requested.
2672         if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
2673           return;
2674         }
2675         have_work = _work_chunks->next(&assignment);
2676       }
2677     }
2678   }
2679 };
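//
// Editorial sketch: a worked example (assumed values, not HotSpot constants) of the cluster
// arithmetic in the remembered-set phase above.  With 512-byte cards (64 heap words on a
// 64-bit VM) and, purely for illustration, 64 cards per cluster, a cluster covers
// 64 * 64 = 4096 words, so a 16384-word chunk holds exactly 4 clusters; the assert in the
// young-evacuation branch simply requires chunks to be cluster-aligned.
//
#include <cassert>
#include <cstddef>

static size_t clusters_in_chunk(size_t chunk_words, size_t card_words, size_t cards_per_cluster) {
  size_t cluster_words = card_words * cards_per_cluster;
  assert(chunk_words % cluster_words == 0 && "chunk assignment must align on cluster boundaries");
  return chunk_words / cluster_words;
}

// Example: clusters_in_chunk(16384, 64, 64) == 4.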
2680 
2681 void ShenandoahHeap::update_heap_references(bool concurrent) {
2682   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2683   ShenandoahRegionChunkIterator work_list(workers()->active_workers());
2684 
2685   if (concurrent) {
2686     ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator, &work_list);
2687     workers()->run_task(&task);
2688   } else {
2689     ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator, &work_list);
2690     workers()->run_task(&task);
2691   }
2692 }
2693 

2694 class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
2695 private:
2696   ShenandoahMarkingContext* _ctx;
2697   ShenandoahHeapLock* const _lock;
2698   bool _is_generational;
2699 
2700 public:
2701   ShenandoahFinalUpdateRefsUpdateRegionStateClosure(
2702     ShenandoahMarkingContext* ctx) : _ctx(ctx), _lock(ShenandoahHeap::heap()->lock()),
2703                                      _is_generational(ShenandoahHeap::heap()->mode()->is_generational()) { }
2704 
2705   void heap_region_do(ShenandoahHeapRegion* r) {
2706 
2707     // Maintenance of region age must follow evacuation in order to account for evacuation allocations within survivor
2708     // regions.  We consult region age during the subsequent evacuation to determine whether certain objects need to
2709     // be promoted.
2710     if (_is_generational && r->is_young()) {
2711       HeapWord *tams = _ctx->top_at_mark_start(r);
2712       HeapWord *top = r->top();
2713 
2714       // Allocations move the watermark when top moves.  However, compacting
2715       // objects will sometimes lower top beneath the watermark, after which
2716       // attempts to read the watermark will assert out (the watermark should
2717       // not be higher than top).
2718       if (top > tams) {
2719         // There have been allocations in this region since the start of the cycle.
2720         // Any objects new to this region must not assimilate elevated age.
2721         r->reset_age();
2722       } else if (ShenandoahHeap::heap()->is_aging_cycle()) {
2723         r->increment_age();
2724       }
2725     }
2726 
2727     // Drop unnecessary "pinned" state from regions that do not have CP marks
2728     // anymore, as this allows trashing them.

2729     if (r->is_active()) {
2730       if (r->is_pinned()) {
2731         if (r->pin_count() == 0) {
2732           ShenandoahHeapLocker locker(_lock);
2733           r->make_unpinned();
2734         }
2735       } else {
2736         if (r->pin_count() > 0) {
2737           ShenandoahHeapLocker locker(_lock);
2738           r->make_pinned();
2739         }
2740       }
2741     }
2742   }
2743 
2744   bool is_thread_safe() { return true; }
2745 };
2746 
2747 void ShenandoahHeap::update_heap_region_states(bool concurrent) {
2748   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2749   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2750 
2751   {
2752     ShenandoahGCPhase phase(concurrent ?
2753                             ShenandoahPhaseTimings::final_update_refs_update_region_states :
2754                             ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
2755     ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl (active_generation()->complete_marking_context());
2756     parallel_heap_region_iterate(&cl);
2757 
2758     assert_pinned_region_status();
2759   }
2760 
2761   {
2762     ShenandoahGCPhase phase(concurrent ?
2763                             ShenandoahPhaseTimings::final_update_refs_trash_cset :
2764                             ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
2765     trash_cset_regions();
2766   }
2767 }
2768 
2769 void ShenandoahHeap::rebuild_free_set(bool concurrent) {
2770   {
2771     ShenandoahGCPhase phase(concurrent ?
2772                             ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
2773                             ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
2774     ShenandoahHeapLocker locker(lock());
2775     _free_set->rebuild();

2869   EventMark em("%s", msg);
2870 
2871   op_uncommit(shrink_before, shrink_until);
2872 }
2873 
2874 void ShenandoahHeap::try_inject_alloc_failure() {
2875   if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2876     _inject_alloc_failure.set();
2877     os::naked_short_sleep(1);
2878     if (cancelled_gc()) {
2879       log_info(gc)("Allocation failure was successfully injected");
2880     }
2881   }
2882 }
2883 
2884 bool ShenandoahHeap::should_inject_alloc_failure() {
2885   return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2886 }
2887 
2888 void ShenandoahHeap::initialize_serviceability() {
2889   if (mode()->is_generational()) {
2890     _young_gen_memory_pool = new ShenandoahYoungGenMemoryPool(this);
2891     _old_gen_memory_pool = new ShenandoahOldGenMemoryPool(this);
2892     _cycle_memory_manager.add_pool(_young_gen_memory_pool);
2893     _cycle_memory_manager.add_pool(_old_gen_memory_pool);
2894     _stw_memory_manager.add_pool(_young_gen_memory_pool);
2895     _stw_memory_manager.add_pool(_old_gen_memory_pool);
2896   } else {
2897     _memory_pool = new ShenandoahMemoryPool(this);
2898     _cycle_memory_manager.add_pool(_memory_pool);
2899     _stw_memory_manager.add_pool(_memory_pool);
2900   }
2901 }
2902 
2903 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2904   GrowableArray<GCMemoryManager*> memory_managers(2);
2905   memory_managers.append(&_cycle_memory_manager);
2906   memory_managers.append(&_stw_memory_manager);
2907   return memory_managers;
2908 }
2909 
2910 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2911   GrowableArray<MemoryPool*> memory_pools(1);
2912   if (mode()->is_generational()) {
2913     memory_pools.append(_young_gen_memory_pool);
2914     memory_pools.append(_old_gen_memory_pool);
2915   } else {
2916     memory_pools.append(_memory_pool);
2917   }
2918   return memory_pools;
2919 }
2920 
2921 MemoryUsage ShenandoahHeap::memory_usage() {
2922   return MemoryUsage(_initial_size, used(), committed(), max_capacity());
2923 }
2924 
2925 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2926   _heap(ShenandoahHeap::heap()),
2927   _index(0) {}
2928 
2929 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2930   _heap(heap),
2931   _index(0) {}
2932 
2933 void ShenandoahRegionIterator::reset() {
2934   _index = 0;
2935 }
2936 
2937 bool ShenandoahRegionIterator::has_next() const {
2938   return _index < _heap->num_regions();
2939 }
2940 
2941 char ShenandoahHeap::gc_state() const {
2942   return _gc_state.raw_value();
2943 }
2944 
2945 ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
2946 #ifdef ASSERT
2947   assert(_liveness_cache != NULL, "sanity");
2948   assert(worker_id < _max_workers, "sanity");
2949   for (uint i = 0; i < num_regions(); i++) {
2950     assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
2951   }
2952 #endif
2953   return _liveness_cache[worker_id];
2954 }
2955 
2956 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
2957   assert(worker_id < _max_workers, "sanity");
2958   assert(_liveness_cache != NULL, "sanity");
2959   ShenandoahLiveData* ld = _liveness_cache[worker_id];
2960 
2961   for (uint i = 0; i < num_regions(); i++) {
2962     ShenandoahLiveData live = ld[i];
2963     if (live > 0) {
2964       ShenandoahHeapRegion* r = get_region(i);
2965       r->increase_live_data_gc_words(live);
2966       ld[i] = 0;
2967     }
2968   }
2969 }
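//
// Editorial sketch: a standalone illustration (not HotSpot code) of the per-worker liveness
// cache pattern above.  During marking each worker bumps counters in its own array with no
// synchronization; flushing folds the non-zero entries into shared per-region counters and
// clears the cache, which is what the assertions in get_liveness_cache() rely on.
//
#include <atomic>
#include <cstddef>
#include <vector>

class WorkerLivenessCache {
  std::vector<size_t> _live_words;                       // one slot per region, worker-private
public:
  explicit WorkerLivenessCache(size_t num_regions) : _live_words(num_regions, 0) {}

  // Hot path during marking: no atomics needed, the array belongs to this worker alone.
  void record_live(size_t region_index, size_t obj_words) { _live_words[region_index] += obj_words; }

  // Flush: publish accumulated liveness to the shared per-region counters, then reset.
  void flush(std::atomic<size_t>* region_live_words) {
    for (size_t i = 0; i < _live_words.size(); i++) {
      if (_live_words[i] > 0) {
        region_live_words[i].fetch_add(_live_words[i]);
        _live_words[i] = 0;
      }
    }
  }
};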
2970 
2971 bool ShenandoahHeap::requires_barriers(stackChunkOop obj) const {
2972   if (is_idle()) return false;
2973 
2974   // Objects allocated after marking start are implicitly alive and don't need any barriers during
2975   // the marking phase.
2976   if (is_concurrent_mark_in_progress() &&
2977      !marking_context()->allocated_after_mark_start(obj)) {
2978     return true;
2979   }
2980 
2981   // Can not guarantee obj is deeply good.
2982   if (has_forwarded_objects()) {
2983     return true;
2984   }
2985 
2986   return false;
2987 }
2988 
2989 void ShenandoahHeap::transfer_old_pointers_from_satb() {
2990   _old_generation->transfer_pointers_from_satb();
2991 }
2992 
2993 template<>
2994 void ShenandoahGenerationRegionClosure<YOUNG>::heap_region_do(ShenandoahHeapRegion* region) {
2995   // Visit young and free regions
2996   if (region->affiliation() != OLD_GENERATION) {
2997     _cl->heap_region_do(region);
2998   }
2999 }
3000 
3001 template<>
3002 void ShenandoahGenerationRegionClosure<OLD>::heap_region_do(ShenandoahHeapRegion* region) {
3003   // Visit old and free regions
3004   if (region->affiliation() != YOUNG_GENERATION) {
3005     _cl->heap_region_do(region);
3006   }
3007 }
3008 
3009 template<>
3010 void ShenandoahGenerationRegionClosure<GLOBAL>::heap_region_do(ShenandoahHeapRegion* region) {
3011   _cl->heap_region_do(region);
3012 }
3013 
3014 // Assure that the remembered set has a dirty card everywhere there is an interesting pointer.
3015 // This examines the read_card_table between bottom() and top() since all PLABs are retired
3016 // before the safepoint for init_mark.  Actually, we retire them before update-references and don't
3017 // restore them until the start of evacuation.
3018 void ShenandoahHeap::verify_rem_set_at_mark() {
3019   shenandoah_assert_safepoint();
3020   assert(mode()->is_generational(), "Only verify remembered set for generational operational modes");
3021 
3022   ShenandoahRegionIterator iterator;
3023   RememberedScanner* scanner = card_scan();
3024   ShenandoahVerifyRemSetClosure check_interesting_pointers(true);
3025   ShenandoahMarkingContext* ctx;
3026 
3027   log_debug(gc)("Verifying remembered set at %s mark", doing_mixed_evacuations()? "mixed": "young");
3028 
3029   if (is_old_bitmap_stable() || active_generation()->generation_mode() == GLOBAL) {
3030     ctx = complete_marking_context();
3031   } else {
3032     ctx = nullptr;
3033   }
3034 
3035   while (iterator.has_next()) {
3036     ShenandoahHeapRegion* r = iterator.next();
3037     if (r == nullptr)
3038       break;
3039     if (r->is_old() && r->is_active()) {
3040       HeapWord* obj_addr = r->bottom();
3041       if (r->is_humongous_start()) {
3042         oop obj = cast_to_oop(obj_addr);
3043         if (!ctx || ctx->is_marked(obj)) {
3044           // For humongous objects, the typical object is an array, so the following checks may be overkill.
3045           // For regular objects (not object arrays), if the card holding the start of the object is dirty,
3046           // we do not need to verify that cards spanning interesting pointers within this object are dirty.
3047           if (!scanner->is_card_dirty(obj_addr) || obj->is_objArray()) {
3048             obj->oop_iterate(&check_interesting_pointers);
3049           }
3050           // else, object's start is marked dirty and obj is not an objArray, so any interesting pointers are covered
3051         }
3052         // else, this humongous object is not marked so no need to verify its internal pointers
3053         if (!scanner->verify_registration(obj_addr, ctx)) {
3054           ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, obj_addr, NULL,
3055                                           "Verify init-mark remembered set violation", "object not properly registered", __FILE__, __LINE__);
3056         }
3057       } else if (!r->is_humongous()) {
3058         HeapWord* top = r->top();
3059         while (obj_addr < top) {
3060           oop obj = cast_to_oop(obj_addr);
3061           // ctx->is_marked() returns true if mark bit set (TAMS not relevant during init mark)
3062           if (!ctx || ctx->is_marked(obj)) {
3063             // For regular objects (not object arrays), if the card holding the start of the object is dirty,
3064             // we do not need to verify that cards spanning interesting pointers within this object are dirty.
3065             if (!scanner->is_card_dirty(obj_addr) || obj->is_objArray()) {
3066               obj->oop_iterate(&check_interesting_pointers);
3067             }
3068             // else, object's start is marked dirty and obj is not an objArray, so any interesting pointers are covered
3069             if (!scanner->verify_registration(obj_addr, ctx)) {
3070               ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, obj_addr, NULL,
3071                                             "Verify init-mark remembered set violation", "object not properly registered", __FILE__, __LINE__);
3072             }
3073             obj_addr += obj->size();
3074           } else {
3075             // This object is not live so we don't verify dirty cards contained therein
3076             assert(ctx->top_at_mark_start(r) == top, "Expect tams == top at start of mark.");
3077             obj_addr = ctx->get_next_marked_addr(obj_addr, top);
3078           }
3079         }
3080       } // else, we ignore humongous continuation region
3081     } // else, this is not an OLD region so we ignore it
3082   } // all regions have been processed
3083 }
3084 
3085 void ShenandoahHeap::help_verify_region_rem_set(ShenandoahHeapRegion* r, ShenandoahMarkingContext* ctx, HeapWord* from,
3086                                                 HeapWord* top, HeapWord* registration_watermark, const char* message) {
3087   RememberedScanner* scanner = card_scan();
3088   ShenandoahVerifyRemSetClosure check_interesting_pointers(false);
3089 
3090   HeapWord* obj_addr = from;
3091   if (r->is_humongous_start()) {
3092     oop obj = cast_to_oop(obj_addr);
3093     if (!ctx || ctx->is_marked(obj)) {
3094       size_t card_index = scanner->card_index_for_addr(obj_addr);
3095       // For humongous objects, the typical object is an array, so the following checks may be overkill.
3096       // For regular objects (not object arrays), if the card holding the start of the object is dirty,
3097       // we do not need to verify that cards spanning interesting pointers within this object are dirty.
3098       if (!scanner->is_write_card_dirty(card_index) || obj->is_objArray()) {
3099         obj->oop_iterate(&check_interesting_pointers);
3100       }
3101       // else, object's start is marked dirty and obj is not an objArray, so any interesting pointers are covered
3102     }
3103     // else, this humongous object is not live so no need to verify its internal pointers
3104 
3105     if ((obj_addr < registration_watermark) && !scanner->verify_registration(obj_addr, ctx)) {
3106       ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, obj_addr, NULL, message,
3107                                        "object not properly registered", __FILE__, __LINE__);
3108     }
3109   } else if (!r->is_humongous()) {
3110     while (obj_addr < top) {
3111       oop obj = cast_to_oop(obj_addr);
3112       // ctx->is_marked() returns true if mark bit set or if obj above TAMS.
3113       if (!ctx || ctx->is_marked(obj)) {
3114         size_t card_index = scanner->card_index_for_addr(obj_addr);
3115         // For regular objects (not object arrays), if the card holding the start of the object is dirty,
3116         // we do not need to verify that cards spanning interesting pointers within this object are dirty.
3117         if (!scanner->is_write_card_dirty(card_index) || obj->is_objArray()) {
3118           obj->oop_iterate(&check_interesting_pointers);
3119         }
3120         // else, object's start is marked dirty and obj is not an objArray, so any interesting pointers are covered
3121 
3122         if ((obj_addr < registration_watermark) && !scanner->verify_registration(obj_addr, ctx)) {
3123           ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, obj_addr, NULL, message,
3124                                            "object not properly registered", __FILE__, __LINE__);
3125         }
3126         obj_addr += obj->size();
3127       } else {
3128         // This object is not live so we don't verify dirty cards contained therein
3129         HeapWord* tams = ctx->top_at_mark_start(r);
3130         obj_addr = ctx->get_next_marked_addr(obj_addr, tams);
3131       }
3132     }
3133   }
3134 }
3135 
3136 void ShenandoahHeap::verify_rem_set_after_full_gc() {
3137   shenandoah_assert_safepoint();
3138   assert(mode()->is_generational(), "Only verify remembered set for generational operational modes");
3139 
3140   ShenandoahRegionIterator iterator;
3141 
3142   while (iterator.has_next()) {
3143     ShenandoahHeapRegion* r = iterator.next();
3144     if (r == nullptr)
3145       break;
3146     if (r->is_old() && !r->is_cset()) {
3147       help_verify_region_rem_set(r, nullptr, r->bottom(), r->top(), r->top(), "Remembered set violation at end of Full GC");
3148     }
3149   }
3150 }
3151 
3152 // Assure that the remembered set has a dirty card everywhere there is an interesting pointer.  Even though
3153 // the update-references scan of the remembered set only examines cards up to update_watermark, the remembered
3154 // set should be valid through top.  This examines the write_card_table between bottom() and top() because
3155 // all PLABs are retired immediately before the start of update refs.
3156 void ShenandoahHeap::verify_rem_set_at_update_ref() {
3157   shenandoah_assert_safepoint();
3158   assert(mode()->is_generational(), "Only verify remembered set for generational operational modes");
3159 
3160   ShenandoahRegionIterator iterator;
3161   ShenandoahMarkingContext* ctx;
3162 
3163   if (is_old_bitmap_stable() || active_generation()->generation_mode() == GLOBAL) {
3164     ctx = complete_marking_context();
3165   } else {
3166     ctx = nullptr;
3167   }
3168 
3169   while (iterator.has_next()) {
3170     ShenandoahHeapRegion* r = iterator.next();
3171     if (r == nullptr)
3172       break;
3173     if (r->is_old() && !r->is_cset()) {
3174       help_verify_region_rem_set(r, ctx, r->bottom(), r->top(), r->get_update_watermark(),
3175                                  "Remembered set violation at init-update-references");
3176     }
3177   }
3178 }
3179 
3180 ShenandoahGeneration* ShenandoahHeap::generation_for(ShenandoahRegionAffiliation affiliation) const {
3181   if (!mode()->is_generational()) {
3182     return global_generation();
3183   } else if (affiliation == YOUNG_GENERATION) {
3184     return young_generation();
3185   } else if (affiliation == OLD_GENERATION) {
3186     return old_generation();
3187   }
3188 
3189   ShouldNotReachHere();
3190   return nullptr;
3191 }
< prev index next >