src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp

   1 /*
   2  * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "memory/allocation.hpp"
  28 #include "memory/universe.hpp"
  29 
  30 #include "gc/shared/gcArguments.hpp"
  31 #include "gc/shared/gcTimer.hpp"
  32 #include "gc/shared/gcTraceTime.inline.hpp"
  33 #include "gc/shared/locationPrinter.inline.hpp"
  34 #include "gc/shared/memAllocator.hpp"
  35 #include "gc/shared/plab.hpp"
  36 #include "gc/shared/tlab_globals.hpp"
  37 
  38 #include "gc/shenandoah/shenandoahBarrierSet.hpp"

  39 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  40 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  41 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  42 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
  43 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  44 #include "gc/shenandoah/shenandoahControlThread.hpp"

  45 #include "gc/shenandoah/shenandoahFreeSet.hpp"

  46 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  47 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  48 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  49 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  50 #include "gc/shenandoah/shenandoahInitLogger.hpp"
  51 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  52 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
  53 #include "gc/shenandoah/shenandoahMetrics.hpp"
  54 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"

  55 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  56 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
  57 #include "gc/shenandoah/shenandoahPadding.hpp"
  58 #include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
  59 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  60 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"

  61 #include "gc/shenandoah/shenandoahSTWMark.hpp"
  62 #include "gc/shenandoah/shenandoahUtils.hpp"
  63 #include "gc/shenandoah/shenandoahVerifier.hpp"
  64 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  65 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  66 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  67 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"


  68 #include "gc/shenandoah/mode/shenandoahIUMode.hpp"
  69 #include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
  70 #include "gc/shenandoah/mode/shenandoahSATBMode.hpp"


  71 #if INCLUDE_JFR
  72 #include "gc/shenandoah/shenandoahJfrSupport.hpp"
  73 #endif
  74 
  75 #include "classfile/systemDictionary.hpp"
  76 #include "code/codeCache.hpp"
  77 #include "memory/classLoaderMetaspace.hpp"
  78 #include "memory/metaspaceUtils.hpp"
  79 #include "oops/compressedOops.inline.hpp"
  80 #include "prims/jvmtiTagMap.hpp"
  81 #include "runtime/atomic.hpp"
  82 #include "runtime/globals.hpp"
  83 #include "runtime/interfaceSupport.inline.hpp"
  84 #include "runtime/java.hpp"
  85 #include "runtime/orderAccess.hpp"
  86 #include "runtime/safepointMechanism.hpp"
  87 #include "runtime/vmThread.hpp"
  88 #include "services/mallocTracker.hpp"
  89 #include "services/memTracker.hpp"
  90 #include "utilities/events.hpp"

 142 jint ShenandoahHeap::initialize() {
 143   //
 144   // Figure out heap sizing
 145   //
 146 
 147   size_t init_byte_size = InitialHeapSize;
 148   size_t min_byte_size  = MinHeapSize;
 149   size_t max_byte_size  = MaxHeapSize;
 150   size_t heap_alignment = HeapAlignment;
 151 
 152   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 153 
 154   Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
 155   Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
 156 
 157   _num_regions = ShenandoahHeapRegion::region_count();
 158   assert(_num_regions == (max_byte_size / reg_size_bytes),
 159          "Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT,
 160          _num_regions, max_byte_size, reg_size_bytes);
 161 
 162   // Now we know the number of regions, initialize the heuristics.
 163   initialize_heuristics();
 164 
 165   size_t num_committed_regions = init_byte_size / reg_size_bytes;
 166   num_committed_regions = MIN2(num_committed_regions, _num_regions);
 167   assert(num_committed_regions <= _num_regions, "sanity");
 168   _initial_size = num_committed_regions * reg_size_bytes;
 169 
 170   size_t num_min_regions = min_byte_size / reg_size_bytes;
 171   num_min_regions = MIN2(num_min_regions, _num_regions);
 172   assert(num_min_regions <= _num_regions, "sanity");
 173   _minimum_size = num_min_regions * reg_size_bytes;
 174 
 175   // Default to max heap size.
 176   _soft_max_size = _num_regions * reg_size_bytes;
 177 
 178   _committed = _initial_size;
 179 
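
For orientation, a minimal standalone sketch of the sizing arithmetic above, using assumed example values (a 4 GiB max heap, a 1 GiB initial heap, 2 MiB regions; the numbers are illustrative, not taken from any particular configuration):

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    int main() {
      // Assumed example values: roughly -Xms1g -Xmx4g with 2 MiB Shenandoah regions.
      const size_t region_size    = 2ull * 1024 * 1024;
      const size_t init_byte_size = 1ull * 1024 * 1024 * 1024;   // InitialHeapSize
      const size_t max_byte_size  = 4ull * 1024 * 1024 * 1024;   // MaxHeapSize

      const size_t num_regions   = max_byte_size / region_size;                         // 2048
      const size_t num_committed = std::min(init_byte_size / region_size, num_regions); // 512
      const size_t initial_size  = num_committed * region_size;                         // 1 GiB committed up front

      std::printf("regions=%zu, committed regions=%zu, initial committed=%zu bytes\n",
                  num_regions, num_committed, initial_size);
      return 0;
    }
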
 180   size_t heap_page_size   = UseLargePages ? os::large_page_size() : os::vm_page_size();
 181   size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 182   size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 183 
 184   //
 185   // Reserve and commit memory for heap
 186   //
 187 
 188   ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
 189   initialize_reserved_region(heap_rs);
 190   _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
 191   _heap_region_special = heap_rs.special();
 192 
 193   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
 194          "Misaligned heap: " PTR_FORMAT, p2i(base()));
 195 
 196 #if SHENANDOAH_OPTIMIZED_MARKTASK
 197   // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
 198   // Fail if we ever attempt to address more than we can.
 199   if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
 200     FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
 201                           "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
 202                           "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
 203                 p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
 204     vm_exit_during_initialization("Fatal Error", buf);
 205   }
 206 #endif
 207 
 208   ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
 209   if (!_heap_region_special) {
 210     os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
 211                               "Cannot commit heap memory");
 212   }
 213 
 214   //
 215   // Reserve and commit memory for bitmap(s)
 216   //
 217 
 218   _bitmap_size = ShenandoahMarkBitMap::compute_size(heap_rs.size());
 219   _bitmap_size = align_up(_bitmap_size, bitmap_page_size);
 220 
 221   size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();
 222 
 223   guarantee(bitmap_bytes_per_region != 0,
 224             "Bitmap bytes per region should not be zero");
 225   guarantee(is_power_of_2(bitmap_bytes_per_region),
 226             "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
 227 
 228   if (bitmap_page_size > bitmap_bytes_per_region) {
 229     _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
 230     _bitmap_bytes_per_slice = bitmap_page_size;
 231   } else {
 232     _bitmap_regions_per_slice = 1;
 233     _bitmap_bytes_per_slice = bitmap_bytes_per_region;
 234   }
 235 
 236   guarantee(_bitmap_regions_per_slice >= 1,
 237             "Should have at least one region per slice: " SIZE_FORMAT,
 238             _bitmap_regions_per_slice);
 239 
 240   guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
 241             "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
 242             _bitmap_bytes_per_slice, bitmap_page_size);
 243 
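
The slicing above groups regions so that each bitmap slice is page-granular. A standalone sketch with assumed numbers (2 MiB regions and 64 heap bytes covered per bitmap byte, i.e. 32 KiB of bitmap per region; the real factor comes from ShenandoahMarkBitMap::heap_map_factor()):

    #include <cstddef>
    #include <cstdio>

    int main() {
      // Assumed: 2 MiB regions, 64 heap bytes per bitmap byte => 32 KiB of bitmap per region.
      const size_t bitmap_bytes_per_region = (2ull * 1024 * 1024) / 64;

      const size_t pages[] = { 4096, 2ull * 1024 * 1024 };   // regular page vs. 2 MiB large page
      for (size_t page : pages) {
        size_t regions_per_slice = (page > bitmap_bytes_per_region) ? page / bitmap_bytes_per_region : 1;
        size_t bytes_per_slice   = (page > bitmap_bytes_per_region) ? page : bitmap_bytes_per_region;
        std::printf("page=%zu -> regions per slice=%zu, bytes per slice=%zu\n",
                    page, regions_per_slice, bytes_per_slice);
      }
      return 0;
    }
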
 244   ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
 245   MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
 246   _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
 247   _bitmap_region_special = bitmap.special();
 248 
 249   size_t bitmap_init_commit = _bitmap_bytes_per_slice *
 250                               align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
 251   bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
 252   if (!_bitmap_region_special) {
 253     os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
 254                               "Cannot commit bitmap memory");
 255   }
 256 
 257   _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions, _max_workers);
 258 
 259   if (ShenandoahVerify) {
 260     ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
 261     if (!verify_bitmap.special()) {
 262       os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
 263                                 "Cannot commit verification bitmap memory");
 264     }
 265     MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
 266     MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
 267     _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
 268     _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
 269   }
 270 
 271   // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
 272   ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
 273   MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
 274   _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
 275   _aux_bitmap_region_special = aux_bitmap.special();
 276   _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
 277 
 278   //
 279   // Create regions and region sets
 280   //
 281   size_t region_align = align_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE);
 282   size_t region_storage_size = align_up(region_align * _num_regions, region_page_size);
 283   region_storage_size = align_up(region_storage_size, os::vm_allocation_granularity());

 284 
 285   ReservedSpace region_storage(region_storage_size, region_page_size);
 286   MemTracker::record_virtual_memory_type(region_storage.base(), mtGC);
 287   if (!region_storage.special()) {
 288     os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false,
 289                               "Cannot commit region memory");
 290   }
 291 
 292   // Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks.
 293   // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there.
 294   // If that is not successful, bite the bullet and allocate at whatever address we get.
 295   {
 296     size_t cset_align = MAX2<size_t>(os::vm_page_size(), os::vm_allocation_granularity());
 297     size_t cset_size = align_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align);

 298 
 299     uintptr_t min = round_up_power_of_2(cset_align);
 300     uintptr_t max = (1u << 30u);

 301 
 302     for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
 303       char* req_addr = (char*)addr;
 304       assert(is_aligned(req_addr, cset_align), "Should be aligned");
 305       ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size(), req_addr);
 306       if (cset_rs.is_reserved()) {
 307         assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
 308         _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 309         break;
 310       }
 311     }
 312 
 313     if (_collection_set == nullptr) {
 314       ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size());
 315       _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 316     }
 317   }
 318 
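
The loop above tries to land the collection set map at a low address because a cset membership check is a single byte load off a near-constant base, indexed by the address shifted by the region size. A simplified, hypothetical sketch of such a biased byte map (the names and the 2 MiB region shift are assumptions, not the VM's own API):

    #include <cstdint>
    #include <cstdio>

    // Illustrative only: one byte per region; the map is "biased" by (heap base >> shift)
    // so a membership check is a single indexed byte load: cmpb $0, base(addr >> shift).
    static const int kRegionShift = 21;   // log2 of an assumed 2 MiB region size

    static bool in_collection_set(uintptr_t biased_map_base, uintptr_t addr) {
      const uint8_t* p = reinterpret_cast<const uint8_t*>(biased_map_base + (addr >> kRegionShift));
      return *p != 0;
    }

    int main() {
      // Toy setup: a fake "heap" of 8 regions at an assumed base address.
      const uintptr_t heap_base = uintptr_t(1) << 28;
      uint8_t map[8] = {0};
      map[3] = 1;                                            // pretend region 3 is in the cset

      // Bias the map so it can be indexed by (addr >> shift) without subtracting the heap base.
      const uintptr_t biased = reinterpret_cast<uintptr_t>(map) - (heap_base >> kRegionShift);

      const uintptr_t obj = heap_base + 3 * (uintptr_t(1) << kRegionShift) + 128;
      std::printf("in cset: %d\n", in_collection_set(biased, obj) ? 1 : 0);
      return 0;
    }
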
 319   _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);

 320   _free_set = new ShenandoahFreeSet(this, _num_regions);
 321 
 322   {
 323     ShenandoahHeapLocker locker(lock());
 324 

 325     for (size_t i = 0; i < _num_regions; i++) {
 326       HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
 327       bool is_committed = i < num_committed_regions;
 328       void* loc = region_storage.base() + i * region_align;
 329 
 330       ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
 331       assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");
 332 
 333       _marking_context->initialize_top_at_mark_start(r);
 334       _regions[i] = r;
 335       assert(!collection_set()->is_in(i), "New region should not be in collection set");


 336     }
 337 
 338     // Initialize to complete
 339     _marking_context->mark_complete();

 340 
 341     _free_set->rebuild();
 342   }
 343 
 344   if (AlwaysPreTouch) {
 345     // For NUMA, it is important to pre-touch the storage under the bitmaps with worker threads,
 346     // before initialize() below zeroes it from the initializing thread. For any given region,
 347     // we touch the region and the corresponding bitmaps from the same thread.
 348     ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
 349 
 350     _pretouch_heap_page_size = heap_page_size;
 351     _pretouch_bitmap_page_size = bitmap_page_size;
 352 
 353 #ifdef LINUX
 354     // UseTransparentHugePages would madvise that backing memory can be coalesced into huge
 355     // pages. But the kernel needs to know that every small page is used, in order to coalesce
 356     // them into a huge one. Therefore, we need to pretouch with smaller pages.
 357     if (UseTransparentHugePages) {
 358       _pretouch_heap_page_size = (size_t)os::vm_page_size();
 359       _pretouch_bitmap_page_size = (size_t)os::vm_page_size();
 360     }
 361 #endif

 383   // There should probably be Shenandoah-specific options for these,
 384   // just as there are G1-specific options.
 385   {
 386     ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
 387     satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
 388     satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
 389   }
 390 
 391   _monitoring_support = new ShenandoahMonitoringSupport(this);
 392   _phase_timings = new ShenandoahPhaseTimings(max_workers());
 393   ShenandoahCodeRoots::initialize();
 394 
 395   if (ShenandoahPacing) {
 396     _pacer = new ShenandoahPacer(this);
 397     _pacer->setup_for_idle();
 398   } else {
 399     _pacer = nullptr;
 400   }
 401 
 402   _control_thread = new ShenandoahControlThread();

 403 
 404   ShenandoahInitLogger::print();
 405 
 406   return JNI_OK;
 407 }
 408 
 409 void ShenandoahHeap::initialize_mode() {
 410   if (ShenandoahGCMode != nullptr) {
 411     if (strcmp(ShenandoahGCMode, "satb") == 0) {
 412       _gc_mode = new ShenandoahSATBMode();
 413     } else if (strcmp(ShenandoahGCMode, "iu") == 0) {
 414       _gc_mode = new ShenandoahIUMode();
 415     } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
 416       _gc_mode = new ShenandoahPassiveMode();


 417     } else {
 418       vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
 419     }
 420   } else {
 421     vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
 422   }
 423   _gc_mode->initialize_flags();
 424   if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 425     vm_exit_during_initialization(
 426             err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 427                     _gc_mode->name()));
 428   }
 429   if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
 430     vm_exit_during_initialization(
 431             err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 432                     _gc_mode->name()));
 433   }
 434 }
 435 
 436 void ShenandoahHeap::initialize_heuristics() {
 437   assert(_gc_mode != nullptr, "Must be initialized");
 438   _heuristics = _gc_mode->initialize_heuristics();
 439 
 440   if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 441     vm_exit_during_initialization(
 442             err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 443                     _heuristics->name()));
 444   }
 445   if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
 446     vm_exit_during_initialization(
 447             err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 448                     _heuristics->name()));
 449   }

 450 }
 451 
 452 #ifdef _MSC_VER
 453 #pragma warning( push )
 454 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 455 #endif
 456 
 457 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 458   CollectedHeap(),

 459   _initial_size(0),
 460   _used(0),
 461   _committed(0),
 462   _bytes_allocated_since_gc_start(0),
 463   _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
 464   _workers(nullptr),
 465   _safepoint_workers(nullptr),
 466   _heap_region_special(false),
 467   _num_regions(0),
 468   _regions(nullptr),

 469   _update_refs_iterator(this),
 470   _control_thread(nullptr),

 471   _shenandoah_policy(policy),
 472   _gc_mode(nullptr),
 473   _heuristics(nullptr),
 474   _free_set(nullptr),
 475   _pacer(nullptr),
 476   _verifier(nullptr),
 477   _phase_timings(nullptr),
 478   _monitoring_support(nullptr),
 479   _memory_pool(nullptr),


 480   _stw_memory_manager("Shenandoah Pauses"),
 481   _cycle_memory_manager("Shenandoah Cycles"),
 482   _gc_timer(new ConcurrentGCTimer()),
 483   _soft_ref_policy(),
 484   _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
 485   _ref_processor(new ShenandoahReferenceProcessor(MAX2(_max_workers, 1U))),

 486   _marking_context(nullptr),
 487   _bitmap_size(0),
 488   _bitmap_regions_per_slice(0),
 489   _bitmap_bytes_per_slice(0),
 490   _bitmap_region_special(false),
 491   _aux_bitmap_region_special(false),
 492   _liveness_cache(nullptr),
 493   _collection_set(nullptr)

 494 {
 495   // Initialize GC mode early, so we can adjust barrier support
 496   initialize_mode();
 497   BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));
 498 
 499   _max_workers = MAX2(_max_workers, 1U);
 500   _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
 501   if (_workers == nullptr) {
 502     vm_exit_during_initialization("Failed necessary allocation.");
 503   } else {
 504     _workers->initialize_workers();
 505   }
 506 
 507   if (ParallelGCThreads > 1) {
 508     _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread",
 509                                                 ParallelGCThreads);
 510     _safepoint_workers->initialize_workers();
 511   }
 512 }
 513 
 514 #ifdef _MSC_VER
 515 #pragma warning( pop )
 516 #endif
 517 
 518 class ShenandoahResetBitmapTask : public WorkerTask {
 519 private:
 520   ShenandoahRegionIterator _regions;
 521 
 522 public:
 523   ShenandoahResetBitmapTask() :
 524     WorkerTask("Shenandoah Reset Bitmap") {}
 525 
 526   void work(uint worker_id) {
 527     ShenandoahHeapRegion* region = _regions.next();
 528     ShenandoahHeap* heap = ShenandoahHeap::heap();
 529     ShenandoahMarkingContext* const ctx = heap->marking_context();
 530     while (region != nullptr) {
 531       if (heap->is_bitmap_slice_committed(region)) {
 532         ctx->clear_bitmap(region);
 533       }
 534       region = _regions.next();
 535     }
 536   }
 537 };
 538 
 539 void ShenandoahHeap::reset_mark_bitmap() {
 540   assert_gc_workers(_workers->active_workers());
 541   mark_incomplete_marking_context();
 542 
 543   ShenandoahResetBitmapTask task;
 544   _workers->run_task(&task);
 545 }
 546 
 547 void ShenandoahHeap::print_on(outputStream* st) const {
 548   st->print_cr("Shenandoah Heap");
 549   st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
 550                byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
 551                byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
 552                byte_size_in_proper_unit(committed()),    proper_unit_for_byte_size(committed()),
 553                byte_size_in_proper_unit(used()),         proper_unit_for_byte_size(used()));
 554   st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions",
 555                num_regions(),
 556                byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
 557                proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));
 558 
 559   st->print("Status: ");
 560   if (has_forwarded_objects())                 st->print("has forwarded objects, ");
 561   if (is_concurrent_mark_in_progress())        st->print("marking, ");

 562   if (is_evacuation_in_progress())             st->print("evacuating, ");
 563   if (is_update_refs_in_progress())            st->print("updating refs, ");
 564   if (is_degenerated_gc_in_progress())         st->print("degenerated gc, ");
 565   if (is_full_gc_in_progress())                st->print("full gc, ");
 566   if (is_full_gc_move_in_progress())           st->print("full gc move, ");
 567   if (is_concurrent_weak_root_in_progress())   st->print("concurrent weak roots, ");
 568   if (is_concurrent_strong_root_in_progress() &&
 569       !is_concurrent_weak_root_in_progress())  st->print("concurrent strong roots, ");
 570 
 571   if (cancelled_gc()) {
 572     st->print("cancelled");
 573   } else {
 574     st->print("not cancelled");
 575   }
 576   st->cr();
 577 
 578   st->print_cr("Reserved region:");
 579   st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
 580                p2i(reserved_region().start()),
 581                p2i(reserved_region().end()));

 592   st->cr();
 593   MetaspaceUtils::print_on(st);
 594 
 595   if (Verbose) {
 596     st->cr();
 597     print_heap_regions_on(st);
 598   }
 599 }
 600 
 601 class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
 602 public:
 603   void do_thread(Thread* thread) {
 604     assert(thread != nullptr, "Sanity");
 605     assert(thread->is_Worker_thread(), "Only worker thread expected");
 606     ShenandoahThreadLocalData::initialize_gclab(thread);
 607   }
 608 };
 609 
 610 void ShenandoahHeap::post_initialize() {
 611   CollectedHeap::post_initialize();


 612   MutexLocker ml(Threads_lock);
 613 
 614   ShenandoahInitWorkerGCLABClosure init_gclabs;
 615   _workers->threads_do(&init_gclabs);
 616 
 617   // The gclab cannot be initialized early during VM startup, as it cannot determine its max_size.
 618   // Instead, we let WorkerThreads initialize the gclab when a new worker is created.
 619   _workers->set_initialize_gclab();
 620   if (_safepoint_workers != nullptr) {
 621     _safepoint_workers->threads_do(&init_gclabs);
 622     _safepoint_workers->set_initialize_gclab();
 623   }
 624 
 625   _heuristics->initialize();
 626 
 627   JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers());
 628 }
 629 
 630 size_t ShenandoahHeap::used() const {
 631   return Atomic::load(&_used);
 632 }
 633 
 634 size_t ShenandoahHeap::committed() const {
 635   return Atomic::load(&_committed);
 636 }
 637 
 638 void ShenandoahHeap::increase_committed(size_t bytes) {
 639   shenandoah_assert_heaplocked_or_safepoint();
 640   _committed += bytes;
 641 }
 642 
 643 void ShenandoahHeap::decrease_committed(size_t bytes) {
 644   shenandoah_assert_heaplocked_or_safepoint();
 645   _committed -= bytes;
 646 }
 647 
 648 void ShenandoahHeap::increase_used(size_t bytes) {
 649   Atomic::add(&_used, bytes, memory_order_relaxed);
 650 }
 651 
 652 void ShenandoahHeap::set_used(size_t bytes) {
 653   Atomic::store(&_used, bytes);
 654 }
 655 
 656 void ShenandoahHeap::decrease_used(size_t bytes) {
 657   assert(used() >= bytes, "never decrease heap size by more than we've left");
 658   Atomic::sub(&_used, bytes, memory_order_relaxed);


 659 }
 660 
 661 void ShenandoahHeap::increase_allocated(size_t bytes) {
 662   Atomic::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed);
 663 }
 664 
 665 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
 666   size_t bytes = words * HeapWordSize;
 667   if (!waste) {
 668     increase_used(bytes);
 669   }
 670   increase_allocated(bytes);


 671   if (ShenandoahPacing) {
 672     control_thread()->pacing_notify_alloc(words);
 673     if (waste) {
 674       pacer()->claim_for_alloc(words, true);
 675     }
 676   }
 677 }
 678 
 679 size_t ShenandoahHeap::capacity() const {
 680   return committed();
 681 }
 682 
 683 size_t ShenandoahHeap::max_capacity() const {
 684   return _num_regions * ShenandoahHeapRegion::region_size_bytes();
 685 }
 686 
 687 size_t ShenandoahHeap::soft_max_capacity() const {
 688   size_t v = Atomic::load(&_soft_max_size);
 689   assert(min_capacity() <= v && v <= max_capacity(),
 690          "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
 691          min_capacity(), v, max_capacity());
 692   return v;
 693 }
 694 
 695 void ShenandoahHeap::set_soft_max_capacity(size_t v) {
 696   assert(min_capacity() <= v && v <= max_capacity(),
 697          "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
 698          min_capacity(), v, max_capacity());
 699   Atomic::store(&_soft_max_size, v);
 700 }
 701 
 702 size_t ShenandoahHeap::min_capacity() const {
 703   return _minimum_size;
 704 }
 705 
 706 size_t ShenandoahHeap::initial_capacity() const {
 707   return _initial_size;
 708 }
 709 
 710 bool ShenandoahHeap::is_in(const void* p) const {
 711   HeapWord* heap_base = (HeapWord*) base();
 712   HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
 713   return p >= heap_base && p < last_region_end;
 714 }
 715 
 716 void ShenandoahHeap::op_uncommit(double shrink_before, size_t shrink_until) {
 717   assert (ShenandoahUncommit, "should be enabled");
 718 
 719   // The application allocates from the beginning of the heap, and GC allocates at
 720   // the end of it. It is more efficient to uncommit from the end, so that the application
 721   // keeps enjoying the already-committed regions near the bottom. GC allocations are much
 722   // less frequent, and can therefore accept the cost of committing.
 723 
 724   size_t count = 0;
 725   for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow
 726     ShenandoahHeapRegion* r = get_region(i - 1);
 727     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
 728       ShenandoahHeapLocker locker(lock());
 729       if (r->is_empty_committed()) {
 730         if (committed() < shrink_until + ShenandoahHeapRegion::region_size_bytes()) {
 731           break;
 732         }
 733 
 734         r->make_uncommitted();
 735         count++;
 736       }
 737     }
 738     SpinPause(); // allow allocators to take the lock
 739   }
 740 
 741   if (count > 0) {
 742     control_thread()->notify_heap_changed();
 743   }
 744 }
 745 
 746 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
 747   // New object should fit the GCLAB size
 748   size_t min_size = MAX2(size, PLAB::min_size());
 749 
 750   // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
 751   size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
 752   new_size = MIN2(new_size, PLAB::max_size());
 753   new_size = MAX2(new_size, PLAB::min_size());
 754 
 755   // Record new heuristic value even if we take any shortcut. This captures
 756   // the case when moderately-sized objects always take a shortcut. At some point,
 757   // heuristics should catch up with them.
 758   ShenandoahThreadLocalData::set_gclab_size(thread, new_size);
 759 
 760   if (new_size < size) {
 761     // New size still does not fit the object. Fall back to shared allocation.
 762     // This avoids retiring perfectly good GCLABs, when we encounter a large object.

 763     return nullptr;
 764   }
 765 
 766   // Retire current GCLAB, and allocate a new one.
 767   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
 768   gclab->retire();
 769 
 770   size_t actual_size = 0;
 771   HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
 772   if (gclab_buf == nullptr) {
 773     return nullptr;
 774   }
 775 
 776   assert (size <= actual_size, "allocation should fit");
 777 
 778   if (ZeroTLAB) {
 779     // ..and clear it.
 780     Copy::zero_to_words(gclab_buf, actual_size);
 781   } else {
 782     // ...and zap just allocated object.
 783 #ifdef ASSERT
 784     // Skip mangling the space corresponding to the object header to
 785     // ensure that the returned space is not considered parsable by
 786     // any concurrent GC thread.
 787     size_t hdr_size = oopDesc::header_size();
 788     Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
 789 #endif // ASSERT
 790   }
 791   gclab->set_buf(gclab_buf, actual_size);
 792   return gclab->allocate(size);
 793 }
 794 
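
A standalone sketch of the resize policy above: double the previous GCLAB size and clamp it to the PLAB bounds. The bounds here are assumed example values; the real ones come from PLAB::min_size() and PLAB::max_size():

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    // Assumed example bounds in heap words; illustrative only.
    static const size_t kMinWords = 256;
    static const size_t kMaxWords = 64 * 1024;

    static size_t next_gclab_size(size_t current_words) {
      size_t next = current_words * 2;            // expand aggressively
      next = std::min(next, kMaxWords);
      next = std::max(next, kMinWords);
      return next;
    }

    int main() {
      size_t sz = kMinWords;
      for (int refill = 0; refill < 10; refill++) {
        std::printf("refill %d: GCLAB of %zu words\n", refill, sz);
        sz = next_gclab_size(sz);
      }
      return 0;
    }
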
 795 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
 796                                             size_t requested_size,
 797                                             size_t* actual_size) {
 798   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
 799   HeapWord* res = allocate_memory(req);
 800   if (res != nullptr) {
 801     *actual_size = req.actual_size();
 802   } else {
 803     *actual_size = 0;
 804   }
 805   return res;
 806 }
 807 
 808 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
 809                                              size_t word_size,
 810                                              size_t* actual_size) {
 811   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
 812   HeapWord* res = allocate_memory(req);
 813   if (res != nullptr) {
 814     *actual_size = req.actual_size();
 815   } else {
 816     *actual_size = 0;
 817   }
 818   return res;
 819 }
 820 
 821 HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
 822   intptr_t pacer_epoch = 0;
 823   bool in_new_region = false;
 824   HeapWord* result = nullptr;
 825 
 826   if (req.is_mutator_alloc()) {
 827     if (ShenandoahPacing) {
 828       pacer()->pace_for_alloc(req.size());
 829       pacer_epoch = pacer()->epoch();
 830     }
 831 
 832     if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
 833       result = allocate_memory_under_lock(req, in_new_region);
 834     }
 835 
 836     // Allocation failed; block until the control thread has reacted, then retry the allocation.
 837     //
 838     // It might happen that one of the threads requesting allocation unblocks
 839     // long after the GC happened, only to fail the second allocation, because
 840     // other threads have already depleted the free storage. In this case, a better
 841     // strategy is to try again, as long as GC makes progress (or until at least
 842     // one full GC has completed).
 843     size_t original_count = shenandoah_policy()->full_gc_count();
 844     while (result == nullptr
 845         && (_progress_last_gc.is_set() || original_count == shenandoah_policy()->full_gc_count())) {
 846       control_thread()->handle_alloc_failure(req);
 847       result = allocate_memory_under_lock(req, in_new_region);
 848     }

 849   } else {
 850     assert(req.is_gc_alloc(), "Can only accept GC allocs here");
 851     result = allocate_memory_under_lock(req, in_new_region);
 852     // Do not call handle_alloc_failure() here, because we cannot block.
 853     // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
 854   }
 855 
 856   if (in_new_region) {
 857     control_thread()->notify_heap_changed();

 858   }
 859 
 860   if (result != nullptr) {
 861     size_t requested = req.size();
 862     size_t actual = req.actual_size();
 863 
 864     assert (req.is_lab_alloc() || (requested == actual),
 865             "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
 866             ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);
 867 
 868     if (req.is_mutator_alloc()) {
 869       notify_mutator_alloc_words(actual, false);
 870 
 871       // If we requested more than we were granted, give the rest back to pacer.
 872       // This only matters if we are in the same pacing epoch: do not try to unpace
 873       // over the budget for the other phase.
 874       if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
 875         pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
 876       }
 877     } else {
 878       increase_used(actual*HeapWordSize);
 879     }
 880   }
 881 
 882   return result;
 883 }
 884 
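
The mutator path above blocks on the control thread and retries while GC still makes progress, or until at least one full GC has completed since the first failure. A compilable sketch of that retry policy, using hypothetical helper functions that are not the VM's own API:

    #include <cstddef>

    // Hypothetical stand-ins, declared only so the sketch compiles.
    void*  try_allocate(size_t size);               // one allocation attempt under the heap lock
    void   block_until_control_thread_reacts();     // ~ handle_alloc_failure()
    bool   gc_made_progress();                      // ~ _progress_last_gc flag
    size_t full_gc_count();                         // ~ shenandoah_policy()->full_gc_count()

    void* allocate_with_retry(size_t size) {
      void*  result       = try_allocate(size);
      size_t original_gcs = full_gc_count();
      while (result == nullptr &&
             (gc_made_progress() || full_gc_count() == original_gcs)) {
        block_until_control_thread_reacts();
        result = try_allocate(size);   // retry; give up once GC stops helping
      }
      return result;
    }
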
 885 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
 886   ShenandoahHeapLocker locker(lock());
 887   return _free_set->allocate(req, in_new_region);
 888 }
 889 
 890 HeapWord* ShenandoahHeap::mem_allocate(size_t size,
 891                                         bool*  gc_overhead_limit_was_exceeded) {
 892   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
 893   return allocate_memory(req);
 894 }
 895 
 896 MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
 897                                                              size_t size,
 898                                                              Metaspace::MetadataType mdtype) {
 899   MetaWord* result;
 900 
 901   // Inform metaspace OOM to GC heuristics if class unloading is possible.
 902   if (heuristics()->can_unload_classes()) {
 903     ShenandoahHeuristics* h = heuristics();
 904     h->record_metaspace_oom();
 905   }
 906 
 907   // Expand and retry allocation
 908   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
 909   if (result != nullptr) {
 910     return result;
 911   }
 912 
 913   // Start full GC
 914   collect(GCCause::_metadata_GC_clear_soft_refs);
 915 
 916   // Retry allocation
 917   result = loader_data->metaspace_non_null()->allocate(size, mdtype);
 918   if (result != nullptr) {
 919     return result;
 920   }
 921 
 922   // Expand and retry allocation
 923   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);

 962 
 963   void work(uint worker_id) {
 964     if (_concurrent) {
 965       ShenandoahConcurrentWorkerSession worker_session(worker_id);
 966       ShenandoahSuspendibleThreadSetJoiner stsj;
 967       ShenandoahEvacOOMScope oom_evac_scope;
 968       do_work();
 969     } else {
 970       ShenandoahParallelWorkerSession worker_session(worker_id);
 971       ShenandoahEvacOOMScope oom_evac_scope;
 972       do_work();
 973     }
 974   }
 975 
 976 private:
 977   void do_work() {
 978     ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
 979     ShenandoahHeapRegion* r;
 980     while ((r =_cs->claim_next()) != nullptr) {
 981       assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());

 982       _sh->marked_object_iterate(r, &cl);
 983 
 984       if (ShenandoahPacing) {
 985         _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
 986       }
 987 
 988       if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
 989         break;
 990       }
 991     }
 992   }
 993 };
 994 
 995 void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
 996   ShenandoahEvacuationTask task(this, _collection_set, concurrent);
 997   workers()->run_task(&task);
 998 }
 999 
1000 void ShenandoahHeap::trash_cset_regions() {
1001   ShenandoahHeapLocker locker(lock());
1002 
1003   ShenandoahCollectionSet* set = collection_set();
1004   ShenandoahHeapRegion* r;
1005   set->clear_current_index();
1006   while ((r = set->next()) != nullptr) {
1007     r->make_trash();
1008   }
1009   collection_set()->clear();
1010 }
1011 
1012 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
1013   st->print_cr("Heap Regions:");
1014   st->print_cr("Region state: EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HP=pinned humongous start");
1015   st->print_cr("              HC=humongous continuation, CS=collection set, TR=trash, P=pinned, CSP=pinned collection set");
1016   st->print_cr("BTE=bottom/top/end, TAMS=top-at-mark-start");
1017   st->print_cr("UWM=update watermark, U=used");
1018   st->print_cr("T=TLAB allocs, G=GCLAB allocs");
1019   st->print_cr("S=shared allocs, L=live data");
1020   st->print_cr("CP=critical pins");
1021 
1022   for (size_t i = 0; i < num_regions(); i++) {
1023     get_region(i)->print_on(st);
1024   }
1025 }
1026 
1027 void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
1028   assert(start->is_humongous_start(), "reclaim regions starting with the first one");
1029 
1030   oop humongous_obj = cast_to_oop(start->bottom());
1031   size_t size = humongous_obj->size();
1032   size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
1033   size_t index = start->index() + required_regions - 1;
1034 
1035   assert(!start->has_live(), "liveness must be zero");
1036 
1037   for(size_t i = 0; i < required_regions; i++) {
1038     // Reclaim from the tail. Otherwise, an assertion fails when printing the region to the trace log,
1039     // as it expects every humongous region to be part of a sequence that starts with a humongous start region.
1040     ShenandoahHeapRegion* region = get_region(index --);
1041 
1042     assert(region->is_humongous(), "expect correct humongous start or continuation");
1043     assert(!region->is_cset(), "Humongous region should not be in collection set");
1044 
1045     region->make_trash_immediate();
1046   }

1047 }
1048 
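
A standalone sketch of the region math above, with assumed example numbers (a 10 MiB humongous object and 2 MiB regions): required_regions rounds up, and reclamation walks from the tail back to the humongous start region.

    #include <cstddef>
    #include <cstdio>

    int main() {
      // Assumed example values, illustrative only.
      const size_t region_bytes = 2ull * 1024 * 1024;
      const size_t obj_bytes    = 10ull * 1024 * 1024;
      const size_t start_index  = 100;                 // assumed index of the humongous start region

      const size_t required_regions = (obj_bytes + region_bytes - 1) / region_bytes;  // 5

      // Trash from the tail: continuations first, the humongous start region last.
      size_t index = start_index + required_regions - 1;
      for (size_t i = 0; i < required_regions; i++) {
        std::printf("make_trash_immediate(region %zu)\n", index--);
      }
      return 0;
    }
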
1049 class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
1050 public:
1051   ShenandoahCheckCleanGCLABClosure() {}
1052   void do_thread(Thread* thread) {
1053     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1054     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1055     assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
1056   }
1057 };
1058 
1059 class ShenandoahRetireGCLABClosure : public ThreadClosure {
1060 private:
1061   bool const _resize;
1062 public:
1063   ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
1064   void do_thread(Thread* thread) {
1065     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1066     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1067     gclab->retire();
1068     if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1069       ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1070     }
1071   }
1072 };
1073 
1074 void ShenandoahHeap::labs_make_parsable() {
1075   assert(UseTLAB, "Only call with UseTLAB");
1076 
1077   ShenandoahRetireGCLABClosure cl(false);
1078 
1079   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1080     ThreadLocalAllocBuffer& tlab = t->tlab();
1081     tlab.make_parsable();
1082     cl.do_thread(t);
1083   }
1084 
1085   workers()->threads_do(&cl);
1086 }
1087 
1088 void ShenandoahHeap::tlabs_retire(bool resize) {
1089   assert(UseTLAB, "Only call with UseTLAB");
1090   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");

1110 #endif
1111 }
1112 
1113 void ShenandoahHeap::gclabs_retire(bool resize) {
1114   assert(UseTLAB, "Only call with UseTLAB");
1115   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1116 
1117   ShenandoahRetireGCLABClosure cl(resize);
1118   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1119     cl.do_thread(t);
1120   }
1121   workers()->threads_do(&cl);
1122 
1123   if (safepoint_workers() != nullptr) {
1124     safepoint_workers()->threads_do(&cl);
1125   }
1126 }
1127 
1128 // Returns size in bytes
1129 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1130   // Return the max allowed size, and let the allocation path
1131   // figure out the safe size for the current allocation.
1132   return ShenandoahHeapRegion::max_tlab_size_bytes();
1133 }
1134 
1135 size_t ShenandoahHeap::max_tlab_size() const {
1136   // Returns size in words
1137   return ShenandoahHeapRegion::max_tlab_size_words();
1138 }
1139 
1140 void ShenandoahHeap::collect(GCCause::Cause cause) {
1141   control_thread()->request_gc(cause);
1142 }
1143 
1144 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
1145   //assert(false, "Shouldn't need to do full collections");
1146 }
1147 
1148 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1149   ShenandoahHeapRegion* r = heap_region_containing(addr);
1150   if (r != nullptr) {
1151     return r->block_start(addr);
1152   }
1153   return nullptr;
1154 }
1155 
1156 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1157   ShenandoahHeapRegion* r = heap_region_containing(addr);
1158   return r->block_is_obj(addr);
1159 }
1160 
1161 bool ShenandoahHeap::print_location(outputStream* st, void* addr) const {
1162   return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr);
1163 }
1164 
1165 void ShenandoahHeap::prepare_for_verify() {
1166   if (SafepointSynchronize::is_at_safepoint() && UseTLAB) {
1167     labs_make_parsable();
1168   }
1169 }
1170 
1171 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1172   tcl->do_thread(_control_thread);

1173   workers()->threads_do(tcl);
1174   if (_safepoint_workers != nullptr) {
1175     _safepoint_workers->threads_do(tcl);
1176   }
1177 }
1178 
1179 void ShenandoahHeap::print_tracing_info() const {
1180   LogTarget(Info, gc, stats) lt;
1181   if (lt.is_enabled()) {
1182     ResourceMark rm;
1183     LogStream ls(lt);
1184 
1185     phase_timings()->print_global_on(&ls);
1186 
1187     ls.cr();
1188     ls.cr();
1189 
1190     shenandoah_policy()->print_gc_stats(&ls);
1191 
1192     ls.cr();
1193     ls.cr();
1194   }
1195 }
1196 
1197 void ShenandoahHeap::verify(VerifyOption vo) {
1198   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1199     if (ShenandoahVerify) {
1200       verifier()->verify_generic(vo);
1201     } else {
1202       // TODO: Consider allocating verification bitmaps on demand,
1203       // and turn this on unconditionally.
1204     }
1205   }
1206 }
1207 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1208   return _free_set->capacity();
1209 }
1210 
1211 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1212 private:
1213   MarkBitMap* _bitmap;
1214   ShenandoahScanObjectStack* _oop_stack;
1215   ShenandoahHeap* const _heap;
1216   ShenandoahMarkingContext* const _marking_context;

1498       if (start >= max) break;
1499 
1500       for (size_t i = cur; i < end; i++) {
1501         ShenandoahHeapRegion* current = _heap->get_region(i);
1502         _blk->heap_region_do(current);
1503       }
1504     }
1505   }
1506 };
1507 
1508 void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1509   assert(blk->is_thread_safe(), "Only thread-safe closures here");
1510   if (num_regions() > ShenandoahParallelRegionStride) {
1511     ShenandoahParallelHeapRegionTask task(blk);
1512     workers()->run_task(&task);
1513   } else {
1514     heap_region_iterate(blk);
1515   }
1516 }
1517 
1518 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1519 private:
1520   ShenandoahMarkingContext* const _ctx;
1521 public:
1522   ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
1523 
1524   void heap_region_do(ShenandoahHeapRegion* r) {
1525     assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
1526     if (r->is_active()) {
1527       // Check if the region needs its TAMS updated. We have already updated it during concurrent
1528       // reset, so it is very likely we don't need to do another write here.
1529       if (_ctx->top_at_mark_start(r) != r->top()) {
1530         _ctx->capture_top_at_mark_start(r);
1531       }
1532     } else {
1533       assert(_ctx->top_at_mark_start(r) == r->top(),
1534              "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
1535     }
1536   }
1537 
1538   bool is_thread_safe() { return true; }
1539 };
1540 
1541 class ShenandoahRendezvousClosure : public HandshakeClosure {
1542 public:
1543   inline ShenandoahRendezvousClosure() : HandshakeClosure("ShenandoahRendezvous") {}
1544   inline void do_thread(Thread* thread) {}
1545 };
1546 
1547 void ShenandoahHeap::rendezvous_threads() {
1548   ShenandoahRendezvousClosure cl;
1549   Handshake::execute(&cl);
1550 }
1551 
1552 void ShenandoahHeap::recycle_trash() {
1553   free_set()->recycle_trash();
1554 }
1555 
1556 class ShenandoahResetUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1557 private:
1558   ShenandoahMarkingContext* const _ctx;
1559 public:
1560   ShenandoahResetUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
1561 
1562   void heap_region_do(ShenandoahHeapRegion* r) {
1563     if (r->is_active()) {
1564       // Reset live data and set TAMS optimistically. We would recheck these under the pause
1565       // anyway, to capture any updates that happen in the meantime.
1566       r->clear_live_data();
1567       _ctx->capture_top_at_mark_start(r);
1568     }
1569   }
1570 
1571   bool is_thread_safe() { return true; }
1572 };
1573 
1574 void ShenandoahHeap::prepare_gc() {
1575   reset_mark_bitmap();
1576 
1577   ShenandoahResetUpdateRegionStateClosure cl;
1578   parallel_heap_region_iterate(&cl);
1579 }
1580 
1581 class ShenandoahFinalMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1582 private:
1583   ShenandoahMarkingContext* const _ctx;
1584   ShenandoahHeapLock* const _lock;
1585 
1586 public:
1587   ShenandoahFinalMarkUpdateRegionStateClosure() :
1588     _ctx(ShenandoahHeap::heap()->complete_marking_context()), _lock(ShenandoahHeap::heap()->lock()) {}
1589 
1590   void heap_region_do(ShenandoahHeapRegion* r) {
1591     if (r->is_active()) {
1592       // All allocations past TAMS are implicitly live, adjust the region data.
1593       // Bitmaps/TAMS are swapped at this point, so we need to poll the complete bitmap.
1594       HeapWord *tams = _ctx->top_at_mark_start(r);
1595       HeapWord *top = r->top();
1596       if (top > tams) {
1597         r->increase_live_data_alloc_words(pointer_delta(top, tams));
1598       }
1599 
1600       // We are about to select the collection set, so make sure it knows about
1601       // the current pinning status. This also allows trashing more regions that
1602       // have had their pinning status dropped by now.
1603       if (r->is_pinned()) {
1604         if (r->pin_count() == 0) {
1605           ShenandoahHeapLocker locker(_lock);
1606           r->make_unpinned();
1607         }
1608       } else {
1609         if (r->pin_count() > 0) {
1610           ShenandoahHeapLocker locker(_lock);
1611           r->make_pinned();
1612         }
1613       }
1614 
1615       // Remember the limit for updating refs. It is guaranteed that no
1616       // from-space refs get written from here on.
1617       r->set_update_watermark_at_safepoint(r->top());
1618     } else {
1619       assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
1620       assert(_ctx->top_at_mark_start(r) == r->top(),
1621              "Region " SIZE_FORMAT " should have correct TAMS", r->index());
1622     }
1623   }
1624 
1625   bool is_thread_safe() { return true; }
1626 };
1627 
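
A worked example of the "allocations past TAMS are implicitly live" adjustment above, with assumed example numbers (purely illustrative):

    #include <cstddef>
    #include <cstdio>

    int main() {
      // Assumed example values for one region, in heap words; illustrative only.
      const size_t region_words = (2ull * 1024 * 1024) / 8;  // a 2 MiB region of 8-byte words
      const size_t tams_offset  = 100000;   // top at mark start (TAMS), relative to bottom
      const size_t top_offset   = 150000;   // top at final mark, after allocations during marking
      const size_t marked_live  = 60000;    // live words below TAMS found by marking

      const size_t implicit_live = top_offset - tams_offset;  // everything past TAMS counts as live
      std::printf("live data: %zu marked + %zu implicit = %zu of %zu words\n",
                  marked_live, implicit_live, marked_live + implicit_live, region_words);
      return 0;
    }
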
1628 void ShenandoahHeap::prepare_regions_and_collection_set(bool concurrent) {
1629   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
1630   {
1631     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_update_region_states :
1632                                          ShenandoahPhaseTimings::degen_gc_final_update_region_states);
1633     ShenandoahFinalMarkUpdateRegionStateClosure cl;
1634     parallel_heap_region_iterate(&cl);
1635 
1636     assert_pinned_region_status();
1637   }
1638 
1639   {
1640     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::choose_cset :
1641                                          ShenandoahPhaseTimings::degen_gc_choose_cset);
1642     ShenandoahHeapLocker locker(lock());
1643     _collection_set->clear();
1644     heuristics()->choose_collection_set(_collection_set);
1645   }
1646 
1647   {
1648     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset :
1649                                          ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
1650     ShenandoahHeapLocker locker(lock());
1651     _free_set->rebuild();
1652   }
1653 }
1654 
1655 void ShenandoahHeap::do_class_unloading() {
1656   _unloader.unload();
1657 }
1658 
1659 void ShenandoahHeap::stw_weak_refs(bool full_gc) {
1660   // Weak refs processing
1661   ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
1662                                                 : ShenandoahPhaseTimings::degen_gc_weakrefs;
1663   ShenandoahTimingsTracker t(phase);
1664   ShenandoahGCWorkerPhase worker_phase(phase);
1665   ref_processor()->process_references(phase, workers(), false /* concurrent */);
1666 }
1667 
1668 void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
1669   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
1670 
1671   // Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to
1672   // make them parsable for update code to work correctly. Plus, we can compute new sizes
1673   // for future GCLABs here.
1674   if (UseTLAB) {
1675     ShenandoahGCPhase phase(concurrent ?
1676                             ShenandoahPhaseTimings::init_update_refs_manage_gclabs :
1677                             ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
1678     gclabs_retire(ResizeTLAB);
1679   }
1680 
1681   _update_refs_iterator.reset();
1682 }
1683 
1684 void ShenandoahHeap::set_gc_state_all_threads(char state) {
1685   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1686     ShenandoahThreadLocalData::set_gc_state(t, state);
1687   }
1688 }
1689 
1690 void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) {
1691   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should really be Shenandoah safepoint");
1692   _gc_state.set_cond(mask, value);
1693   set_gc_state_all_threads(_gc_state.raw_value());
1694 }
1695 
1696 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1697   assert(!has_forwarded_objects(), "Not expected before/after mark phase");
1698   set_gc_state_mask(MARKING, in_progress);
1699   ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1700 }
1701 
1702 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1703   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
1704   set_gc_state_mask(EVACUATION, in_progress);
1705 }
1706 
1707 void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
1708   if (in_progress) {
1709     _concurrent_strong_root_in_progress.set();
1710   } else {
1711     _concurrent_strong_root_in_progress.unset();
1712   }
1713 }
1714 
1715 void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
1716   set_gc_state_mask(WEAK_ROOTS, cond);
1717 }
1718 
1719 GCTracer* ShenandoahHeap::tracer() {
1720   return shenandoah_policy()->tracer();
1721 }
1722 
1723 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
1724   return _free_set->used();
1725 }
1726 
1727 bool ShenandoahHeap::try_cancel_gc() {
1728   jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
1729   return prev == CANCELLABLE;
1730 }
1731 
1732 void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
1733   if (try_cancel_gc()) {
1734     FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
1735     log_info(gc)("%s", msg.buffer());
1736     Events::log(Thread::current(), "%s", msg.buffer());

1737   }
1738 }
1739 
1740 uint ShenandoahHeap::max_workers() {
1741   return _max_workers;
1742 }
1743 
1744 void ShenandoahHeap::stop() {
1745   // The shutdown sequence should be able to terminate when GC is running.
1746 
1747   // Step 0. Notify policy to disable event recording.
1748   _shenandoah_policy->record_shutdown();
1749 
1750   // Step 1. Notify control thread that we are in shutdown.
1751   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
1752   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
1753   control_thread()->prepare_for_graceful_shutdown();
1754 
1755   // Step 2. Notify GC workers that we are cancelling GC.
1756   cancel_gc(GCCause::_shenandoah_stop_vm);
1757 
1758   // Step 3. Wait until GC worker exits normally.
1759   control_thread()->stop();
1760 }
1761 
1762 void ShenandoahHeap::stw_unload_classes(bool full_gc) {
1763   if (!unload_classes()) return;
1764   // Unload classes and purge SystemDictionary.
1765   {
1766     ShenandoahPhaseTimings::Phase phase = full_gc ?
1767                                           ShenandoahPhaseTimings::full_gc_purge_class_unload :
1768                                           ShenandoahPhaseTimings::degen_gc_purge_class_unload;
1769     ShenandoahIsAliveSelector is_alive;
1770     CodeCache::UnloadingScope scope(is_alive.is_alive_closure());
1771     ShenandoahGCPhase gc_phase(phase);
1772     ShenandoahGCWorkerPhase worker_phase(phase);
1773     bool purged_class = SystemDictionary::do_unloading(gc_timer());
1774 
1775     uint num_workers = _workers->active_workers();
1776     ShenandoahClassUnloadingTask unlink_task(phase, num_workers, purged_class);
1777     _workers->run_task(&unlink_task);
1778   }

1832 }
1833 
1834 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
1835   set_gc_state_mask(HAS_FORWARDED, cond);
1836 }
1837 
1838 void ShenandoahHeap::set_unload_classes(bool uc) {
1839   _unload_classes.set_cond(uc);
1840 }
1841 
1842 bool ShenandoahHeap::unload_classes() const {
1843   return _unload_classes.is_set();
1844 }
1845 
1846 address ShenandoahHeap::in_cset_fast_test_addr() {
1847   ShenandoahHeap* heap = ShenandoahHeap::heap();
1848   assert(heap->collection_set() != nullptr, "Sanity");
1849   return (address) heap->collection_set()->biased_map_address();
1850 }
1851 
1852 size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
1853   return Atomic::load(&_bytes_allocated_since_gc_start);
1854 }
1855 
1856 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
1857   Atomic::store(&_bytes_allocated_since_gc_start, (size_t)0);





1858 }
1859 
1860 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
1861   _degenerated_gc_in_progress.set_cond(in_progress);
1862 }
1863 
1864 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
1865   _full_gc_in_progress.set_cond(in_progress);
1866 }
1867 
1868 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
1869   assert (is_full_gc_in_progress(), "should be");
1870   _full_gc_move_in_progress.set_cond(in_progress);
1871 }
1872 
1873 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
1874   set_gc_state_mask(UPDATEREFS, in_progress);
1875 }
1876 
1877 void ShenandoahHeap::register_nmethod(nmethod* nm) {

1901     if (r->is_active()) {
1902       if (r->is_pinned()) {
1903         if (r->pin_count() == 0) {
1904           r->make_unpinned();
1905         }
1906       } else {
1907         if (r->pin_count() > 0) {
1908           r->make_pinned();
1909         }
1910       }
1911     }
1912   }
1913 
1914   assert_pinned_region_status();
1915 }
1916 
1917 #ifdef ASSERT
1918 void ShenandoahHeap::assert_pinned_region_status() {
1919   for (size_t i = 0; i < num_regions(); i++) {
1920     ShenandoahHeapRegion* r = get_region(i);
1921     assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
1922            "Region " SIZE_FORMAT " pinning status is inconsistent", i);


1923   }
1924 }
1925 #endif
1926 
1927 ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
1928   return _gc_timer;
1929 }
1930 
1931 void ShenandoahHeap::prepare_concurrent_roots() {
1932   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
1933   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
1934   set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
1935   set_concurrent_weak_root_in_progress(true);
1936   if (unload_classes()) {
1937     _unloader.prepare();
1938   }
1939 }
1940 
1941 void ShenandoahHeap::finish_concurrent_roots() {
1942   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

1962       assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
1963     } else {
1964       // Use ConcGCThreads outside safepoints
1965       assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
1966     }
1967   }
1968 }
1969 #endif
1970 
1971 ShenandoahVerifier* ShenandoahHeap::verifier() {
1972   guarantee(ShenandoahVerify, "Should be enabled");
1973   assert (_verifier != nullptr, "sanity");
1974   return _verifier;
1975 }
1976 
1977 template<bool CONCURRENT>
1978 class ShenandoahUpdateHeapRefsTask : public WorkerTask {
1979 private:
1980   ShenandoahHeap* _heap;
1981   ShenandoahRegionIterator* _regions;


1982 public:
1983   ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions) :

1984     WorkerTask("Shenandoah Update References"),
1985     _heap(ShenandoahHeap::heap()),
1986     _regions(regions) {



1987   }
1988 
1989   void work(uint worker_id) {
1990     if (CONCURRENT) {
1991       ShenandoahConcurrentWorkerSession worker_session(worker_id);
1992       ShenandoahSuspendibleThreadSetJoiner stsj;
1993       do_work<ShenandoahConcUpdateRefsClosure>();
1994     } else {
1995       ShenandoahParallelWorkerSession worker_session(worker_id);
1996       do_work<ShenandoahSTWUpdateRefsClosure>();
1997     }
1998   }
1999 
2000 private:
2001   template<class T>
2002   void do_work() {
2003     T cl;
2004     ShenandoahHeapRegion* r = _regions->next();
2005     ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();



2006     while (r != nullptr) {
2007       HeapWord* update_watermark = r->get_update_watermark();
2008       assert (update_watermark >= r->bottom(), "sanity");



2009       if (r->is_active() && !r->is_cset()) {
2010         _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2011       }
2012       if (ShenandoahPacing) {
2013         _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
2014       }
2015       if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
2016         return;
2017       }
2018       r = _regions->next();
2019     }
2020   }
2021 };
2022 
2023 void ShenandoahHeap::update_heap_references(bool concurrent) {
2024   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");


2025 
2026   if (concurrent) {
2027     ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator);
2028     workers()->run_task(&task);
2029   } else {
2030     ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator);
2031     workers()->run_task(&task);
2032   }



2033 }
2034 
2035 
2036 class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
2037 private:

2038   ShenandoahHeapLock* const _lock;

2039 
2040 public:
2041   ShenandoahFinalUpdateRefsUpdateRegionStateClosure() : _lock(ShenandoahHeap::heap()->lock()) {}


2042 
2043   void heap_region_do(ShenandoahHeapRegion* r) {
2044     // Drop the unnecessary "pinned" state from regions that no longer have CP marks,
2045     // as this allows trashing them.
2046 
2047     if (r->is_active()) {
2048       if (r->is_pinned()) {
2049         if (r->pin_count() == 0) {
2050           ShenandoahHeapLocker locker(_lock);
2051           r->make_unpinned();
2052         }
2053       } else {
2054         if (r->pin_count() > 0) {
2055           ShenandoahHeapLocker locker(_lock);
2056           r->make_pinned();
2057         }
2058       }
2059     }
2060   }
2061 
2062   bool is_thread_safe() { return true; }
2063 };
2064 
2065 void ShenandoahHeap::update_heap_region_states(bool concurrent) {
2066   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2067   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2068 
2069   {
2070     ShenandoahGCPhase phase(concurrent ?
2071                             ShenandoahPhaseTimings::final_update_refs_update_region_states :
2072                             ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
2073     ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl;
2074     parallel_heap_region_iterate(&cl);
2075 
2076     assert_pinned_region_status();
2077   }
2078 
2079   {
2080     ShenandoahGCPhase phase(concurrent ?
2081                             ShenandoahPhaseTimings::final_update_refs_trash_cset :
2082                             ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
2083     trash_cset_regions();
2084   }
2085 }
2086 
2087 void ShenandoahHeap::rebuild_free_set(bool concurrent) {
2088   {
2089     ShenandoahGCPhase phase(concurrent ?
2090                             ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
2091                             ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
2092     ShenandoahHeapLocker locker(lock());
2093     _free_set->rebuild();
2094   }
2095 }
2096 
2097 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2098   print_on(st);
2099   st->cr();
2100   print_heap_regions_on(st);
2101 }
2102 
2103 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2104   size_t slice = r->index() / _bitmap_regions_per_slice;
2105 
2106   size_t regions_from = _bitmap_regions_per_slice * slice;
2107   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2108   for (size_t g = regions_from; g < regions_to; g++) {
2109     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2110     if (skip_self && g == r->index()) continue;
2111     if (get_region(g)->is_committed()) {
2112       return true;
2113     }

2184   EventMark em("%s", msg);
2185 
2186   op_uncommit(shrink_before, shrink_until);
2187 }
2188 
2189 void ShenandoahHeap::try_inject_alloc_failure() {
2190   if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2191     _inject_alloc_failure.set();
2192     os::naked_short_sleep(1);
2193     if (cancelled_gc()) {
2194       log_info(gc)("Allocation failure was successfully injected");
2195     }
2196   }
2197 }
2198 
2199 bool ShenandoahHeap::should_inject_alloc_failure() {
2200   return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2201 }
2202 
2203 void ShenandoahHeap::initialize_serviceability() {
2204   _memory_pool = new ShenandoahMemoryPool(this);
2205   _cycle_memory_manager.add_pool(_memory_pool);
2206   _stw_memory_manager.add_pool(_memory_pool);
2207 }
2208 
2209 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2210   GrowableArray<GCMemoryManager*> memory_managers(2);
2211   memory_managers.append(&_cycle_memory_manager);
2212   memory_managers.append(&_stw_memory_manager);
2213   return memory_managers;
2214 }
2215 
2216 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2217   GrowableArray<MemoryPool*> memory_pools(1);
2218   memory_pools.append(_memory_pool);





2219   return memory_pools;
2220 }
2221 
2222 MemoryUsage ShenandoahHeap::memory_usage() {
2223   return _memory_pool->get_memory_usage();
2224 }
2225 
2226 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2227   _heap(ShenandoahHeap::heap()),
2228   _index(0) {}
2229 
2230 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2231   _heap(heap),
2232   _index(0) {}
2233 
2234 void ShenandoahRegionIterator::reset() {
2235   _index = 0;
2236 }
2237 
2238 bool ShenandoahRegionIterator::has_next() const {
2239   return _index < _heap->num_regions();
2240 }
2241 
2242 char ShenandoahHeap::gc_state() const {
2243   return _gc_state.raw_value();
2244 }
2245 
2246 ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
2247 #ifdef ASSERT
2248   assert(_liveness_cache != nullptr, "sanity");
2249   assert(worker_id < _max_workers, "sanity");
2250   for (uint i = 0; i < num_regions(); i++) {
2251     assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
2252   }
2253 #endif
2254   return _liveness_cache[worker_id];
2255 }
2256 
2257 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
2258   assert(worker_id < _max_workers, "sanity");
2259   assert(_liveness_cache != nullptr, "sanity");
2260   ShenandoahLiveData* ld = _liveness_cache[worker_id];

2261   for (uint i = 0; i < num_regions(); i++) {
2262     ShenandoahLiveData live = ld[i];
2263     if (live > 0) {
2264       ShenandoahHeapRegion* r = get_region(i);
2265       r->increase_live_data_gc_words(live);
2266       ld[i] = 0;
2267     }
2268   }
2269 }
2270 
2271 bool ShenandoahHeap::requires_barriers(stackChunkOop obj) const {
2272   if (is_idle()) return false;
2273 
2274   // Objects allocated after marking start are implicitly alive and do not need any barriers during
2275   // the marking phase.
2276   if (is_concurrent_mark_in_progress() &&
2277      !marking_context()->allocated_after_mark_start(obj)) {
2278     return true;
2279   }
2280 
2281   // Cannot guarantee that obj is deeply good, i.e. that everything it references has been updated.
2282   if (has_forwarded_objects()) {
2283     return true;
2284   }
2285 
2286   return false;
2287 }
   1 /*
   2  * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.
   4  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
   5  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   6  *
   7  * This code is free software; you can redistribute it and/or modify it
   8  * under the terms of the GNU General Public License version 2 only, as
   9  * published by the Free Software Foundation.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 #include "precompiled.hpp"
  28 #include "memory/allocation.hpp"
  29 #include "memory/universe.hpp"
  30 
  31 #include "gc/shared/gcArguments.hpp"
  32 #include "gc/shared/gcTimer.hpp"
  33 #include "gc/shared/gcTraceTime.inline.hpp"
  34 #include "gc/shared/locationPrinter.inline.hpp"
  35 #include "gc/shared/memAllocator.hpp"
  36 #include "gc/shared/plab.hpp"
  37 #include "gc/shared/tlab_globals.hpp"
  38 
  39 #include "gc/shenandoah/shenandoahAgeCensus.hpp"
  40 #include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
  41 #include "gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp"
  42 #include "gc/shenandoah/shenandoahAllocRequest.hpp"
  43 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  44 #include "gc/shenandoah/shenandoahCardTable.hpp"
  45 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  46 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  47 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  48 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
  49 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  50 #include "gc/shenandoah/shenandoahControlThread.hpp"
  51 #include "gc/shenandoah/shenandoahRegulatorThread.hpp"
  52 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  53 #include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
  54 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  55 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  56 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  57 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  58 #include "gc/shenandoah/shenandoahInitLogger.hpp"
  59 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  60 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
  61 #include "gc/shenandoah/shenandoahMetrics.hpp"
  62 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  63 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  64 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  65 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
  66 #include "gc/shenandoah/shenandoahPadding.hpp"
  67 #include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
  68 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  69 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  70 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
  71 #include "gc/shenandoah/shenandoahSTWMark.hpp"
  72 #include "gc/shenandoah/shenandoahUtils.hpp"
  73 #include "gc/shenandoah/shenandoahVerifier.hpp"
  74 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  75 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  76 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  77 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  78 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  79 #include "gc/shenandoah/mode/shenandoahGenerationalMode.hpp"
  80 #include "gc/shenandoah/mode/shenandoahIUMode.hpp"
  81 #include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
  82 #include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
  83 #include "utilities/globalDefinitions.hpp"
  84 
  85 #if INCLUDE_JFR
  86 #include "gc/shenandoah/shenandoahJfrSupport.hpp"
  87 #endif
  88 
  89 #include "classfile/systemDictionary.hpp"
  90 #include "code/codeCache.hpp"
  91 #include "memory/classLoaderMetaspace.hpp"
  92 #include "memory/metaspaceUtils.hpp"
  93 #include "oops/compressedOops.inline.hpp"
  94 #include "prims/jvmtiTagMap.hpp"
  95 #include "runtime/atomic.hpp"
  96 #include "runtime/globals.hpp"
  97 #include "runtime/interfaceSupport.inline.hpp"
  98 #include "runtime/java.hpp"
  99 #include "runtime/orderAccess.hpp"
 100 #include "runtime/safepointMechanism.hpp"
 101 #include "runtime/vmThread.hpp"
 102 #include "services/mallocTracker.hpp"
 103 #include "services/memTracker.hpp"
 104 #include "utilities/events.hpp"

 156 jint ShenandoahHeap::initialize() {
 157   //
 158   // Figure out heap sizing
 159   //
 160 
 161   size_t init_byte_size = InitialHeapSize;
 162   size_t min_byte_size  = MinHeapSize;
 163   size_t max_byte_size  = MaxHeapSize;
 164   size_t heap_alignment = HeapAlignment;
 165 
 166   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 167 
 168   Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
 169   Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
 170 
 171   _num_regions = ShenandoahHeapRegion::region_count();
 172   assert(_num_regions == (max_byte_size / reg_size_bytes),
 173          "Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT,
 174          _num_regions, max_byte_size, reg_size_bytes);
 175 



 176   size_t num_committed_regions = init_byte_size / reg_size_bytes;
 177   num_committed_regions = MIN2(num_committed_regions, _num_regions);
 178   assert(num_committed_regions <= _num_regions, "sanity");
 179   _initial_size = num_committed_regions * reg_size_bytes;
 180 
 181   size_t num_min_regions = min_byte_size / reg_size_bytes;
 182   num_min_regions = MIN2(num_min_regions, _num_regions);
 183   assert(num_min_regions <= _num_regions, "sanity");
 184   _minimum_size = num_min_regions * reg_size_bytes;
 185 
 186   // Default to max heap size.
 187   _soft_max_size = _num_regions * reg_size_bytes;
 188 
 189   _committed = _initial_size;
 190 
 191   // Now that we know the number of regions and the heap sizes, initialize the heuristics.
 192   initialize_heuristics_generations();
 193 
 194   size_t heap_page_size   = UseLargePages ? os::large_page_size() : os::vm_page_size();
 195   size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 196   size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 197 
 198   //
 199   // Reserve and commit memory for heap
 200   //
 201 
 202   ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
 203   initialize_reserved_region(heap_rs);
 204   _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
 205   _heap_region_special = heap_rs.special();
 206 
 207   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
 208          "Misaligned heap: " PTR_FORMAT, p2i(base()));
 209   os::trace_page_sizes_for_requested_size("Heap",
 210                                           max_byte_size, heap_rs.page_size(), heap_alignment,
 211                                           heap_rs.base(), heap_rs.size());
 212 
 213 #if SHENANDOAH_OPTIMIZED_MARKTASK
 214   // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
 215   // Fail if we ever attempt to address more than we can.
 216   if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
 217     FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
 218                           "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
 219                           "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
 220                 p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
 221     vm_exit_during_initialization("Fatal Error", buf);
 222   }
 223 #endif
 224 
 225   ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
 226   if (!_heap_region_special) {
 227     os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
 228                               "Cannot commit heap memory");
 229   }
 230 
 231   BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this, _heap_region));
 232 
 233   //
 234   // After reserving the Java heap, create the card table, barriers, and workers, in dependency order
 235   //
 236   if (mode()->is_generational()) {
 237     ShenandoahDirectCardMarkRememberedSet *rs;
 238     ShenandoahCardTable* card_table = ShenandoahBarrierSet::barrier_set()->card_table();
 239     size_t card_count = card_table->cards_required(heap_rs.size() / HeapWordSize);
 240     rs = new ShenandoahDirectCardMarkRememberedSet(ShenandoahBarrierSet::barrier_set()->card_table(), card_count);
 241     _card_scan = new ShenandoahScanRemembered<ShenandoahDirectCardMarkRememberedSet>(rs);
 242 
 243     // Age census structure
 244     _age_census = new ShenandoahAgeCensus();
 245   }
 246 
 247   _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
 248   if (_workers == nullptr) {
 249     vm_exit_during_initialization("Failed necessary allocation.");
 250   } else {
 251     _workers->initialize_workers();
 252   }
 253 
 254   if (ParallelGCThreads > 1) {
 255     _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread", ParallelGCThreads);
 256     _safepoint_workers->initialize_workers();
 257   }
 258 
 259   //
 260   // Reserve and commit memory for bitmap(s)
 261   //
 262 
 263   size_t bitmap_size_orig = ShenandoahMarkBitMap::compute_size(heap_rs.size());
 264   _bitmap_size = align_up(bitmap_size_orig, bitmap_page_size);
 265 
 266   size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();
 267 
 268   guarantee(bitmap_bytes_per_region != 0,
 269             "Bitmap bytes per region should not be zero");
 270   guarantee(is_power_of_2(bitmap_bytes_per_region),
 271             "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
 272 
 273   if (bitmap_page_size > bitmap_bytes_per_region) {
 274     _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
 275     _bitmap_bytes_per_slice = bitmap_page_size;
 276   } else {
 277     _bitmap_regions_per_slice = 1;
 278     _bitmap_bytes_per_slice = bitmap_bytes_per_region;
 279   }
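       // For illustration only (hypothetical numbers): with 2 MB bitmap pages and 32 KB of
       // bitmap per region, a slice covers 64 regions and is committed or uncommitted as a
       // single 2 MB unit; with 4 KB pages the else-branch applies and every region gets
       // its own slice.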
 280 
 281   guarantee(_bitmap_regions_per_slice >= 1,
 282             "Should have at least one region per slice: " SIZE_FORMAT,
 283             _bitmap_regions_per_slice);
 284 
 285   guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
 286             "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
 287             _bitmap_bytes_per_slice, bitmap_page_size);
 288 
 289   ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
 290   os::trace_page_sizes_for_requested_size("Mark Bitmap",
 291                                           bitmap_size_orig, bitmap.page_size(), bitmap_page_size,
 292                                           bitmap.base(),
 293                                           bitmap.size());
 294   MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
 295   _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
 296   _bitmap_region_special = bitmap.special();
 297 
 298   size_t bitmap_init_commit = _bitmap_bytes_per_slice *
 299                               align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
 300   bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
 301   if (!_bitmap_region_special) {
 302     os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
 303                               "Cannot commit bitmap memory");
 304   }
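       // For illustration (hypothetical numbers): with 10 initially committed regions and
       // 8 regions per slice, align_up(10, 8) == 16, so two full bitmap slices are committed
       // up front, capped at _bitmap_size by the MIN2 above.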
 305 
 306   _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);
 307 
 308   if (ShenandoahVerify) {
 309     ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
 310     os::trace_page_sizes_for_requested_size("Verify Bitmap",
 311                                             bitmap_size_orig, verify_bitmap.page_size(), bitmap_page_size,
 312                                             verify_bitmap.base(),
 313                                             verify_bitmap.size());
 314     if (!verify_bitmap.special()) {
 315       os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
 316                                 "Cannot commit verification bitmap memory");
 317     }
 318     MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
 319     MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
 320     _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
 321     _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
 322   }
 323 
 324   // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
 325   size_t aux_bitmap_page_size = bitmap_page_size;
 326 #ifdef LINUX
 327   // In THP "advise" mode, we refrain from advising the system to use large pages
 328   // since we know these commits will be short-lived, and there is no reason to trash
 329   // the THP area with this bitmap.
 330   if (UseTransparentHugePages) {
 331     aux_bitmap_page_size = os::vm_page_size();
 332   }
 333 #endif
 334   ReservedSpace aux_bitmap(_bitmap_size, aux_bitmap_page_size);
 335   os::trace_page_sizes_for_requested_size("Aux Bitmap",
 336                                           bitmap_size_orig, aux_bitmap.page_size(), aux_bitmap_page_size,
 337                                           aux_bitmap.base(), aux_bitmap.size());
 338   MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
 339   _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
 340   _aux_bitmap_region_special = aux_bitmap.special();
 341   _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
 342 
 343   //
 344   // Create regions and region sets
 345   //
 346   size_t region_align = align_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE);
 347   size_t region_storage_size_orig = region_align * _num_regions;
 348   size_t region_storage_size = align_up(region_storage_size_orig,
 349                                         MAX2(region_page_size, os::vm_allocation_granularity()));
 350 
 351   ReservedSpace region_storage(region_storage_size, region_page_size);
 352   os::trace_page_sizes_for_requested_size("Region Storage",
 353                                           region_storage_size_orig, region_storage.page_size(), region_page_size,
 354                                           region_storage.base(), region_storage.size());
 355   MemTracker::record_virtual_memory_type(region_storage.base(), mtGC);
 356   if (!region_storage.special()) {
 357     os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false,
 358                               "Cannot commit region memory");
 359   }
 360 
 361   // Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks.
 362   // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there.
 363   // If not successful, bite the bullet and allocate at whatever address is available.
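       // For example, with a 4 KB allocation granularity the loop below probes 4 KB, 8 KB,
       // 16 KB, ... up to 1 GB as candidate base addresses; the fallback path afterwards
       // reserves at an arbitrary address if none of the probes succeed.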
 364   {
 365     const size_t cset_align = MAX2<size_t>(os::vm_page_size(), os::vm_allocation_granularity());
 366     const size_t cset_size = align_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align);
 367     const size_t cset_page_size = os::vm_page_size();
 368 
 369     uintptr_t min = round_up_power_of_2(cset_align);
 370     uintptr_t max = (1u << 30u);
 371     ReservedSpace cset_rs;
 372 
 373     for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
 374       char* req_addr = (char*)addr;
 375       assert(is_aligned(req_addr, cset_align), "Should be aligned");
 376       cset_rs = ReservedSpace(cset_size, cset_align, cset_page_size, req_addr);
 377       if (cset_rs.is_reserved()) {
 378         assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
 379         _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 380         break;
 381       }
 382     }
 383 
 384     if (_collection_set == nullptr) {
 385       cset_rs = ReservedSpace(cset_size, cset_align, os::vm_page_size());
 386       _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 387     }
 388     os::trace_page_sizes_for_requested_size("Collection Set",
 389                                             cset_size, cset_rs.page_size(), cset_page_size,
 390                                             cset_rs.base(),
 391                                             cset_rs.size());
 392   }
 393 
 394   _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
 395   _affiliations = NEW_C_HEAP_ARRAY(uint8_t, _num_regions, mtGC);
 396   _free_set = new ShenandoahFreeSet(this, _num_regions);
 397 
 398   {
 399     ShenandoahHeapLocker locker(lock());
 400 
 401 
 402     for (size_t i = 0; i < _num_regions; i++) {
 403       HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
 404       bool is_committed = i < num_committed_regions;
 405       void* loc = region_storage.base() + i * region_align;
 406 
 407       ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
 408       assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");
 409 
 410       _marking_context->initialize_top_at_mark_start(r);
 411       _regions[i] = r;
 412       assert(!collection_set()->is_in(i), "New region should not be in collection set");
 413 
 414       _affiliations[i] = ShenandoahAffiliation::FREE;
 415     }
 416 
 417     // Initialize to complete
 418     _marking_context->mark_complete();
 419     size_t young_cset_regions, old_cset_regions;
 420 
 421     // We are initializing the free set; cset region tallies are ignored here.
 422     size_t first_old, last_old, num_old;
 423     _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
 424     _free_set->rebuild(young_cset_regions, old_cset_regions);
 425   }
 426 
 427   if (AlwaysPreTouch) {
 428     // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
 429     // before initialize() below zeroes it with the initializing thread. For any given region,
 430     // we touch the region and the corresponding bitmaps from the same thread.
 431     ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
 432 
 433     _pretouch_heap_page_size = heap_page_size;
 434     _pretouch_bitmap_page_size = bitmap_page_size;
 435 
 436 #ifdef LINUX
 437     // UseTransparentHugePages would madvise that backing memory can be coalesced into huge
 438     // pages. But the kernel needs to know that every small page is used, in order to coalesce
 439     // them into huge ones. Therefore, we need to pretouch with smaller pages.
 440     if (UseTransparentHugePages) {
 441       _pretouch_heap_page_size = (size_t)os::vm_page_size();
 442       _pretouch_bitmap_page_size = (size_t)os::vm_page_size();
 443     }
 444 #endif

 466   // There should probably be Shenandoah-specific options for these,
 467   // just as there are G1-specific options.
 468   {
 469     ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
 470     satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
 471     satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
 472   }
 473 
 474   _monitoring_support = new ShenandoahMonitoringSupport(this);
 475   _phase_timings = new ShenandoahPhaseTimings(max_workers());
 476   ShenandoahCodeRoots::initialize();
 477 
 478   if (ShenandoahPacing) {
 479     _pacer = new ShenandoahPacer(this);
 480     _pacer->setup_for_idle();
 481   } else {
 482     _pacer = nullptr;
 483   }
 484 
 485   _control_thread = new ShenandoahControlThread();
 486   _regulator_thread = new ShenandoahRegulatorThread(_control_thread);
 487 
 488   print_init_logger();
 489 
 490   return JNI_OK;
 491 }
 492 
 493 void ShenandoahHeap::print_init_logger() const {
 494   ShenandoahInitLogger::print();
 495 }
 496 
 497 size_t ShenandoahHeap::max_size_for(ShenandoahGeneration* generation) const {
 498   switch (generation->type()) {
 499     case YOUNG:
 500       return _generation_sizer.max_young_size();
 501     case OLD:
 502       return max_capacity() - _generation_sizer.min_young_size();
 503     case GLOBAL_GEN:
 504     case GLOBAL_NON_GEN:
 505       return max_capacity();
 506     default:
 507       ShouldNotReachHere();
 508       return 0;
 509   }
 510 }
 511 
 512 size_t ShenandoahHeap::min_size_for(ShenandoahGeneration* generation) const {
 513   switch (generation->type()) {
 514     case YOUNG:
 515       return _generation_sizer.min_young_size();
 516     case OLD:
 517       return max_capacity() - _generation_sizer.max_young_size();
 518     case GLOBAL_GEN:
 519     case GLOBAL_NON_GEN:
 520       return min_capacity();
 521     default:
 522       ShouldNotReachHere();
 523       return 0;
 524   }
 525 }
 526 
 527 void ShenandoahHeap::initialize_heuristics_generations() {
 528   if (ShenandoahGCMode != nullptr) {
 529     if (strcmp(ShenandoahGCMode, "satb") == 0) {
 530       _gc_mode = new ShenandoahSATBMode();
 531     } else if (strcmp(ShenandoahGCMode, "iu") == 0) {
 532       _gc_mode = new ShenandoahIUMode();
 533     } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
 534       _gc_mode = new ShenandoahPassiveMode();
 535     } else if (strcmp(ShenandoahGCMode, "generational") == 0) {
 536       _gc_mode = new ShenandoahGenerationalMode();
 537     } else {
 538       vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
 539     }
 540   } else {
 541     vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
 542   }
 543   _gc_mode->initialize_flags();
 544   if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 545     vm_exit_during_initialization(
 546             err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 547                     _gc_mode->name()));
 548   }
 549   if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
 550     vm_exit_during_initialization(
 551             err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 552                     _gc_mode->name()));
 553   }

 554 
 555   // Max capacity is the maximum _allowed_ capacity. That is, the maximum allowed capacity
 556   // for old would be total heap - minimum capacity of young. This means the sum of the maximum
 557   // allowed for old and young could exceed the total heap size. It remains the case that the
 558   // _actual_ capacity of young + old = total.
 559   _generation_sizer.heap_size_changed(max_capacity());
 560   size_t initial_capacity_young = _generation_sizer.max_young_size();
 561   size_t max_capacity_young = _generation_sizer.max_young_size();
 562   size_t initial_capacity_old = max_capacity() - max_capacity_young;
 563   size_t max_capacity_old = max_capacity() - initial_capacity_young;
 564 
 565   _young_generation = new ShenandoahYoungGeneration(_max_workers, max_capacity_young, initial_capacity_young);
 566   _old_generation = new ShenandoahOldGeneration(_max_workers, max_capacity_old, initial_capacity_old);
 567   _global_generation = new ShenandoahGlobalGeneration(_gc_mode->is_generational(), _max_workers, max_capacity(), max_capacity());
 568   _global_generation->initialize_heuristics(_gc_mode);
 569   if (mode()->is_generational()) {
 570     _young_generation->initialize_heuristics(_gc_mode);
 571     _old_generation->initialize_heuristics(_gc_mode);


 572   }
 573   _evac_tracker = new ShenandoahEvacuationTracker(mode()->is_generational());
 574 }
 575 
 576 #ifdef _MSC_VER
 577 #pragma warning( push )
 578 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 579 #endif
 580 
 581 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 582   CollectedHeap(),
 583   _gc_generation(nullptr),
 584   _initial_size(0),
 585   _promotion_potential(0),
 586   _committed(0),
 587   _max_workers(MAX3(ConcGCThreads, ParallelGCThreads, 1U)),

 588   _workers(nullptr),
 589   _safepoint_workers(nullptr),
 590   _heap_region_special(false),
 591   _num_regions(0),
 592   _regions(nullptr),
 593   _affiliations(nullptr),
 594   _update_refs_iterator(this),
 595   _promoted_reserve(0),
 596   _old_evac_reserve(0),
 597   _young_evac_reserve(0),
 598   _age_census(nullptr),
 599   _has_evacuation_reserve_quantities(false),
 600   _cancel_requested_time(0),
 601   _young_generation(nullptr),
 602   _global_generation(nullptr),
 603   _old_generation(nullptr),
 604   _control_thread(nullptr),
 605   _regulator_thread(nullptr),
 606   _shenandoah_policy(policy),


 607   _free_set(nullptr),
 608   _pacer(nullptr),
 609   _verifier(nullptr),
 610   _phase_timings(nullptr),
 611   _evac_tracker(nullptr),
 612   _mmu_tracker(),
 613   _generation_sizer(),
 614   _monitoring_support(nullptr),
 615   _memory_pool(nullptr),
 616   _young_gen_memory_pool(nullptr),
 617   _old_gen_memory_pool(nullptr),
 618   _stw_memory_manager("Shenandoah Pauses"),
 619   _cycle_memory_manager("Shenandoah Cycles"),
 620   _gc_timer(new ConcurrentGCTimer()),
 621   _soft_ref_policy(),
 622   _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
 623   _old_regions_surplus(0),
 624   _old_regions_deficit(0),
 625   _marking_context(nullptr),
 626   _bitmap_size(0),
 627   _bitmap_regions_per_slice(0),
 628   _bitmap_bytes_per_slice(0),
 629   _bitmap_region_special(false),
 630   _aux_bitmap_region_special(false),
 631   _liveness_cache(nullptr),
 632   _collection_set(nullptr),
 633   _card_scan(nullptr)
 634 {
 635 }
 636 
 637 #ifdef _MSC_VER
 638 #pragma warning( pop )
 639 #endif
 640 
 641 void ShenandoahHeap::print_on(outputStream* st) const {
 642   st->print_cr("Shenandoah Heap");
 643   st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
 644                byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
 645                byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
 646                byte_size_in_proper_unit(committed()),    proper_unit_for_byte_size(committed()),
 647                byte_size_in_proper_unit(used()),         proper_unit_for_byte_size(used()));
 648   st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions",
 649                num_regions(),
 650                byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
 651                proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));
 652 
 653   st->print("Status: ");
 654   if (has_forwarded_objects())                 st->print("has forwarded objects, ");
 655   if (is_concurrent_old_mark_in_progress())    st->print("old marking, ");
 656   if (is_concurrent_young_mark_in_progress())  st->print("young marking, ");
 657   if (is_evacuation_in_progress())             st->print("evacuating, ");
 658   if (is_update_refs_in_progress())            st->print("updating refs, ");
 659   if (is_degenerated_gc_in_progress())         st->print("degenerated gc, ");
 660   if (is_full_gc_in_progress())                st->print("full gc, ");
 661   if (is_full_gc_move_in_progress())           st->print("full gc move, ");
 662   if (is_concurrent_weak_root_in_progress())   st->print("concurrent weak roots, ");
 663   if (is_concurrent_strong_root_in_progress() &&
 664       !is_concurrent_weak_root_in_progress())  st->print("concurrent strong roots, ");
 665 
 666   if (cancelled_gc()) {
 667     st->print("cancelled");
 668   } else {
 669     st->print("not cancelled");
 670   }
 671   st->cr();
 672 
 673   st->print_cr("Reserved region:");
 674   st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
 675                p2i(reserved_region().start()),
 676                p2i(reserved_region().end()));

 687   st->cr();
 688   MetaspaceUtils::print_on(st);
 689 
 690   if (Verbose) {
 691     st->cr();
 692     print_heap_regions_on(st);
 693   }
 694 }
 695 
 696 class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
 697 public:
 698   void do_thread(Thread* thread) {
 699     assert(thread != nullptr, "Sanity");
 700     assert(thread->is_Worker_thread(), "Only worker thread expected");
 701     ShenandoahThreadLocalData::initialize_gclab(thread);
 702   }
 703 };
 704 
 705 void ShenandoahHeap::post_initialize() {
 706   CollectedHeap::post_initialize();
 707   _mmu_tracker.initialize();
 708 
 709   MutexLocker ml(Threads_lock);
 710 
 711   ShenandoahInitWorkerGCLABClosure init_gclabs;
 712   _workers->threads_do(&init_gclabs);
 713 
 714   // GCLABs cannot be initialized early during VM startup, as their max_size cannot be determined yet.
 715   // Instead, we let WorkerThreads initialize the gclab when a new worker is created.
 716   _workers->set_initialize_gclab();
 717   if (_safepoint_workers != nullptr) {
 718     _safepoint_workers->threads_do(&init_gclabs);
 719     _safepoint_workers->set_initialize_gclab();
 720   }
 721 


 722   JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers());
 723 }
 724 
 725 ShenandoahHeuristics* ShenandoahHeap::heuristics() {
 726   return _global_generation->heuristics();
 727 }
 728 
 729 ShenandoahOldHeuristics* ShenandoahHeap::old_heuristics() {
 730   return (ShenandoahOldHeuristics*) _old_generation->heuristics();
 731 }
 732 
 733 ShenandoahYoungHeuristics* ShenandoahHeap::young_heuristics() {
 734   return (ShenandoahYoungHeuristics*) _young_generation->heuristics();
 735 }
 736 
 737 bool ShenandoahHeap::doing_mixed_evacuations() {
 738   return _old_generation->state() == ShenandoahOldGeneration::EVACUATING;
 739 }
 740 
 741 bool ShenandoahHeap::is_old_bitmap_stable() const {
 742   return _old_generation->is_mark_complete();
 743 }
 744 
 745 bool ShenandoahHeap::is_gc_generation_young() const {
 746   return _gc_generation != nullptr && _gc_generation->is_young();
 747 }
 748 
 749 size_t ShenandoahHeap::used() const {
 750   return global_generation()->used();
 751 }
 752 
 753 size_t ShenandoahHeap::committed() const {
 754   return Atomic::load(&_committed);
 755 }
 756 
 757 void ShenandoahHeap::increase_committed(size_t bytes) {
 758   shenandoah_assert_heaplocked_or_safepoint();
 759   _committed += bytes;
 760 }
 761 
 762 void ShenandoahHeap::decrease_committed(size_t bytes) {
 763   shenandoah_assert_heaplocked_or_safepoint();
 764   _committed -= bytes;
 765 }
 766 
 767 // For tracking usage based on allocations, it should be the case that:
 768 // * The sum of regions::used == heap::used
 769 // * The sum of a generation's regions::used == generation::used
 770 // * The sum of a generation's humongous regions::free == generation::humongous_waste
 771 // These invariants are checked by the verifier on GC safepoints.
 772 //
 773 // Additional notes:
 774 // * When a mutator's allocation request causes a region to be retired, the
 775 //   free memory left in that region is considered waste. It does not contribute
 776 //   to the usage, but it _does_ contribute to allocation rate.
 777 // * The bottom of a PLAB must be aligned on card size. In some cases this will
 778 //   require padding in front of the PLAB (a filler object). Because this padding
 779 //   is included in the region's used memory, we include the padding in the usage
 780 //   accounting as waste.
 781 // * Mutator allocations are used to compute an allocation rate. They are also
 782 //   sent to the Pacer for those purposes.
 783 // * There are three sources of waste:
 784 //  1. The padding used to align a PLAB on card size
 785 //  2. Region's free is less than minimum TLAB size and is retired
 786 //  3. The unused portion of memory in the last region of a humongous object
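     // For illustration (hypothetical numbers):
     // * A mutator asks for 100 words while the current region has only 40 free words: the
     //   region is retired, its 40 words count as waste (allocation rate, but not usage),
     //   and the 100-word object lands in a fresh region, raising both usage and the
     //   allocation counter.
     // * A PLAB that needs a 16-word filler to align its bottom on a card boundary has
     //   those 16 filler words accounted as waste as well (source 1 above).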
 787 void ShenandoahHeap::increase_used(const ShenandoahAllocRequest& req) {
 788   size_t actual_bytes = req.actual_size() * HeapWordSize;
 789   size_t wasted_bytes = req.waste() * HeapWordSize;
 790   ShenandoahGeneration* generation = generation_for(req.affiliation());
 791 
 792   if (req.is_gc_alloc()) {
 793     assert(wasted_bytes == 0 || req.type() == ShenandoahAllocRequest::_alloc_plab, "Only PLABs have waste");
 794     increase_used(generation, actual_bytes + wasted_bytes);
 795   } else {
 796     assert(req.is_mutator_alloc(), "Expected mutator alloc here");
 797     // padding and actual size both count towards allocation counter
 798     generation->increase_allocated(actual_bytes + wasted_bytes);
 799 
 800     // only actual size counts toward usage for mutator allocations
 801     increase_used(generation, actual_bytes);
 802 
 803     // notify pacer of both actual size and waste
 804     notify_mutator_alloc_words(req.actual_size(), req.waste());
 805 
 806     if (wasted_bytes > 0 && req.actual_size() > ShenandoahHeapRegion::humongous_threshold_words()) {
 807       increase_humongous_waste(generation, wasted_bytes);
 808     }
 809   }
 810 }
 811 
 812 void ShenandoahHeap::increase_humongous_waste(ShenandoahGeneration* generation, size_t bytes) {
 813   generation->increase_humongous_waste(bytes);
 814   if (!generation->is_global()) {
 815     global_generation()->increase_humongous_waste(bytes);
 816   }
 817 }
 818 
 819 void ShenandoahHeap::decrease_humongous_waste(ShenandoahGeneration* generation, size_t bytes) {
 820   generation->decrease_humongous_waste(bytes);
 821   if (!generation->is_global()) {
 822     global_generation()->decrease_humongous_waste(bytes);
 823   }
 824 }
 825 
 826 void ShenandoahHeap::increase_used(ShenandoahGeneration* generation, size_t bytes) {
 827   generation->increase_used(bytes);
 828   if (!generation->is_global()) {
 829     global_generation()->increase_used(bytes);
 830   }
 831 }
 832 
 833 void ShenandoahHeap::decrease_used(ShenandoahGeneration* generation, size_t bytes) {
 834   generation->decrease_used(bytes);
 835   if (!generation->is_global()) {
 836     global_generation()->decrease_used(bytes);
 837   }
 838 }
 839 
 840 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, size_t waste) {
 841   if (ShenandoahPacing) {
 842     control_thread()->pacing_notify_alloc(words);
 843     if (waste > 0) {
 844       pacer()->claim_for_alloc(waste, true);
 845     }
 846   }
 847 }
 848 
 849 size_t ShenandoahHeap::capacity() const {
 850   return committed();
 851 }
 852 
 853 size_t ShenandoahHeap::max_capacity() const {
 854   return _num_regions * ShenandoahHeapRegion::region_size_bytes();
 855 }
 856 
 857 size_t ShenandoahHeap::soft_max_capacity() const {
 858   size_t v = Atomic::load(&_soft_max_size);
 859   assert(min_capacity() <= v && v <= max_capacity(),
 860          "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
 861          min_capacity(), v, max_capacity());
 862   return v;
 863 }
 864 
 865 void ShenandoahHeap::set_soft_max_capacity(size_t v) {
 866   assert(min_capacity() <= v && v <= max_capacity(),
 867          "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
 868          min_capacity(), v, max_capacity());
 869   Atomic::store(&_soft_max_size, v);
 870 }
 871 
 872 size_t ShenandoahHeap::min_capacity() const {
 873   return _minimum_size;
 874 }
 875 
 876 size_t ShenandoahHeap::initial_capacity() const {
 877   return _initial_size;
 878 }
 879 






 880 void ShenandoahHeap::op_uncommit(double shrink_before, size_t shrink_until) {
 881   assert (ShenandoahUncommit, "should be enabled");
 882 
 883   // Application allocates from the beginning of the heap, and GC allocates at
 884   // the end of it. It is more efficient to uncommit from the end, so that the application
 885   // can keep using the committed regions near the bottom of the heap. GC allocations are
 886   // much less frequent, and can therefore absorb the commit costs.
 887 
 888   size_t count = 0;
 889   for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow
 890     ShenandoahHeapRegion* r = get_region(i - 1);
 891     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
 892       ShenandoahHeapLocker locker(lock());
 893       if (r->is_empty_committed()) {
 894         if (committed() < shrink_until + ShenandoahHeapRegion::region_size_bytes()) {
 895           break;
 896         }
 897 
 898         r->make_uncommitted();
 899         count++;
 900       }
 901     }
 902     SpinPause(); // allow allocators to take the lock
 903   }
 904 
 905   if (count > 0) {
 906     control_thread()->notify_heap_changed();
 907     regulator_thread()->notify_heap_changed();
 908   }
 909 }
 910 
 911 void ShenandoahHeap::handle_old_evacuation(HeapWord* obj, size_t words, bool promotion) {
 912   // Only register the copy of the object that won the evacuation race.
 913   card_scan()->register_object_without_lock(obj);
 914 
 915   // Mark the entire range of the evacuated object as dirty.  At the next remembered set scan,
 916   // we will clear the dirty bits that do not hold interesting pointers.  It is more efficient to
 917   // do this in batch, in a background GC thread, than to try to carefully dirty only the cards
 918   // that hold interesting pointers right now.
 919   card_scan()->mark_range_as_dirty(obj, words);
 920 
 921   if (promotion) {
 922     // This evacuation was a promotion, track this as allocation against old gen
 923     old_generation()->increase_allocated(words * HeapWordSize);
 924   }
 925 }
 926 
 927 void ShenandoahHeap::handle_old_evacuation_failure() {
 928   if (_old_gen_oom_evac.try_set()) {
 929     log_info(gc)("Old gen evac failure.");
 930   }
 931 }
 932 
 933 void ShenandoahHeap::report_promotion_failure(Thread* thread, size_t size) {
 934   // We squelch excessive reports to reduce noise in logs.
 935   const size_t MaxReportsPerEpoch = 4;
 936   static size_t last_report_epoch = 0;
 937   static size_t epoch_report_count = 0;
 938 
 939   size_t promotion_reserve;
 940   size_t promotion_expended;
 941 
 942   size_t gc_id = control_thread()->get_gc_id();
 943 
 944   if ((gc_id != last_report_epoch) || (epoch_report_count++ < MaxReportsPerEpoch)) {
 945     {
 946       // Promotion failures should be very rare.  Invest in providing useful diagnostic info.
 947       ShenandoahHeapLocker locker(lock());
 948       promotion_reserve = get_promoted_reserve();
 949       promotion_expended = get_promoted_expended();
 950     }
 951     PLAB* plab = ShenandoahThreadLocalData::plab(thread);
 952     size_t words_remaining = (plab == nullptr)? 0: plab->words_remaining();
 953     const char* promote_enabled = ShenandoahThreadLocalData::allow_plab_promotions(thread)? "enabled": "disabled";
 954     ShenandoahGeneration* old_gen = old_generation();
 955     size_t old_capacity = old_gen->max_capacity();
 956     size_t old_usage = old_gen->used();
 957     size_t old_free_regions = old_gen->free_unaffiliated_regions();
 958 
 959     log_info(gc, ergo)("Promotion failed, size " SIZE_FORMAT ", has plab? %s, PLAB remaining: " SIZE_FORMAT
 960                        ", plab promotions %s, promotion reserve: " SIZE_FORMAT ", promotion expended: " SIZE_FORMAT
 961                        ", old capacity: " SIZE_FORMAT ", old_used: " SIZE_FORMAT ", old unaffiliated regions: " SIZE_FORMAT,
 962                        size * HeapWordSize, plab == nullptr? "no": "yes",
 963                        words_remaining * HeapWordSize, promote_enabled, promotion_reserve, promotion_expended,
 964                        old_capacity, old_usage, old_free_regions);
 965 
 966     if ((gc_id == last_report_epoch) && (epoch_report_count >= MaxReportsPerEpoch)) {
 967       log_info(gc, ergo)("Squelching additional promotion failure reports for current epoch");
 968     } else if (gc_id != last_report_epoch) {
 969       last_report_epoch = gc_id;
 970       epoch_report_count = 1;
 971     }
 972   }
 973 }
 974 
 975 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
 976   // New object should fit the GCLAB size
 977   size_t min_size = MAX2(size, PLAB::min_size());
 978 
 979   // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
 980   size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
 981 
 982   // Limit growth of GCLABs to ShenandoahMaxEvacLABRatio * the minimum size.  This enables more equitable distribution of
 983   // the available evacuation budget among the many threads that are coordinating in the evacuation effort.
 984   if (ShenandoahMaxEvacLABRatio > 0) {
 985     log_debug(gc, free)("Allocate new gclab: " SIZE_FORMAT ", " SIZE_FORMAT, new_size, PLAB::min_size() * ShenandoahMaxEvacLABRatio);
 986     new_size = MIN2(new_size, PLAB::min_size() * ShenandoahMaxEvacLABRatio);
 987   }
 988 
 989   new_size = MIN2(new_size, PLAB::max_size());
 990   new_size = MAX2(new_size, PLAB::min_size());
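       // For illustration (hypothetical numbers): with a current GCLAB of 64 K words,
       // doubling asks for 128 K words; with ShenandoahMaxEvacLABRatio == 32 and
       // PLAB::min_size() == 2 K words the cap is 64 K words, which is then clamped
       // into [PLAB::min_size(), PLAB::max_size()] by the lines above.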
 991 
 992   // Record the new heuristic value even if we take a shortcut below. This captures
 993   // the case when moderately-sized objects always take a shortcut. At some point,
 994   // heuristics should catch up with them.
 995   ShenandoahThreadLocalData::set_gclab_size(thread, new_size);
 996 
 997   if (new_size < size) {
 998     // New size still does not fit the object. Fall back to shared allocation.
 999     // This avoids retiring perfectly good GCLABs, when we encounter a large object.
1000     log_debug(gc, free)("New gclab size (" SIZE_FORMAT ") is too small for " SIZE_FORMAT, new_size, size);
1001     return nullptr;
1002   }
1003 
1004   // Retire current GCLAB, and allocate a new one.
1005   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1006   gclab->retire();
1007 
1008   size_t actual_size = 0;
1009   HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
1010   if (gclab_buf == nullptr) {
1011     return nullptr;
1012   }
1013 
1014   assert (size <= actual_size, "allocation should fit");
1015 
1016   if (ZeroTLAB) {
1017     // ..and clear it.
1018     Copy::zero_to_words(gclab_buf, actual_size);
1019   } else {
1020     // ...and zap just allocated object.
1021 #ifdef ASSERT
1022     // Skip mangling the space corresponding to the object header to
1023     // ensure that the returned space is not considered parsable by
1024     // any concurrent GC thread.
1025     size_t hdr_size = oopDesc::header_size();
1026     Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
1027 #endif // ASSERT
1028   }
1029   gclab->set_buf(gclab_buf, actual_size);
1030   return gclab->allocate(size);
1031 }
1032 
1033 // Establish a new PLAB and allocate size HeapWords within it.
1034 HeapWord* ShenandoahHeap::allocate_from_plab_slow(Thread* thread, size_t size, bool is_promotion) {
1035   // New object should fit the PLAB size
1036   size_t min_size = MAX2(size, PLAB::min_size());
1037 
1038   // Figure out size of new PLAB, looking back at heuristics. Expand aggressively.
1039   size_t cur_size = ShenandoahThreadLocalData::plab_size(thread);
1040   if (cur_size == 0) {
1041     cur_size = PLAB::min_size();
1042   }
1043   size_t future_size = cur_size * 2;
1044   // Limit growth of PLABs to ShenandoahMaxEvacLABRatio * the minimum size.  This enables more equitable distribution of
1045   // available evacuation budget among the many threads that are coordinating in the evacuation effort.
1046   if (ShenandoahMaxEvacLABRatio > 0) {
1047     future_size = MIN2(future_size, PLAB::min_size() * ShenandoahMaxEvacLABRatio);
1048   }
1049   future_size = MIN2(future_size, PLAB::max_size());
1050   future_size = MAX2(future_size, PLAB::min_size());
1051 
1052   size_t unalignment = future_size % CardTable::card_size_in_words();
1053   if (unalignment != 0) {
1054     future_size = future_size - unalignment + CardTable::card_size_in_words();
1055   }
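       // For example (illustrative numbers): with 64-word cards, a future_size of 1000 words has an
       // unalignment of 1000 % 64 = 40 and is therefore rounded up to 1000 - 40 + 64 = 1024 words.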
1056 
1057   // Record new heuristic value even if we take any shortcut. This captures
1058   // the case when moderately-sized objects always take a shortcut. At some point,
1059   // heuristics should catch up with them.  Note that the requested cur_size may
1060   // not be honored, but we remember that this is the preferred size.
1061   ShenandoahThreadLocalData::set_plab_size(thread, future_size);
1062   if (cur_size < size) {
1063     // The PLAB to be allocated is still not large enough to hold the object. Fall back to shared allocation.
1064     // This avoids retiring perfectly good PLABs in order to represent a single large object allocation.
1065     return nullptr;
1066   }
1067 
1068   // Retire current PLAB, and allocate a new one.
1069   PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1070   if (plab->words_remaining() < PLAB::min_size()) {
1071     // Retire current PLAB, and allocate a new one.
1072     // CAUTION: retire_plab may register the remnant filler object with the remembered set scanner without a lock.  This
1073     // is safe iff it is assured that each PLAB is a whole-number multiple of card-mark memory size and each PLAB is
1074     // aligned with the start of a card's memory range.
1075     retire_plab(plab, thread);
1076 
1077     size_t actual_size = 0;
1078     // allocate_new_plab resets plab_evacuated and plab_promoted and disables promotions if old-gen available is
1079     // less than the remaining evacuation need.  It also adjusts plab_preallocated and expend_promoted if appropriate.
1080     HeapWord* plab_buf = allocate_new_plab(min_size, cur_size, &actual_size);
1081     if (plab_buf == nullptr) {
1082       if (min_size == PLAB::min_size()) {
1083         // Disable plab promotions for this thread because we cannot even allocate a plab of minimal size.  This allows us
1084         // to fail faster on subsequent promotion attempts.
1085         ShenandoahThreadLocalData::disable_plab_promotions(thread);
1086       }
1087       return nullptr;
1088     } else {
1089       ShenandoahThreadLocalData::enable_plab_retries(thread);
1090     }
1091     assert (size <= actual_size, "allocation should fit");
1092     if (ZeroTLAB) {
1093       // ..and clear it.
1094       Copy::zero_to_words(plab_buf, actual_size);
1095     } else {
1096       // ...and zap just allocated object.
1097 #ifdef ASSERT
1098       // Skip mangling the space corresponding to the object header to
1099       // ensure that the returned space is not considered parsable by
1100       // any concurrent GC thread.
1101       size_t hdr_size = oopDesc::header_size();
1102       Copy::fill_to_words(plab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
1103 #endif // ASSERT
1104     }
1105     plab->set_buf(plab_buf, actual_size);
1106     if (is_promotion && !ShenandoahThreadLocalData::allow_plab_promotions(thread)) {
1107       return nullptr;
1108     }
1109     return plab->allocate(size);
1110   } else {
1111     // If there's still at least min_size() words available within the current plab, don't retire it.  Let's gnaw
1112     // away on this plab as long as we can.  Meanwhile, return nullptr to force this particular allocation request
1113     // to be satisfied with a shared allocation.  By packing more promotions into the previously allocated PLAB, we
1114     // reduce the likelihood of evacuation failures, and we reduce the need for downsizing our PLABs.
1115     return nullptr;
1116   }
1117 }
1118 
1119 // TODO: It is probably most efficient to register all objects (both promotions and evacuations) that were allocated within
1120 // this plab at the time we retire the plab.  A tight registration loop will run within both code and data caches.  This change
1121 // would allow smaller and faster in-line implementation of alloc_from_plab().  Since plabs are aligned on card-table boundaries,
1122 // this object registration loop can be performed without acquiring a lock.
1123 void ShenandoahHeap::retire_plab(PLAB* plab, Thread* thread) {
1124   // We don't enforce limits on plab_evacuated.  We let it consume all available old-gen memory in order to reduce
1125   // probability of an evacuation failure.  We do enforce limits on promotion, to make sure that excessive promotion
1126   // does not result in an old-gen evacuation failure.  Note that a failed promotion is relatively harmless.  Any
1127   // object that fails to promote in the current cycle will be eligible for promotion in a subsequent cycle.
1128 
1129   // When the plab was instantiated, its entirety was treated as if the entire buffer was going to be dedicated to
1130   // promotions.  Now that we are retiring the buffer, we adjust for the reality that the plab is not entirely promotions.
1131   //  1. Some of the plab may have been dedicated to evacuations.
1132   //  2. Some of the plab may have been abandoned due to waste (at the end of the plab).
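       // For example (illustrative): if 1024 bytes of promotion budget were preallocated for this plab but
       // only 600 bytes were actually promoted from it, the remaining 424 bytes are returned to the
       // promotion budget by unexpend_promoted() below.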
1133   size_t not_promoted =
1134     ShenandoahThreadLocalData::get_plab_preallocated_promoted(thread) - ShenandoahThreadLocalData::get_plab_promoted(thread);
1135   ShenandoahThreadLocalData::reset_plab_promoted(thread);
1136   ShenandoahThreadLocalData::reset_plab_evacuated(thread);
1137   ShenandoahThreadLocalData::set_plab_preallocated_promoted(thread, 0);
1138   if (not_promoted > 0) {
1139     unexpend_promoted(not_promoted);
1140   }
1141   size_t waste = plab->waste();
1142   HeapWord* top = plab->top();
1143   plab->retire();
1144   if (top != nullptr && plab->waste() > waste && is_in_old(top)) {
1145     // If retiring the plab created a filler object, then we
1146     // need to register it with our card scanner so it can
1147     // safely walk the region backing the plab.
1148     log_debug(gc)("retire_plab() is registering remnant of size " SIZE_FORMAT " at " PTR_FORMAT,
1149                   plab->waste() - waste, p2i(top));
1150     card_scan()->register_object_without_lock(top);
1151   }
1152 }
1153 
1154 void ShenandoahHeap::retire_plab(PLAB* plab) {
1155   Thread* thread = Thread::current();
1156   retire_plab(plab, thread);
1157 }
1158 
1159 void ShenandoahHeap::cancel_old_gc() {
1160   shenandoah_assert_safepoint();
1161   assert(_old_generation != nullptr, "Should only have mixed collections in generation mode.");
1162   if (_old_generation->state() == ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP) {
1163     assert(!old_generation()->is_concurrent_mark_in_progress(), "Cannot be marking in IDLE");
1164     assert(!old_heuristics()->has_coalesce_and_fill_candidates(), "Cannot have coalesce and fill candidates in IDLE");
1165     assert(!old_heuristics()->unprocessed_old_collection_candidates(), "Cannot have mixed collection candidates in IDLE");
1166     assert(!young_generation()->is_bootstrap_cycle(), "Cannot have old mark queues if IDLE");
1167   } else {
1168     log_info(gc)("Terminating old gc cycle.");
1169     // Stop marking
1170     old_generation()->cancel_marking();
1171     // Stop tracking old regions
1172     old_heuristics()->abandon_collection_candidates();
1173     // Remove old generation access to young generation mark queues
1174     young_generation()->set_old_gen_task_queues(nullptr);
1175     // Transition to IDLE now.
1176     _old_generation->transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP);
1177   }
1178 }
1179 
1180 // Make sure old-generation is large enough, but no larger than is necessary, to hold mixed evacuations
1181 // and promotions, if we anticipate either. Any deficit is provided by the young generation, subject to
1182 // xfer_limit, and any excess is transferred to the young generation.
1183 // xfer_limit is the maximum we're able to transfer from young to old.
1184 void ShenandoahHeap::adjust_generation_sizes_for_next_cycle(
1185   size_t xfer_limit, size_t young_cset_regions, size_t old_cset_regions) {
1186 
1187   // We can limit the old reserve to the size of anticipated promotions:
1188   // max_old_reserve is an upper bound on memory evacuated from old and promoted to old,
1189   // clamped by the old generation space available.
1190   //
1191   // Here's the algebra.
1192   // Let SOEP = ShenandoahOldEvacRatioPercent,
1193   //     OE = old evac,
1194   //     YE = young evac, and
1195   //     TE = total evac = OE + YE
1196   // By definition:
1197   //            SOEP/100 = OE/TE
1198   //                     = OE/(OE+YE)
1199   //  => SOEP/(100-SOEP) = OE/((OE+YE)-OE)      // componendo-dividendo: If a/b = c/d, then a/(b-a) = c/(d-c)
1200   //                     = OE/YE
1201   //  =>              OE = YE*SOEP/(100-SOEP)
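       // For example (illustrative): with SOEP = 75 and YE = 100 MB, OE = 100 MB * 75 / (100 - 75) = 300 MB,
       // i.e. the old reserve may be up to three times the young reserve, before the clamping applied below.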
1202 
1203   // We have to be careful in the event that SOEP is set to 100 by the user.
1204   assert(ShenandoahOldEvacRatioPercent <= 100, "ShenandoahOldEvacRatioPercent must not exceed 100");
1205   const size_t old_available = old_generation()->available();
1206   // The free set will reserve this amount of memory to hold young evacuations
1207   const size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100;
1208   const size_t max_old_reserve = (ShenandoahOldEvacRatioPercent == 100) ?
1209      old_available : MIN2((young_reserve * ShenandoahOldEvacRatioPercent) / (100 - ShenandoahOldEvacRatioPercent),
1210                           old_available);
1211 
1212   const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
1213 
1214   // Decide how much old space we should reserve for a mixed collection
1215   size_t reserve_for_mixed = 0;
1216   const size_t mixed_candidates = old_heuristics()->unprocessed_old_collection_candidates();
1217   const bool doing_mixed = (mixed_candidates > 0);
1218   if (doing_mixed) {
1219     // We want this much memory to be unfragmented in order to reliably evacuate old.  This is conservative because we
1220     // may not evacuate the entirety of unprocessed candidates in a single mixed evacuation.
1221     size_t max_evac_need = (size_t)
1222       (old_heuristics()->unprocessed_old_collection_candidates_live_memory() * ShenandoahOldEvacWaste);
1223     assert(old_available >= old_generation()->free_unaffiliated_regions() * region_size_bytes,
1224            "Unaffiliated available must not exceed total available");
1225     size_t old_fragmented_available =
1226       old_available - old_generation()->free_unaffiliated_regions() * region_size_bytes;
1227     reserve_for_mixed = max_evac_need + old_fragmented_available;
1228     if (reserve_for_mixed > max_old_reserve) {
1229       reserve_for_mixed = max_old_reserve;
1230     }
1231   }
1232 
1233   // Decide how much space we should reserve for promotions from young
1234   size_t reserve_for_promo = 0;
1235   const size_t promo_load = get_promotion_potential();
1236   const bool doing_promotions = promo_load > 0;
1237   if (doing_promotions) {
1238     // We're promoting and have a bound on the maximum amount that can be promoted
1239     const size_t available_for_promotions = max_old_reserve - reserve_for_mixed;
1240     reserve_for_promo = MIN2((size_t)(promo_load * ShenandoahPromoEvacWaste), available_for_promotions);
1241   }
1242 
1243   // This is the total old we want to ideally reserve
1244   const size_t old_reserve = reserve_for_mixed + reserve_for_promo;
1245   assert(old_reserve <= max_old_reserve, "cannot reserve more than max for old evacuations");
1246 
1247   // We now check if the old generation is running a surplus or a deficit.
1248   size_t old_region_deficit = 0;
1249   size_t old_region_surplus = 0;
1250 
1251   const size_t max_old_available = old_generation()->available() + old_cset_regions * region_size_bytes;
1252   if (max_old_available >= old_reserve) {
1253     // We are running a surplus, so the old region surplus can go to young
1254     const size_t old_surplus = max_old_available - old_reserve;
1255     old_region_surplus = old_surplus / region_size_bytes;
1256     const size_t unaffiliated_old_regions = old_generation()->free_unaffiliated_regions() + old_cset_regions;
1257     old_region_surplus = MIN2(old_region_surplus, unaffiliated_old_regions);
1258   } else {
1259     // We are running a deficit which we'd like to fill from young.
1260     // Ignore that this will directly impact young_generation()->max_capacity(),
1261     // indirectly impacting young_reserve and old_reserve.  These computations are conservative.
1262     const size_t old_need = old_reserve - max_old_available;
1263     // The old region deficit (rounded up) will come from young
1264     old_region_deficit = (old_need + region_size_bytes - 1) / region_size_bytes;
1265 
1266     // Round down the regions we can transfer from young to old. If we're running short
1267     // on young-gen memory, we restrict the xfer. Old-gen collection activities will be
1268     // curtailed if the budget is restricted.
1269     const size_t max_old_region_xfer = xfer_limit / region_size_bytes;
1270     old_region_deficit = MIN2(old_region_deficit, max_old_region_xfer);
1271   }
1272   assert(old_region_deficit == 0 || old_region_surplus == 0, "Only surplus or deficit, never both");
1273 
1274   set_old_region_surplus(old_region_surplus);
1275   set_old_region_deficit(old_region_deficit);
1276 }
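
     // The surplus/deficit arithmetic above reduces to the following shape.  A minimal sketch with
     // hypothetical names, where old_available stands for the old generation's available bytes including
     // regions about to be reclaimed from the collection set; the real code additionally clamps the surplus
     // to the number of unaffiliated old regions and clamps the deficit to the young-to-old xfer_limit.
     static void sketch_old_region_balance(size_t old_available, size_t old_reserve, size_t region_bytes,
                                           size_t* surplus_regions, size_t* deficit_regions) {
       if (old_available >= old_reserve) {
         *surplus_regions = (old_available - old_reserve) / region_bytes;                    // round down
         *deficit_regions = 0;
       } else {
         *surplus_regions = 0;
         *deficit_regions = (old_reserve - old_available + region_bytes - 1) / region_bytes; // round up
       }
     }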
1277 
1278 // Called from stubs in JIT code or interpreter
1279 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
1280                                             size_t requested_size,
1281                                             size_t* actual_size) {
1282   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
1283   HeapWord* res = allocate_memory(req, false);
1284   if (res != nullptr) {
1285     *actual_size = req.actual_size();
1286   } else {
1287     *actual_size = 0;
1288   }
1289   return res;
1290 }
1291 
1292 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
1293                                              size_t word_size,
1294                                              size_t* actual_size) {
1295   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
1296   HeapWord* res = allocate_memory(req, false);
1297   if (res != nullptr) {
1298     *actual_size = req.actual_size();
1299   } else {
1300     *actual_size = 0;
1301   }
1302   return res;
1303 }
1304 
1305 HeapWord* ShenandoahHeap::allocate_new_plab(size_t min_size,
1306                                             size_t word_size,
1307                                             size_t* actual_size) {
1308   // Align requested sizes to card sized multiples
1309   size_t words_in_card = CardTable::card_size_in_words();
1310   size_t align_mask = ~(words_in_card - 1);
1311   min_size = (min_size + words_in_card - 1) & align_mask;
1312   word_size = (word_size + words_in_card - 1) & align_mask;
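       // The mask rounding above assumes words_in_card is a power of two.  For example (illustrative
       // numbers): with 64-word cards, align_mask = ~63, so a request of 1000 words becomes
       // (1000 + 63) & ~63 = 1024 words.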
1313   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_plab(min_size, word_size);
1314   // Note that allocate_memory() sets a thread-local flag to prohibit further promotions by this thread
1315   // if we are at risk of infringing on the old-gen evacuation budget.
1316   HeapWord* res = allocate_memory(req, false);
1317   if (res != nullptr) {
1318     *actual_size = req.actual_size();
1319   } else {
1320     *actual_size = 0;
1321   }
1322   return res;
1323 }
1324 
1325 // is_promotion is true iff this allocation is known for sure to hold the result of young-gen evacuation
1326 // to old-gen.  plab allocates are not known as such, since they may hold old-gen evacuations.
1327 HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req, bool is_promotion) {
1328   intptr_t pacer_epoch = 0;
1329   bool in_new_region = false;
1330   HeapWord* result = nullptr;
1331 
1332   if (req.is_mutator_alloc()) {
1333     if (ShenandoahPacing) {
1334       pacer()->pace_for_alloc(req.size());
1335       pacer_epoch = pacer()->epoch();
1336     }
1337 
1338     if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
1339       result = allocate_memory_under_lock(req, in_new_region, is_promotion);
1340     }
1341 
1342     // Allocation failed, block until control thread reacted, then retry allocation.
1343     //
1344     // It might happen that one of the threads requesting allocation would unblock
1345     // way later after GC happened, only to fail the second allocation, because
1346     // other threads have already depleted the free storage. In this case, a better
1347     // strategy is to try again, as long as GC makes progress (or until at least
1348     // one full GC has completed).
1349     size_t original_count = shenandoah_policy()->full_gc_count();
1350     while (result == nullptr
1351         && (_progress_last_gc.is_set() || original_count == shenandoah_policy()->full_gc_count())) {
1352       control_thread()->handle_alloc_failure(req);
1353       result = allocate_memory_under_lock(req, in_new_region, is_promotion);
1354     }
1355 
1356   } else {
1357     assert(req.is_gc_alloc(), "Can only accept GC allocs here");
1358     result = allocate_memory_under_lock(req, in_new_region, is_promotion);
1359     // Do not call handle_alloc_failure() here, because we cannot block.
1360     // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
1361   }
1362 
1363   if (in_new_region) {
1364     control_thread()->notify_heap_changed();
1365     regulator_thread()->notify_heap_changed();
1366   }
1367 
1368   if (result == nullptr) {
1369     req.set_actual_size(0);
1370   }
1371 
1372   // This is called regardless of the outcome of the allocation to account
1373   // for any waste created by retiring regions with this request.
1374   increase_used(req);
1375 
1376   if (result != nullptr) {
1377     size_t requested = req.size();
1378     size_t actual = req.actual_size();
1379 
1380     assert (req.is_lab_alloc() || (requested == actual),
1381             "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
1382             ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);
1383 
1384     if (req.is_mutator_alloc()) {
1385       // If we requested more than we were granted, give the rest back to pacer.
1386       // This only matters if we are in the same pacing epoch: do not try to unpace
1387       // over the budget for the other phase.
1388       if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
1389         pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
1390       }
1391     }
1392   }
1393 
1394   return result;
1395 }
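
     // The mutator-side protocol above has a simple shape: try once, and on failure wake the control thread
     // and retry for as long as the last GC made progress or until one full GC has completed since the
     // original failure.  A minimal sketch, with hypothetical callables standing in for the real methods:
     template <typename AllocFn, typename StallFn, typename ProgressFn, typename FullGcCountFn>
     static HeapWord* sketch_alloc_with_retry(AllocFn try_alloc, StallFn stall_for_gc,
                                              ProgressFn last_gc_made_progress, FullGcCountFn full_gc_count) {
       HeapWord* result = try_alloc();
       size_t original_count = full_gc_count();
       while (result == nullptr && (last_gc_made_progress() || original_count == full_gc_count())) {
         stall_for_gc();       // block until the control thread has reacted to the allocation failure
         result = try_alloc();
       }
       return result;
     }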
1396 
1397 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region, bool is_promotion) {
1398   bool try_smaller_lab_size = false;
1399   size_t smaller_lab_size;
1400   {
1401     // promotion_eligible pertains only to PLAB allocations, denoting that the PLAB is allowed to allocate for promotions.
1402     bool promotion_eligible = false;
1403     bool allow_allocation = true;
1404     bool plab_alloc = false;
1405     size_t requested_bytes = req.size() * HeapWordSize;
1406     HeapWord* result = nullptr;
1407     ShenandoahHeapLocker locker(lock());
1408     Thread* thread = Thread::current();
1409 
1410     if (mode()->is_generational()) {
1411       if (req.affiliation() == YOUNG_GENERATION) {
1412         if (req.is_mutator_alloc()) {
1413           size_t young_words_available = young_generation()->available() / HeapWordSize;
1414           if (req.is_lab_alloc() && (req.min_size() < young_words_available)) {
1415             // Allow ourselves to try a smaller lab size even if requested_bytes <= young_available.  We may need a smaller
1416             // lab size because young memory has become too fragmented.
1417             try_smaller_lab_size = true;
1418             smaller_lab_size = (young_words_available < req.size())? young_words_available: req.size();
1419           } else if (req.size() > young_words_available) {
1420             // Can't allocate because even min_size() is larger than remaining young_available
1421             log_info(gc, ergo)("Unable to shrink %s alloc request of minimum size: " SIZE_FORMAT
1422                                ", young words available: " SIZE_FORMAT, req.type_string(),
1423                                HeapWordSize * (req.is_lab_alloc()? req.min_size(): req.size()), young_words_available);
1424             return nullptr;
1425           }
1426         }
1427       } else {                    // req.affiliation() == OLD_GENERATION
1428         assert(req.type() != ShenandoahAllocRequest::_alloc_gclab, "GCLAB pertains only to young-gen memory");
1429         if (req.type() ==  ShenandoahAllocRequest::_alloc_plab) {
1430           plab_alloc = true;
1431           size_t promotion_avail = get_promoted_reserve();
1432           size_t promotion_expended = get_promoted_expended();
1433           if (promotion_expended + requested_bytes > promotion_avail) {
1434             promotion_avail = 0;
1435             if (get_old_evac_reserve() == 0) {
1436               // There are no old-gen evacuations in this pass.  There's no value in creating a plab that cannot
1437               // be used for promotions.
1438               allow_allocation = false;
1439             }
1440           } else {
1441             promotion_avail = promotion_avail - (promotion_expended + requested_bytes);
1442             promotion_eligible = true;
1443           }
1444         } else if (is_promotion) {
1445           // This is a shared alloc for promotion
1446           size_t promotion_avail = get_promoted_reserve();
1447           size_t promotion_expended = get_promoted_expended();
1448           if (promotion_expended + requested_bytes > promotion_avail) {
1449             promotion_avail = 0;
1450           } else {
1451             promotion_avail = promotion_avail - (promotion_expended + requested_bytes);
1452           }
1453           if (promotion_avail == 0) {
1454             // We need to reserve the remaining memory for evacuation.  Reject this allocation.  The object will be
1455             // evacuated to young-gen memory and promoted during a future GC pass.
1456             return nullptr;
1457           }
1458           // Else, we'll allow the allocation to proceed.  (Since we hold heap lock, the tested condition remains true.)
1459         } else {
1460           // This is a shared allocation for evacuation.  Memory has already been reserved for this purpose.
1461         }
1462       }
1463     } // This ends the is_generational() block
1464 
1465     // First try the original request.  If TLAB request size is greater than available, allocate() will attempt to downsize
1466     // request to fit within available memory.
1467     result = (allow_allocation)? _free_set->allocate(req, in_new_region): nullptr;
1468     if (result != nullptr) {
1469       if (req.is_old()) {
1470         ShenandoahThreadLocalData::reset_plab_promoted(thread);
1471         if (req.is_gc_alloc()) {
1472           bool disable_plab_promotions = false;
1473           if (req.type() ==  ShenandoahAllocRequest::_alloc_plab) {
1474             if (promotion_eligible) {
1475               size_t actual_size = req.actual_size() * HeapWordSize;
1476               // The actual size of the allocation may be larger than the requested bytes (due to alignment on card boundaries).
1477               // If this puts us over our promotion budget, we need to disable future PLAB promotions for this thread.
1478               if (get_promoted_expended() + actual_size <= get_promoted_reserve()) {
1479                 // Assume the entirety of this PLAB will be used for promotion.  This prevents promotion from overreach.
1480                 // When we retire this plab, we'll unexpend what we don't really use.
1481                 ShenandoahThreadLocalData::enable_plab_promotions(thread);
1482                 expend_promoted(actual_size);
1483                 assert(get_promoted_expended() <= get_promoted_reserve(), "Do not expend more promotion than budgeted");
1484                 ShenandoahThreadLocalData::set_plab_preallocated_promoted(thread, actual_size);
1485               } else {
1486                 disable_plab_promotions = true;
1487               }
1488             } else {
1489               disable_plab_promotions = true;
1490             }
1491             if (disable_plab_promotions) {
1492               // Disable promotions in this thread because entirety of this PLAB must be available to hold old-gen evacuations.
1493               ShenandoahThreadLocalData::disable_plab_promotions(thread);
1494               ShenandoahThreadLocalData::set_plab_preallocated_promoted(thread, 0);
1495             }
1496           } else if (is_promotion) {
1497             // Shared promotion.  Assume size is requested_bytes.
1498             expend_promoted(requested_bytes);
1499             assert(get_promoted_expended() <= get_promoted_reserve(), "Do not expend more promotion than budgeted");
1500           }
1501         }
1502 
1503         // Register the newly allocated object while we're holding the global lock since there's no synchronization
1504         // built in to the implementation of register_object().  There are potential races when multiple independent
1505         // threads are allocating objects, some of which might span the same card region.  For example, consider
1506         // a card table's memory region within which three objects are being allocated by three different threads:
1507         //
1508         // objects being "concurrently" allocated:
1509         //    [-----a------][-----b-----][--------------c------------------]
1510         //            [---- card table memory range --------------]
1511         //
1512         // Before any objects are allocated, this card's memory range holds no objects.  Note that allocation of object a
1513         //   wants to set the starts-object, first-start, and last-start attributes of the preceding card region.
1514         //   allocation of object b wants to set the starts-object, first-start, and last-start attributes of this card region.
1515         //   allocation of object c also wants to set the starts-object, first-start, and last-start attributes of this
1516         //   card region.
1517         //
1518         // The thread allocating b and the thread allocating c can "race" in various ways, resulting in confusion, such as
1519         // last-start representing object b while first-start represents object c.  This is why we need to require all
1520         // register_object() invocations to be "mutually exclusive" with respect to each card's memory range.
1521         ShenandoahHeap::heap()->card_scan()->register_object(result);
1522       }
1523     } else {
1524       // The allocation failed.  If this was a plab allocation, we've already retired it and no longer have a plab.
1525       if (req.is_old() && req.is_gc_alloc() && (req.type() == ShenandoahAllocRequest::_alloc_plab)) {
1526         // We don't need to disable PLAB promotions because there is no PLAB.  We leave promotions enabled because
1527         // this allows the surrounding infrastructure to retry alloc_plab_slow() with a smaller PLAB size.
1528         ShenandoahThreadLocalData::set_plab_preallocated_promoted(thread, 0);
1529       }
1530     }
1531     if ((result != nullptr) || !try_smaller_lab_size) {
1532       return result;
1533     }
1534     // else, fall through to try_smaller_lab_size
1535   } // This closes the block that holds the heap lock, releasing the lock.
1536 
1537   // We failed to allocate the originally requested lab size.  Let's see if we can allocate a smaller lab size.
1538   if (req.size() == smaller_lab_size) {
1539     // If the smaller size equals the size we just tried to allocate, there is no value in repeating the request.  End the recursion.
1540     return nullptr;
1541   }
1542 
1543   // We arrive here if the tlab allocation request can be resized to fit within young_available
1544   assert((req.affiliation() == YOUNG_GENERATION) && req.is_lab_alloc() && req.is_mutator_alloc() &&
1545          (smaller_lab_size < req.size()), "Only shrink allocation request size for TLAB allocations");
1546 
1547   // By convention, ShenandoahAllocationRequest is primarily read-only.  The only mutable instance data is represented by
1548   // actual_size(), which is overwritten with the size of the allocation when the allocation request is satisfied.  We use a
1549   // recursive call here rather than introducing new methods to mutate the existing ShenandoahAllocationRequest argument.
1550   // Mutation of the existing object might result in astonishing results if calling contexts assume the content of immutable
1551   // fields remain constant.  The original TLAB allocation request was for memory that exceeded the current capacity.  We'll
1552   // attempt to allocate a smaller TLAB.  If this is successful, we'll update actual_size() of our incoming
1553   // ShenandoahAllocRequest.  If the recursive request fails, we'll simply return nullptr.
1554 
1555   // Note that we've relinquished the HeapLock and some other thread may perform additional allocation before our recursive
1556   // call reacquires the lock.  If that happens, we will need another recursive call to further reduce the size of our request
1557   // for each time another thread allocates young memory during the brief intervals that the heap lock is available to
1558   // interfering threads.  We expect this interference to be rare.  The recursion bottoms out when young_available is
1559   // smaller than req.min_size().  The inner-nested call to allocate_memory_under_lock() uses the same min_size() value
1560   // as this call, but it uses a preferred size() that is smaller than our preferred size, and is no larger than what we most
1561   // recently saw as the memory currently available within the young generation.
1562 
1563   // TODO: At the expense of code clarity, we could rewrite this recursive solution to use iteration.  We need at most one
1564   // extra instance of the ShenandoahAllocRequest, which we can re-initialize multiple times inside a loop, with one iteration
1565   // of the loop required for each time the existing solution would recurse.  An iterative solution would be more efficient
1566   // in CPU time and stack memory utilization.  The expectation is that it is very rare that we would recurse more than once
1567   // so making this change is not currently seen as a high priority.
1568 
1569   ShenandoahAllocRequest smaller_req = ShenandoahAllocRequest::for_tlab(req.min_size(), smaller_lab_size);
1570 
1571   // Note that shrinking the preferred size gets us past the gatekeeper that checks whether there's available memory to
1572   // satisfy the allocation request.  The reality is the actual TLAB size is likely to be even smaller, because it will
1573   // depend on how much memory is available within mutator regions that are not yet fully used.
1574   HeapWord* result = allocate_memory_under_lock(smaller_req, in_new_region, is_promotion);
1575   if (result != nullptr) {
1576     req.set_actual_size(smaller_req.actual_size());
1577   }
1578   return result;
1579 }
1580 
1581 HeapWord* ShenandoahHeap::mem_allocate(size_t size,
1582                                         bool*  gc_overhead_limit_was_exceeded) {
1583   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
1584   return allocate_memory(req, false);
1585 }
1586 
1587 MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
1588                                                              size_t size,
1589                                                              Metaspace::MetadataType mdtype) {
1590   MetaWord* result;
1591 
1592   // Inform metaspace OOM to GC heuristics if class unloading is possible.
1593   ShenandoahHeuristics* h = global_generation()->heuristics();
1594   if (h->can_unload_classes()) {
1595     h->record_metaspace_oom();
1596   }
1597 
1598   // Expand and retry allocation
1599   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1600   if (result != nullptr) {
1601     return result;
1602   }
1603 
1604   // Start full GC
1605   collect(GCCause::_metadata_GC_clear_soft_refs);
1606 
1607   // Retry allocation
1608   result = loader_data->metaspace_non_null()->allocate(size, mdtype);
1609   if (result != nullptr) {
1610     return result;
1611   }
1612 
1613   // Expand and retry allocation
1614   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);

1653 
1654   void work(uint worker_id) {
1655     if (_concurrent) {
1656       ShenandoahConcurrentWorkerSession worker_session(worker_id);
1657       ShenandoahSuspendibleThreadSetJoiner stsj;
1658       ShenandoahEvacOOMScope oom_evac_scope;
1659       do_work();
1660     } else {
1661       ShenandoahParallelWorkerSession worker_session(worker_id);
1662       ShenandoahEvacOOMScope oom_evac_scope;
1663       do_work();
1664     }
1665   }
1666 
1667 private:
1668   void do_work() {
1669     ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
1670     ShenandoahHeapRegion* r;
1671     while ((r =_cs->claim_next()) != nullptr) {
1672       assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
1673 
1674       _sh->marked_object_iterate(r, &cl);
1675 
1676       if (ShenandoahPacing) {
1677         _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
1678       }
1679       if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
1680         break;
1681       }
1682     }
1683   }
1684 };
1685 
1686 // Unlike ShenandoahEvacuationTask, this iterates over all regions rather than just the collection set.
1687 // This is needed in order to promote humongous start regions if age() >= tenure threshold.
1688 class ShenandoahGenerationalEvacuationTask : public WorkerTask {
1689 private:
1690   ShenandoahHeap* const _sh;
1691   ShenandoahRegionIterator *_regions;
1692   bool _concurrent;
1693   uint _tenuring_threshold;
1694 
1695 public:
1696   ShenandoahGenerationalEvacuationTask(ShenandoahHeap* sh,
1697                                        ShenandoahRegionIterator* iterator,
1698                                        bool concurrent) :
1699     WorkerTask("Shenandoah Evacuation"),
1700     _sh(sh),
1701     _regions(iterator),
1702     _concurrent(concurrent),
1703     _tenuring_threshold(0)
1704   {
1705     if (_sh->mode()->is_generational()) {
1706       _tenuring_threshold = _sh->age_census()->tenuring_threshold();
1707     }
1708   }
1709 
1710   void work(uint worker_id) {
1711     if (_concurrent) {
1712       ShenandoahConcurrentWorkerSession worker_session(worker_id);
1713       ShenandoahSuspendibleThreadSetJoiner stsj;
1714       ShenandoahEvacOOMScope oom_evac_scope;
1715       do_work();
1716     } else {
1717       ShenandoahParallelWorkerSession worker_session(worker_id);
1718       ShenandoahEvacOOMScope oom_evac_scope;
1719       do_work();
1720     }
1721   }
1722 
1723 private:
1724   void do_work() {
1725     ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
1726     ShenandoahHeapRegion* r;
1727     ShenandoahMarkingContext* const ctx = ShenandoahHeap::heap()->marking_context();
1728     size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
1729     size_t old_garbage_threshold = (region_size_bytes * ShenandoahOldGarbageThreshold) / 100;
1730     while ((r = _regions->next()) != nullptr) {
1731       log_debug(gc)("GenerationalEvacuationTask do_work(), looking at %s region " SIZE_FORMAT ", (age: %d) [%s, %s, %s]",
1732                     r->is_old()? "old": r->is_young()? "young": "free", r->index(), r->age(),
1733                     r->is_active()? "active": "inactive",
1734                     r->is_humongous()? (r->is_humongous_start()? "humongous_start": "humongous_continuation"): "regular",
1735                     r->is_cset()? "cset": "not-cset");
1736 
1737       if (r->is_cset()) {
1738         assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
1739         _sh->marked_object_iterate(r, &cl);
1740         if (ShenandoahPacing) {
1741           _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
1742         }
1743       } else if (r->is_young() && r->is_active() && (r->age() >= _tenuring_threshold)) {
1744         HeapWord* tams = ctx->top_at_mark_start(r);
1745         if (r->is_humongous_start()) {
1746           // We promote humongous_start regions along with their affiliated continuations during evacuation rather than
1747           // doing this work during a safepoint.  We cannot put humongous regions into the collection set because that
1748           // triggers the load-reference barrier (LRB) to copy on reference fetch.
1749           r->promote_humongous();
1750         } else if (r->is_regular() && (r->get_top_before_promote() != nullptr)) {
1751           assert(r->garbage_before_padded_for_promote() < old_garbage_threshold,
1752                  "Region " SIZE_FORMAT " has too much garbage for promotion", r->index());
1753           assert(r->get_top_before_promote() == tams,
1754                  "Region " SIZE_FORMAT " has been used for allocations before promotion", r->index());
1755           // Likewise, we cannot put promote-in-place regions into the collection set because that would also trigger
1756           // the LRB to copy on reference fetch.
1757           r->promote_in_place();
1758         }
1759         // Aged humongous continuation regions are handled with their start region.  If an aged regular region has
1760         // more garbage than ShenandoahOldGarbageThreshold, we'll promote by evacuation.  If there is room for evacuation
1761         // in this cycle, the region will be in the collection set.  If there is not room, the region will be promoted
1762         // by evacuation in some future GC cycle.
1763 
1764         // If an aged regular region has received allocations during the current cycle, we do not promote because the
1765         // newly allocated objects do not have appropriate age; this region's age will be reset to zero at end of cycle.
1766       }
1767       // else, region is free, or OLD, or not in collection set, or humongous_continuation,
1768       // or is young humongous_start that is too young to be promoted
1769 
1770       if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
1771         break;
1772       }
1773     }
1774   }
1775 };
1776 
1777 void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
1778   if (ShenandoahHeap::heap()->mode()->is_generational()) {
1779     ShenandoahRegionIterator regions;
1780     ShenandoahGenerationalEvacuationTask task(this, &regions, concurrent);
1781     workers()->run_task(&task);
1782   } else {
1783     ShenandoahEvacuationTask task(this, _collection_set, concurrent);
1784     workers()->run_task(&task);
1785   }
1786 }
1787 
1788 void ShenandoahHeap::trash_cset_regions() {
1789   ShenandoahHeapLocker locker(lock());
1790 
1791   ShenandoahCollectionSet* set = collection_set();
1792   ShenandoahHeapRegion* r;
1793   set->clear_current_index();
1794   while ((r = set->next()) != nullptr) {
1795     r->make_trash();
1796   }
1797   collection_set()->clear();
1798 }
1799 
1800 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
1801   st->print_cr("Heap Regions:");
1802   st->print_cr("Region state: EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HP=pinned humongous start");
1803   st->print_cr("              HC=humongous continuation, CS=collection set, TR=trash, P=pinned, CSP=pinned collection set");
1804   st->print_cr("BTE=bottom/top/end, TAMS=top-at-mark-start");
1805   st->print_cr("UWM=update watermark, U=used");
1806   st->print_cr("T=TLAB allocs, G=GCLAB allocs");
1807   st->print_cr("S=shared allocs, L=live data");
1808   st->print_cr("CP=critical pins");
1809 
1810   for (size_t i = 0; i < num_regions(); i++) {
1811     get_region(i)->print_on(st);
1812   }
1813 }
1814 
1815 size_t ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
1816   assert(start->is_humongous_start(), "reclaim regions starting with the first one");
1817 
1818   oop humongous_obj = cast_to_oop(start->bottom());
1819   size_t size = humongous_obj->size();
1820   size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
1821   size_t index = start->index() + required_regions - 1;
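       // For example (illustrative): a 6 MB humongous object in a heap of 2 MB regions requires
       // required_regions = 3, so index starts at start->index() + 2 and the loop walks back to start->index().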
1822 
1823   assert(!start->has_live(), "liveness must be zero");
1824 
1825   for (size_t i = 0; i < required_regions; i++) {
1826     // Reclaim from tail. Otherwise, assertion fails when printing region to trace log,
1827     // as it expects that every region belongs to a humongous region starting with a humongous start region.
1828     ShenandoahHeapRegion* region = get_region(index--);
1829 
1830     assert(region->is_humongous(), "expect correct humongous start or continuation");
1831     assert(!region->is_cset(), "Humongous region should not be in collection set");
1832 
1833     region->make_trash_immediate();
1834   }
1835   return required_regions;
1836 }
1837 
1838 class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
1839 public:
1840   ShenandoahCheckCleanGCLABClosure() {}
1841   void do_thread(Thread* thread) {
1842     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1843     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1844     assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
1845 
1846     PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1847     assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
1848     assert(plab->words_remaining() == 0, "PLAB should not need retirement");
1849   }
1850 };
1851 
1852 class ShenandoahRetireGCLABClosure : public ThreadClosure {
1853 private:
1854   bool const _resize;
1855 public:
1856   ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
1857   void do_thread(Thread* thread) {
1858     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1859     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1860     gclab->retire();
1861     if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1862       ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1863     }
1864 
1865     PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1866     assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
1867 
1868     // There are two reasons to retire all plabs between old-gen evacuation passes.
1869     //  1. We need to make the plab memory parsable by remembered-set scanning.
1870     //  2. We need to establish a trustworthy UpdateWaterMark value within each old-gen heap region
1871     ShenandoahHeap::heap()->retire_plab(plab, thread);
1872     if (_resize && ShenandoahThreadLocalData::plab_size(thread) > 0) {
1873       ShenandoahThreadLocalData::set_plab_size(thread, 0);
1874     }
1875   }
1876 };
1877 
1878 void ShenandoahHeap::labs_make_parsable() {
1879   assert(UseTLAB, "Only call with UseTLAB");
1880 
1881   ShenandoahRetireGCLABClosure cl(false);
1882 
1883   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1884     ThreadLocalAllocBuffer& tlab = t->tlab();
1885     tlab.make_parsable();
1886     cl.do_thread(t);
1887   }
1888 
1889   workers()->threads_do(&cl);
1890 }
1891 
1892 void ShenandoahHeap::tlabs_retire(bool resize) {
1893   assert(UseTLAB, "Only call with UseTLAB");
1894   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");

1914 #endif
1915 }
1916 
1917 void ShenandoahHeap::gclabs_retire(bool resize) {
1918   assert(UseTLAB, "Only call with UseTLAB");
1919   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1920 
1921   ShenandoahRetireGCLABClosure cl(resize);
1922   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1923     cl.do_thread(t);
1924   }
1925   workers()->threads_do(&cl);
1926 
1927   if (safepoint_workers() != nullptr) {
1928     safepoint_workers()->threads_do(&cl);
1929   }
1930 }
1931 
1932 // Returns size in bytes
1933 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1934   if (mode()->is_generational()) {
1935     return MIN2(ShenandoahHeapRegion::max_tlab_size_bytes(), young_generation()->available());
1936   } else {
1937     // Return the max allowed size, and let the allocation path
1938     // figure out the safe size for current allocation.
1939     return ShenandoahHeapRegion::max_tlab_size_bytes();
1940   }
1941 }
1942 
1943 size_t ShenandoahHeap::max_tlab_size() const {
1944   // Returns size in words
1945   return ShenandoahHeapRegion::max_tlab_size_words();
1946 }
1947 
1948 void ShenandoahHeap::collect(GCCause::Cause cause) {
1949   control_thread()->request_gc(cause);
1950 }
1951 
1952 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
1953   //assert(false, "Shouldn't need to do full collections");
1954 }
1955 
1956 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1957   ShenandoahHeapRegion* r = heap_region_containing(addr);
1958   if (r != nullptr) {
1959     return r->block_start(addr);
1960   }
1961   return nullptr;
1962 }
1963 
1964 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1965   ShenandoahHeapRegion* r = heap_region_containing(addr);
1966   return r->block_is_obj(addr);
1967 }
1968 
1969 bool ShenandoahHeap::print_location(outputStream* st, void* addr) const {
1970   return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr);
1971 }
1972 
1973 void ShenandoahHeap::prepare_for_verify() {
1974   if (SafepointSynchronize::is_at_safepoint() && UseTLAB) {
1975     labs_make_parsable();
1976   }
1977 }
1978 
1979 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1980   if (_shenandoah_policy->is_at_shutdown()) {
1981     return;
1982   }
1983 
1984   tcl->do_thread(_control_thread);
1985   tcl->do_thread(_regulator_thread);
1986   workers()->threads_do(tcl);
1987   if (_safepoint_workers != nullptr) {
1988     _safepoint_workers->threads_do(tcl);
1989   }
1990 }
1991 
1992 void ShenandoahHeap::print_tracing_info() const {
1993   LogTarget(Info, gc, stats) lt;
1994   if (lt.is_enabled()) {
1995     ResourceMark rm;
1996     LogStream ls(lt);
1997 
1998     phase_timings()->print_global_on(&ls);
1999 
2000     ls.cr();
2001     ls.cr();
2002 
2003     shenandoah_policy()->print_gc_stats(&ls);
2004 
2005     ls.cr();
2006 
2007     evac_tracker()->print_global_on(&ls);
2008 
2009     ls.cr();
2010     ls.cr();
2011   }
2012 }
2013 
2014 void ShenandoahHeap::on_cycle_start(GCCause::Cause cause, ShenandoahGeneration* generation) {
2015   set_gc_cause(cause);
2016   set_gc_generation(generation);
2017 
2018   shenandoah_policy()->record_cycle_start();
2019   generation->heuristics()->record_cycle_start();
2020 }
2021 
2022 void ShenandoahHeap::on_cycle_end(ShenandoahGeneration* generation) {
2023   generation->heuristics()->record_cycle_end();
2024   if (mode()->is_generational() && generation->is_global()) {
2025     // If we just completed a GLOBAL GC, claim credit for completion of young-gen and old-gen GC as well
2026     young_generation()->heuristics()->record_cycle_end();
2027     old_generation()->heuristics()->record_cycle_end();
2028   }
2029   set_gc_cause(GCCause::_no_gc);
2030 }
2031 
2032 void ShenandoahHeap::verify(VerifyOption vo) {
2033   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
2034     if (ShenandoahVerify) {
2035       verifier()->verify_generic(vo);
2036     } else {
2037       // TODO: Consider allocating verification bitmaps on demand,
2038       // and turn this on unconditionally.
2039     }
2040   }
2041 }
2042 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
2043   return _free_set->capacity();
2044 }
2045 
2046 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
2047 private:
2048   MarkBitMap* _bitmap;
2049   ShenandoahScanObjectStack* _oop_stack;
2050   ShenandoahHeap* const _heap;
2051   ShenandoahMarkingContext* const _marking_context;

2333       if (start >= max) break;
2334 
2335       for (size_t i = cur; i < end; i++) {
2336         ShenandoahHeapRegion* current = _heap->get_region(i);
2337         _blk->heap_region_do(current);
2338       }
2339     }
2340   }
2341 };
2342 
2343 void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
2344   assert(blk->is_thread_safe(), "Only thread-safe closures here");
2345   if (num_regions() > ShenandoahParallelRegionStride) {
2346     ShenandoahParallelHeapRegionTask task(blk);
2347     workers()->run_task(&task);
2348   } else {
2349     heap_region_iterate(blk);
2350   }
2351 }
2352 
2353 class ShenandoahRendezvousClosure : public HandshakeClosure {
2354 public:
2355   inline ShenandoahRendezvousClosure() : HandshakeClosure("ShenandoahRendezvous") {}
2356   inline void do_thread(Thread* thread) {}
2357 };
2358 
2359 void ShenandoahHeap::rendezvous_threads() {
2360   ShenandoahRendezvousClosure cl;
2361   Handshake::execute(&cl);
2362 }
2363 
2364 void ShenandoahHeap::recycle_trash() {
2365   free_set()->recycle_trash();
2366 }
2367 
2368 void ShenandoahHeap::do_class_unloading() {
2369   _unloader.unload();
2370 }
2371 
2372 void ShenandoahHeap::stw_weak_refs(bool full_gc) {
2373   // Weak refs processing
2374   ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
2375                                                 : ShenandoahPhaseTimings::degen_gc_weakrefs;
2376   ShenandoahTimingsTracker t(phase);
2377   ShenandoahGCWorkerPhase worker_phase(phase);
2378   active_generation()->ref_processor()->process_references(phase, workers(), false /* concurrent */);
2379 }
2380 
2381 void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
2382   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2383 
2384   // Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to
2385   // make them parsable for update code to work correctly. Plus, we can compute new sizes
2386   // for future GCLABs here.
2387   if (UseTLAB) {
2388     ShenandoahGCPhase phase(concurrent ?
2389                             ShenandoahPhaseTimings::init_update_refs_manage_gclabs :
2390                             ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
2391     gclabs_retire(ResizeTLAB);
2392   }
2393 
2394   _update_refs_iterator.reset();
2395 }
2396 
2397 void ShenandoahHeap::set_gc_state_all_threads(char state) {
2398   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
2399     ShenandoahThreadLocalData::set_gc_state(t, state);
2400   }
2401 }
2402 
2403 void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) {
2404   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should really be Shenandoah safepoint");
2405   _gc_state.set_cond(mask, value);
2406   set_gc_state_all_threads(_gc_state.raw_value());
2407 }
2408 
2409 void ShenandoahHeap::set_evacuation_reserve_quantities(bool is_valid) {
2410   _has_evacuation_reserve_quantities = is_valid;
2411 }
2412 
2413 void ShenandoahHeap::set_concurrent_young_mark_in_progress(bool in_progress) {
2414   uint mask;
2415   assert(!has_forwarded_objects(), "Young marking is not concurrent with evacuation");
2416   if (!in_progress && is_concurrent_old_mark_in_progress()) {
2417     assert(mode()->is_generational(), "Only generational GC has old marking");
2418     assert(_gc_state.is_set(MARKING), "concurrent_old_marking_in_progress implies MARKING");
2419     // If old-marking is in progress when we turn off YOUNG_MARKING, leave MARKING (and OLD_MARKING) on
2420     mask = YOUNG_MARKING;
2421   } else {
2422     mask = MARKING | YOUNG_MARKING;
2423   }
2424   set_gc_state_mask(mask, in_progress);
2425   manage_satb_barrier(in_progress);
2426 }
2427 
2428 void ShenandoahHeap::set_concurrent_old_mark_in_progress(bool in_progress) {
2429 #ifdef ASSERT
2430   // has_forwarded_objects() iff UPDATEREFS or EVACUATION
2431   bool has_forwarded = has_forwarded_objects();
2432   bool updating_or_evacuating = _gc_state.is_set(UPDATEREFS | EVACUATION);
2433   bool evacuating = _gc_state.is_set(EVACUATION);
2434   assert ((has_forwarded == updating_or_evacuating) || (evacuating && !has_forwarded && collection_set()->is_empty()),
2435           "Updating or evacuating iff has forwarded objects, or if evacuation phase is promoting in place without forwarding");
2436 #endif
2437   if (!in_progress && is_concurrent_young_mark_in_progress()) {
2438     // If young-marking is in progress when we turn off OLD_MARKING, leave MARKING (and YOUNG_MARKING) on
2439     assert(_gc_state.is_set(MARKING), "concurrent_young_marking_in_progress implies MARKING");
2440     set_gc_state_mask(OLD_MARKING, in_progress);
2441   } else {
2442     set_gc_state_mask(MARKING | OLD_MARKING, in_progress);
2443   }
2444   manage_satb_barrier(in_progress);
2445 }
2446 
2447 bool ShenandoahHeap::is_prepare_for_old_mark_in_progress() const {
2448   return old_generation()->state() == ShenandoahOldGeneration::FILLING;
2449 }
2450 
2451 void ShenandoahHeap::set_aging_cycle(bool in_progress) {
2452   _is_aging_cycle.set_cond(in_progress);
2453 }
2454 
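     // Net effect of the checks below: a request to activate the SATB barrier is honored only while
     // concurrent marking is in progress and the barrier is not already active; a request to deactivate it
     // is honored only when no concurrent marking is in progress and the barrier is currently active.
     // All other combinations are no-ops.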
2455 void ShenandoahHeap::manage_satb_barrier(bool active) {
2456   if (is_concurrent_mark_in_progress()) {
2457     // Ignore request to deactivate barrier while concurrent mark is in progress.
2458     // Do not attempt to re-activate the barrier if it is already active.
2459     if (active && !ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
2460       ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
2461     }
2462   } else {
2463     // No concurrent marking is in progress so honor request to deactivate,
2464     // but only if the barrier is already active.
2465     if (!active && ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
2466       ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
2467     }
2468   }
2469 }
2470 
2471 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
2472   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
2473   set_gc_state_mask(EVACUATION, in_progress);
2474 }
2475 
2476 void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
2477   if (in_progress) {
2478     _concurrent_strong_root_in_progress.set();
2479   } else {
2480     _concurrent_strong_root_in_progress.unset();
2481   }
2482 }
2483 
2484 void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
2485   set_gc_state_mask(WEAK_ROOTS, cond);
2486 }
2487 
2488 GCTracer* ShenandoahHeap::tracer() {
2489   return shenandoah_policy()->tracer();
2490 }
2491 
2492 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
2493   return _free_set->used();
2494 }
2495 
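     // Atomically transitions the cancellation flag from CANCELLABLE to CANCELLED.  Returns true only for
     // the single thread that performs the transition; threads that lose the race observe that cancellation
     // is already in effect.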
2496 bool ShenandoahHeap::try_cancel_gc() {
2497   jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
2498   return prev == CANCELLABLE;
2499 }
2500 
2501 void ShenandoahHeap::cancel_concurrent_mark() {
2502   _young_generation->cancel_marking();
2503   _old_generation->cancel_marking();
2504   _global_generation->cancel_marking();
2505 
2506   ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
2507 }
2508 
2509 void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
2510   if (try_cancel_gc()) {
2511     FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
2512     log_info(gc)("%s", msg.buffer());
2513     Events::log(Thread::current(), "%s", msg.buffer());
2514     _cancel_requested_time = os::elapsedTime();
2515   }
2516 }
2517 
2518 uint ShenandoahHeap::max_workers() {
2519   return _max_workers;
2520 }
2521 
2522 void ShenandoahHeap::stop() {
2523   // The shutdown sequence should be able to terminate when GC is running.
2524 
2525   // Step 1. Notify policy to disable event recording and prevent visiting GC threads during shutdown.
2526   _shenandoah_policy->record_shutdown();
2527 
2528   // Step 2. Stop requesting collections.
2529   regulator_thread()->stop();
2530 
2531   // Step 3. Notify control thread that we are in shutdown.
2532   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
2533   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
2534   control_thread()->prepare_for_graceful_shutdown();
2535 
2536   // Step 4. Notify GC workers that we are cancelling GC.
2537   cancel_gc(GCCause::_shenandoah_stop_vm);
2538 
2539   // Step 5. Wait until the GC control thread exits normally.
2540   control_thread()->stop();
2541 }
2542 
2543 void ShenandoahHeap::stw_unload_classes(bool full_gc) {
2544   if (!unload_classes()) return;
2545   // Unload classes and purge SystemDictionary.
2546   {
2547     ShenandoahPhaseTimings::Phase phase = full_gc ?
2548                                           ShenandoahPhaseTimings::full_gc_purge_class_unload :
2549                                           ShenandoahPhaseTimings::degen_gc_purge_class_unload;
2550     ShenandoahIsAliveSelector is_alive;
2551     CodeCache::UnloadingScope scope(is_alive.is_alive_closure());
2552     ShenandoahGCPhase gc_phase(phase);
2553     ShenandoahGCWorkerPhase worker_phase(phase);
2554     bool purged_class = SystemDictionary::do_unloading(gc_timer());
2555 
2556     uint num_workers = _workers->active_workers();
2557     ShenandoahClassUnloadingTask unlink_task(phase, num_workers, purged_class);
2558     _workers->run_task(&unlink_task);
2559   }

2613 }
2614 
2615 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
2616   set_gc_state_mask(HAS_FORWARDED, cond);
2617 }
2618 
2619 void ShenandoahHeap::set_unload_classes(bool uc) {
2620   _unload_classes.set_cond(uc);
2621 }
2622 
2623 bool ShenandoahHeap::unload_classes() const {
2624   return _unload_classes.is_set();
2625 }
2626 
2627 address ShenandoahHeap::in_cset_fast_test_addr() {
2628   ShenandoahHeap* heap = ShenandoahHeap::heap();
2629   assert(heap->collection_set() != nullptr, "Sanity");
2630   return (address) heap->collection_set()->biased_map_address();
2631 }
2632 




2633 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
2634   if (mode()->is_generational()) {
2635     young_generation()->reset_bytes_allocated_since_gc_start();
2636     old_generation()->reset_bytes_allocated_since_gc_start();
2637   }
2638 
2639   global_generation()->reset_bytes_allocated_since_gc_start();
2640 }
2641 
2642 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
2643   _degenerated_gc_in_progress.set_cond(in_progress);
2644 }
2645 
2646 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2647   _full_gc_in_progress.set_cond(in_progress);
2648 }
2649 
2650 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2651   assert (is_full_gc_in_progress(), "should be");
2652   _full_gc_move_in_progress.set_cond(in_progress);
2653 }
2654 
2655 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2656   set_gc_state_mask(UPDATEREFS, in_progress);
2657 }
2658 
2659 void ShenandoahHeap::register_nmethod(nmethod* nm) {

2683     if (r->is_active()) {
2684       if (r->is_pinned()) {
2685         if (r->pin_count() == 0) {
2686           r->make_unpinned();
2687         }
2688       } else {
2689         if (r->pin_count() > 0) {
2690           r->make_pinned();
2691         }
2692       }
2693     }
2694   }
2695 
2696   assert_pinned_region_status();
2697 }
2698 
2699 #ifdef ASSERT
2700 void ShenandoahHeap::assert_pinned_region_status() {
2701   for (size_t i = 0; i < num_regions(); i++) {
2702     ShenandoahHeapRegion* r = get_region(i);
2703     if (active_generation()->contains(r)) {
2704       assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
2705              "Region " SIZE_FORMAT " pinning status is inconsistent", i);
2706     }
2707   }
2708 }
2709 #endif
2710 
2711 ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
2712   return _gc_timer;
2713 }
2714 
2715 void ShenandoahHeap::prepare_concurrent_roots() {
2716   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2717   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2718   set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
2719   set_concurrent_weak_root_in_progress(true);
2720   if (unload_classes()) {
2721     _unloader.prepare();
2722   }
2723 }
2724 
2725 void ShenandoahHeap::finish_concurrent_roots() {
2726   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

2746       assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
2747     } else {
2748       // Use ConcGCThreads outside safepoints
2749       assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
2750     }
2751   }
2752 }
2753 #endif
2754 
2755 ShenandoahVerifier* ShenandoahHeap::verifier() {
2756   guarantee(ShenandoahVerify, "Should be enabled");
2757   assert (_verifier != nullptr, "sanity");
2758   return _verifier;
2759 }
2760 
2761 template<bool CONCURRENT>
2762 class ShenandoahUpdateHeapRefsTask : public WorkerTask {
2763 private:
2764   ShenandoahHeap* _heap;
2765   ShenandoahRegionIterator* _regions;
2766   ShenandoahRegionChunkIterator* _work_chunks;
2767 
2768 public:
2769   explicit ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions,
2770                                         ShenandoahRegionChunkIterator* work_chunks) :
2771     WorkerTask("Shenandoah Update References"),
2772     _heap(ShenandoahHeap::heap()),
2773     _regions(regions),
2774     _work_chunks(work_chunks)
2775   {
2776     log_info(gc, remset)("Scan remembered set using bitmap: %s", BOOL_TO_STR(_heap->is_old_bitmap_stable()));
2777   }
2778 
2779   void work(uint worker_id) {
2780     if (CONCURRENT) {
2781       ShenandoahConcurrentWorkerSession worker_session(worker_id);
2782       ShenandoahSuspendibleThreadSetJoiner stsj;
2783       do_work<ShenandoahConcUpdateRefsClosure>(worker_id);
2784     } else {
2785       ShenandoahParallelWorkerSession worker_session(worker_id);
2786       do_work<ShenandoahSTWUpdateRefsClosure>(worker_id);
2787     }
2788   }
2789 
2790 private:
2791   template<class T>
2792   void do_work(uint worker_id) {
2793     T cl;
2794     if (CONCURRENT && (worker_id == 0)) {
2795       // We ask the first worker to replenish the Mutator free set by moving regions previously reserved to hold the
2796       // results of evacuation.  These reserves are no longer necessary because evacuation has completed.
2797       size_t cset_regions = _heap->collection_set()->count();
2798       // We cannot transfer any more regions than will be reclaimed when the existing collection set is recycled, because
2799       // we need the reclaimed collection set regions to replenish the collector reserves
2800       _heap->free_set()->move_collector_sets_to_mutator(cset_regions);
2801     }
2802     // If !CONCURRENT, there's no value in expanding Mutator free set
2803 
2804     ShenandoahHeapRegion* r = _regions->next();
2805     // We update references for global, old, and young collections.
2806     assert(_heap->active_generation()->is_mark_complete(), "Expected complete marking");
2807     ShenandoahMarkingContext* const ctx = _heap->marking_context();
2808     bool is_mixed = _heap->collection_set()->has_old_regions();
2809     while (r != nullptr) {
2810       HeapWord* update_watermark = r->get_update_watermark();
2811       assert (update_watermark >= r->bottom(), "sanity");
2812 
2813       log_debug(gc)("ShenandoahUpdateHeapRefsTask::do_work(%u) looking at region " SIZE_FORMAT, worker_id, r->index());
2814       bool region_progress = false;
2815       if (r->is_active() && !r->is_cset()) {
2816         if (!_heap->mode()->is_generational() || r->is_young()) {
2817           _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2818           region_progress = true;
2819         } else if (r->is_old()) {
2820           if (_heap->active_generation()->is_global()) {
2821             // Note that GLOBAL collection is not as effectively balanced as young and mixed cycles.  This is because
2822             // concurrent GC threads are parceled out entire heap regions of work at a time and there
2823             // is no "catchup phase" consisting of remembered set scanning, during which parcels of work are smaller
2824             // and more fairly distributed across threads.
2825 
2826             // TODO: Consider an improvement to load balance GLOBAL GC.
2827             _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2828             region_progress = true;
2829           }
2830           // Otherwise, this is an old region in a young or mixed cycle.  Process it during a second phase, below.
2831           // Don't bother to report pacing progress in this case.
2832         } else {
2833           // Because updating of references runs concurrently, it is possible that a FREE inactive region transitions
2834           // to a non-free active region while this loop is executing.  Whenever this happens, the changing of a region's
2835           // active status may propagate at a different speed than the changing of the region's affiliation.
2836 
2837           // When we reach this control point, it is because a race has allowed a region's is_active() status to be seen
2838           // by this thread before the region's affiliation() is seen by this thread.
2839 
2840           // It's ok for this race to occur because the newly transformed region does not have any references to be
2841           // updated.
2842 
2843           assert(r->get_update_watermark() == r->bottom(),
2844                  "%s Region " SIZE_FORMAT " is_active but not recognized as YOUNG or OLD so must be newly transitioned from FREE",
2845                  r->affiliation_name(), r->index());
2846         }
2847       }
2848       if (region_progress && ShenandoahPacing) {
2849         _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
2850       }
2851       if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
2852         return;
2853       }
2854       r = _regions->next();
2855     }
2856 
2857     if (_heap->mode()->is_generational() && !_heap->active_generation()->is_global()) {
2858       // Since this is generational and not GLOBAL, we have to process the remembered set.  There is no remembered
2859       // set processing when not in generational mode or when running a GLOBAL cycle.
2860 
2861       // After this thread has exhausted its traditional update-refs work, it continues with updating refs within remembered set.
2862       // The remembered set workload is better balanced between threads, so threads that are "behind" can catch up with other
2863       // threads during this phase, allowing all threads to work more effectively in parallel.
2864       struct ShenandoahRegionChunk assignment;
2865       RememberedScanner* scanner = _heap->card_scan();
2866 
2867       while (!_heap->check_cancelled_gc_and_yield(CONCURRENT) && _work_chunks->next(&assignment)) {
2868         // Keep grabbing next work chunk to process until finished, or asked to yield
2869         ShenandoahHeapRegion* r = assignment._r;
2870         if (r->is_active() && !r->is_cset() && r->is_old()) {
2871           HeapWord* start_of_range = r->bottom() + assignment._chunk_offset;
2872           HeapWord* end_of_range = r->get_update_watermark();
2873           if (end_of_range > start_of_range + assignment._chunk_size) {
2874             end_of_range = start_of_range + assignment._chunk_size;
2875           }
2876 
2877           // Old region in a young cycle or mixed cycle.
2878           if (is_mixed) {
2879             // TODO: For mixed evac, consider building an old-gen remembered set that allows restricted updating
2880             // within old-gen HeapRegions.  This remembered set can be constructed by old-gen concurrent marking
2881             // and augmented by card marking.  For example, old-gen concurrent marking can remember for each old-gen
2882             // card which other old-gen regions it refers to: none, one-other specifically, multiple-other non-specific.
2883             // Update references when _mixed_evac processes each old-gen memory range that has a traditional DIRTY
2884             // card, or when the "old-gen remembered set" indicates that this card holds pointers specifically to an
2885             // old-gen region in the most recent collection set, or that this card holds pointers to other non-specific
2886             // old-gen heap regions.
2887 
2888             if (r->is_humongous()) {
2889               if (start_of_range < end_of_range) {
2890                 // Need to examine both dirty and clean cards during mixed evac.
2891                 r->oop_iterate_humongous_slice(&cl, false, start_of_range, assignment._chunk_size, true);
2892               }
2893             } else {
2894               // Since this is mixed evacuation, old regions that are candidates for collection have not been coalesced
2895               // and filled.  Use mark bits to find objects that need to be updated.
2896               //
2897               // Future TODO: establish a second remembered set to identify which old-gen regions point to other old-gen
2898               // regions which are in the collection set for a particular mixed evacuation.
2899               if (start_of_range < end_of_range) {
2900                 HeapWord* p = nullptr;
2901                 size_t card_index = scanner->card_index_for_addr(start_of_range);
2902                 // In case last object in my range spans boundary of my chunk, I may need to scan all the way to top()
2903                 ShenandoahObjectToOopBoundedClosure<T> objs(&cl, start_of_range, r->top());
2904 
2905                 // Any object that begins in a previous range is part of a different scanning assignment.  Any object that
2906                 // starts after end_of_range is also not my responsibility.  (Either allocated during evacuation, so does
2907                 // not hold pointers to from-space, or is beyond the range of my assigned work chunk.)
2908 
2909                 // Find the first object that begins in my range, if there is one.
2910                 p = start_of_range;
2911                 oop obj = cast_to_oop(p);
2912                 HeapWord* tams = ctx->top_at_mark_start(r);
2913                 if (p >= tams) {
2914                   // We cannot use ctx->is_marked(obj) to test whether an object begins at this address.  Instead,
2915                   // we need to use the remembered set crossing map to advance p to the first object that starts
2916                   // within the enclosing card.
2917 
2918                   while (true) {
2919                     HeapWord* first_object = scanner->first_object_in_card(card_index);
2920                     if (first_object != nullptr) {
2921                       p = first_object;
2922                       break;
2923                     } else if (scanner->addr_for_card_index(card_index + 1) < end_of_range) {
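                           // No object begins in this card; advance to the next card while it still starts below end_of_range.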
2924                       card_index++;
2925                     } else {
2926                       // Force the loop that follows to immediately terminate.
2927                       p = end_of_range;
2928                       break;
2929                     }
2930                   }
2931                   obj = cast_to_oop(p);
2932                   // Note: p may be >= end_of_range
2933                 } else if (!ctx->is_marked(obj)) {
2934                   p = ctx->get_next_marked_addr(p, tams);
2935                   obj = cast_to_oop(p);
2936                   // If there are no more marked objects before tams, this returns tams.
2937                   // Note that tams is either >= end_of_range, or tams is the start of an object that is marked.
2938                 }
2939                 while (p < end_of_range) {
2940                   // p is known to point to the beginning of marked object obj
2941                   objs.do_object(obj);
2942                   HeapWord* prev_p = p;
2943                   p += obj->size();
2944                   if (p < tams) {
2945                     p = ctx->get_next_marked_addr(p, tams);
2946                     // If there are no more marked objects before tams, this returns tams.  Note that tams is
2947                     // either >= end_of_range, or tams is the start of an object that is marked.
2948                   }
2949                   assert(p != prev_p, "Lack of forward progress");
2950                   obj = cast_to_oop(p);
2951                 }
2952               }
2953             }
2954           } else {
2955           // This is a young evacuation.
2956             if (start_of_range < end_of_range) {
2957               size_t cluster_size =
2958                 CardTable::card_size_in_words() * ShenandoahCardCluster<ShenandoahDirectCardMarkRememberedSet>::CardsPerCluster;
2959               size_t clusters = assignment._chunk_size / cluster_size;
2960               assert(clusters * cluster_size == assignment._chunk_size, "Chunk assignment must align on cluster boundaries");
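                   // Delegate this chunk to the remembered-set scanner, which processes it cluster by cluster
                   // up to end_of_range, applying the update closure.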
2961               scanner->process_region_slice(r, assignment._chunk_offset, clusters, end_of_range, &cl, true, worker_id);
2962             }
2963           }
2964           if (ShenandoahPacing && (start_of_range < end_of_range)) {
2965             _heap->pacer()->report_updaterefs(pointer_delta(end_of_range, start_of_range));
2966           }
2967         }
2968       }
2969     }
2970   }
2971 };
2972 
2973 void ShenandoahHeap::update_heap_references(bool concurrent) {
2974   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2975   uint nworkers = workers()->active_workers();
2976   ShenandoahRegionChunkIterator work_list(nworkers);
2977 
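       // The boolean template argument selects the closure flavor and worker-session type used by
       // ShenandoahUpdateHeapRefsTask::work(): concurrent updates (with suspendible-thread-set joining)
       // versus stop-the-world updates.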
2978   if (concurrent) {
2979     ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator, &work_list);
2980     workers()->run_task(&task);
2981   } else {
2982     ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator, &work_list);
2983     workers()->run_task(&task);
2984   }
2985   if (ShenandoahEnableCardStats && card_scan() != nullptr) { // non-null card_scan() is a proxy check for generational mode
2986     card_scan()->log_card_stats(nworkers, CARD_STAT_UPDATE_REFS);
2987   }
2988 }
2989 

2990 class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
2991 private:
2992   ShenandoahMarkingContext* _ctx;
2993   ShenandoahHeapLock* const _lock;
2994   bool _is_generational;
2995 
2996 public:
2997   ShenandoahFinalUpdateRefsUpdateRegionStateClosure(
2998     ShenandoahMarkingContext* ctx) : _ctx(ctx), _lock(ShenandoahHeap::heap()->lock()),
2999                                      _is_generational(ShenandoahHeap::heap()->mode()->is_generational()) { }
3000 
3001   void heap_region_do(ShenandoahHeapRegion* r) {
3002 
3003     // Maintenance of region age must follow evacuation in order to account for evacuation allocations within survivor
3004     // regions.  We consult region age during the subsequent evacuation to determine whether certain objects need to
3005     // be promoted.
3006     if (_is_generational && r->is_young() && r->is_active()) {
3007       HeapWord *tams = _ctx->top_at_mark_start(r);
3008       HeapWord *top = r->top();
3009 
3010       // Allocations move the watermark when top moves.  However, compacting
3011       // objects will sometimes lower top beneath the watermark, after which
3012       // attempts to read the watermark will assert out (the watermark should
3013       // not be higher than top).
3014       if (top > tams) {
3015         // There have been allocations in this region since the start of the cycle.
3016         // Any objects new to this region must not assimilate elevated age.
3017         r->reset_age();
3018       } else if (ShenandoahHeap::heap()->is_aging_cycle()) {
3019         r->increment_age();
3020       }
3021     }
3022 
3023     // Drop the unnecessary "pinned" state from regions that no longer have CP marks,
3024     // as this allows them to be trashed.

3025     if (r->is_active()) {
3026       if (r->is_pinned()) {
3027         if (r->pin_count() == 0) {
3028           ShenandoahHeapLocker locker(_lock);
3029           r->make_unpinned();
3030         }
3031       } else {
3032         if (r->pin_count() > 0) {
3033           ShenandoahHeapLocker locker(_lock);
3034           r->make_pinned();
3035         }
3036       }
3037     }
3038   }
3039 
3040   bool is_thread_safe() { return true; }
3041 };
3042 
3043 void ShenandoahHeap::update_heap_region_states(bool concurrent) {
3044   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
3045   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
3046 
3047   {
3048     ShenandoahGCPhase phase(concurrent ?
3049                             ShenandoahPhaseTimings::final_update_refs_update_region_states :
3050                             ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
3051     ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl(active_generation()->complete_marking_context());
3052     parallel_heap_region_iterate(&cl);
3053 
3054     assert_pinned_region_status();
3055   }
3056 
3057   {
3058     ShenandoahGCPhase phase(concurrent ?
3059                             ShenandoahPhaseTimings::final_update_refs_trash_cset :
3060                             ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
3061     trash_cset_regions();
3062   }
3063 }
3064 
3065 void ShenandoahHeap::rebuild_free_set(bool concurrent) {
3066   ShenandoahGCPhase phase(concurrent ?
3067                           ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
3068                           ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
3070   ShenandoahHeapLocker locker(lock());
3071   size_t young_cset_regions, old_cset_regions;
3072   size_t first_old_region, last_old_region, old_region_count;
3073   _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count);
3074   // If there are no old regions, first_old_region will be greater than last_old_region
3075   assert((first_old_region > last_old_region) ||
3076          ((last_old_region + 1 - first_old_region >= old_region_count) &&
3077           get_region(first_old_region)->is_old() && get_region(last_old_region)->is_old()),
3078          "sanity: old_region_count: " SIZE_FORMAT ", first_old_region: " SIZE_FORMAT ", last_old_region: " SIZE_FORMAT,
3079          old_region_count, first_old_region, last_old_region);
3080 
3081   if (mode()->is_generational()) {
3082     assert(verify_generation_usage(true, old_generation()->used_regions(),
3083                                    old_generation()->used(), old_generation()->get_humongous_waste(),
3084                                    true, young_generation()->used_regions(),
3085                                    young_generation()->used(), young_generation()->get_humongous_waste()),
3086            "Generation accounts are inaccurate");
3087 
3088     // The computation of bytes_of_allocation_runway_before_gc_trigger is quite conservative, so we consider all of this
3089     // memory available for transfer to old. Note that the transfer of humongous regions does not impact available memory.
3090     size_t allocation_runway = young_heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions);
3091     adjust_generation_sizes_for_next_cycle(allocation_runway, young_cset_regions, old_cset_regions);
3092 
3093     // Total old_available may have been expanded to hold anticipated promotions.  We trigger if the fragmented available
3094     // memory represents more than 16 regions worth of data.  Note that fragmentation may increase when we promote regular
3095     // regions in place while many of those regular regions still hold an abundant amount of available memory.  Fragmentation
3096     // will decrease as promote-by-copy consumes the available memory within these partially consumed regions.
3097     //
3098     // We consider old-gen to have excessive fragmentation if more than 12.5% of old-gen is free memory that resides
3099     // within partially consumed regions of memory.
3100   }
3101   // Rebuild free set based on adjusted generation sizes.
3102   _free_set->rebuild(young_cset_regions, old_cset_regions);
3103 
3104   if (mode()->is_generational() && (ShenandoahGenerationalHumongousReserve > 0)) {
3105     size_t old_region_span = (first_old_region <= last_old_region) ? (last_old_region + 1 - first_old_region) : 0;
3106     size_t allowed_old_gen_span = num_regions() - (ShenandoahGenerationalHumongousReserve * num_regions() / 100);
3107 
3108     // Tolerate lower density if total span is small.  Here's the implementation:
3109     //   if old_gen spans more than 100% and density < 75%, trigger old-defrag
3110     //   else if old_gen spans more than 87.5% and density < 62.5%, trigger old-defrag
3111     //   else if old_gen spans more than 75% and density < 50%, trigger old-defrag
3112     //   else if old_gen spans more than 62.5% and density < 37.5%, trigger old-defrag
3113     //   else if old_gen spans more than 50% and density < 25%, trigger old-defrag
3114     //
3115     // A previous implementation was more aggressive in triggering, resulting in degraded throughput when
3116     // humongous allocation was not required.
3117 
3118     ShenandoahGeneration* old_gen = old_generation();
3119     size_t old_available = old_gen->available();
3120     size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
3121     size_t old_unaffiliated_available = old_gen->free_unaffiliated_regions() * region_size_bytes;
3122     assert(old_available >= old_unaffiliated_available, "sanity");
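         // Fragmented available memory is free space that resides within affiliated (partially used) old regions,
         // excluding entirely free, unaffiliated regions.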
3123     size_t old_fragmented_available = old_available - old_unaffiliated_available;
3124 
3125     size_t old_bytes_consumed = old_region_count * region_size_bytes - old_fragmented_available;
3126     size_t old_bytes_spanned = old_region_span * region_size_bytes;
3127     double old_density = ((double) old_bytes_consumed) / old_bytes_spanned;
3128 
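         // Walk the five (span, density) threshold pairs described above, from the most permissive
         // (100% span, 75% density) down to (50% span, 25% density); trigger old-gen defragmentation at the
         // first pair where the old-gen span meets the span threshold and the density falls below the
         // density threshold.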
3129     uint eighths = 8;
3130     for (uint i = 0; i < 5; i++) {
3131       size_t span_threshold = eighths * allowed_old_gen_span / 8;
3132       double density_threshold = (eighths - 2) / 8.0;
3133       if ((old_region_span >= span_threshold) && (old_density < density_threshold)) {
3134         old_heuristics()->trigger_old_is_fragmented(old_density, first_old_region, last_old_region);
3135         break;
3136       }
3137       eighths--;
3138     }
3139 
3140     size_t old_used = old_generation()->used() + old_generation()->get_humongous_waste();
3141     size_t trigger_threshold = old_generation()->usage_trigger_threshold();
3142     // Detects unsigned arithmetic underflow
3143     assert(old_used <= capacity(),
3144            "Old used (" SIZE_FORMAT ", " SIZE_FORMAT") must not be more than heap capacity (" SIZE_FORMAT ")",
3145            old_generation()->used(), old_generation()->get_humongous_waste(), capacity());
3146 
3147     if (old_used > trigger_threshold) {
3148       old_heuristics()->trigger_old_has_grown();
3149     }
3150   }
3151 }
3152 
3153 void ShenandoahHeap::print_extended_on(outputStream *st) const {
3154   print_on(st);
3155   st->cr();
3156   print_heap_regions_on(st);
3157 }
3158 
3159 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
3160   size_t slice = r->index() / _bitmap_regions_per_slice;
3161 
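       // The bitmap slice counts as committed if any region mapped onto it is committed
       // (optionally ignoring region r itself when skip_self is set).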
3162   size_t regions_from = _bitmap_regions_per_slice * slice;
3163   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
3164   for (size_t g = regions_from; g < regions_to; g++) {
3165     assert (g / _bitmap_regions_per_slice == slice, "same slice");
3166     if (skip_self && g == r->index()) continue;
3167     if (get_region(g)->is_committed()) {
3168       return true;
3169     }

3240   EventMark em("%s", msg);
3241 
3242   op_uncommit(shrink_before, shrink_until);
3243 }
3244 
3245 void ShenandoahHeap::try_inject_alloc_failure() {
3246   if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
3247     _inject_alloc_failure.set();
3248     os::naked_short_sleep(1);
3249     if (cancelled_gc()) {
3250       log_info(gc)("Allocation failure was successfully injected");
3251     }
3252   }
3253 }
3254 
3255 bool ShenandoahHeap::should_inject_alloc_failure() {
3256   return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
3257 }
3258 
3259 void ShenandoahHeap::initialize_serviceability() {
3260   if (mode()->is_generational()) {
3261     _young_gen_memory_pool = new ShenandoahYoungGenMemoryPool(this);
3262     _old_gen_memory_pool = new ShenandoahOldGenMemoryPool(this);
3263     _cycle_memory_manager.add_pool(_young_gen_memory_pool);
3264     _cycle_memory_manager.add_pool(_old_gen_memory_pool);
3265     _stw_memory_manager.add_pool(_young_gen_memory_pool);
3266     _stw_memory_manager.add_pool(_old_gen_memory_pool);
3267   } else {
3268     _memory_pool = new ShenandoahMemoryPool(this);
3269     _cycle_memory_manager.add_pool(_memory_pool);
3270     _stw_memory_manager.add_pool(_memory_pool);
3271   }
3272 }
3273 
3274 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
3275   GrowableArray<GCMemoryManager*> memory_managers(2);
3276   memory_managers.append(&_cycle_memory_manager);
3277   memory_managers.append(&_stw_memory_manager);
3278   return memory_managers;
3279 }
3280 
3281 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
3282   GrowableArray<MemoryPool*> memory_pools(1);
3283   if (mode()->is_generational()) {
3284     memory_pools.append(_young_gen_memory_pool);
3285     memory_pools.append(_old_gen_memory_pool);
3286   } else {
3287     memory_pools.append(_memory_pool);
3288   }
3289   return memory_pools;
3290 }
3291 
3292 MemoryUsage ShenandoahHeap::memory_usage() {
3293   return MemoryUsage(_initial_size, used(), committed(), max_capacity());
3294 }
3295 
3296 ShenandoahRegionIterator::ShenandoahRegionIterator() :
3297   _heap(ShenandoahHeap::heap()),
3298   _index(0) {}
3299 
3300 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
3301   _heap(heap),
3302   _index(0) {}
3303 
3304 void ShenandoahRegionIterator::reset() {
3305   _index = 0;
3306 }
3307 
3308 bool ShenandoahRegionIterator::has_next() const {
3309   return _index < _heap->num_regions();
3310 }
3311 
3312 char ShenandoahHeap::gc_state() const {
3313   return _gc_state.raw_value();
3314 }
3315 
3316 ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
3317 #ifdef ASSERT
3318   assert(_liveness_cache != nullptr, "sanity");
3319   assert(worker_id < _max_workers, "sanity");
3320   for (uint i = 0; i < num_regions(); i++) {
3321     assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
3322   }
3323 #endif
3324   return _liveness_cache[worker_id];
3325 }
3326 
3327 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
3328   assert(worker_id < _max_workers, "sanity");
3329   assert(_liveness_cache != nullptr, "sanity");
3330   ShenandoahLiveData* ld = _liveness_cache[worker_id];
3331 
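       // Fold this worker's cached per-region live data into the region counters and clear the cache entries.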
3332   for (uint i = 0; i < num_regions(); i++) {
3333     ShenandoahLiveData live = ld[i];
3334     if (live > 0) {
3335       ShenandoahHeapRegion* r = get_region(i);
3336       r->increase_live_data_gc_words(live);
3337       ld[i] = 0;
3338     }
3339   }
3340 }
3341 
3342 bool ShenandoahHeap::requires_barriers(stackChunkOop obj) const {
3343   if (is_idle()) return false;
3344 
3345   // Objects allocated after marking start are implicitly alive and don't need any barriers during
3346   // the marking phase.
3347   if (is_concurrent_mark_in_progress() &&
3348      !marking_context()->allocated_after_mark_start(obj)) {
3349     return true;
3350   }
3351 
3352   // Cannot guarantee that obj is deeply good.
3353   if (has_forwarded_objects()) {
3354     return true;
3355   }
3356 
3357   return false;
3358 }
3359 
3360 void ShenandoahHeap::transfer_old_pointers_from_satb() {
3361   _old_generation->transfer_pointers_from_satb();
3362 }
3363 
3364 template<>
3365 void ShenandoahGenerationRegionClosure<YOUNG>::heap_region_do(ShenandoahHeapRegion* region) {
3366   // Visit young regions
3367   if (region->is_young()) {
3368     _cl->heap_region_do(region);
3369   }
3370 }
3371 
3372 template<>
3373 void ShenandoahGenerationRegionClosure<OLD>::heap_region_do(ShenandoahHeapRegion* region) {
3374   // Visit old regions
3375   if (region->is_old()) {
3376     _cl->heap_region_do(region);
3377   }
3378 }
3379 
3380 template<>
3381 void ShenandoahGenerationRegionClosure<GLOBAL_GEN>::heap_region_do(ShenandoahHeapRegion* region) {
3382   _cl->heap_region_do(region);
3383 }
3384 
3385 template<>
3386 void ShenandoahGenerationRegionClosure<GLOBAL_NON_GEN>::heap_region_do(ShenandoahHeapRegion* region) {
3387   _cl->heap_region_do(region);
3388 }
3389 
3390 bool ShenandoahHeap::verify_generation_usage(bool verify_old, size_t old_regions, size_t old_bytes, size_t old_waste,
3391                                              bool verify_young, size_t young_regions, size_t young_bytes, size_t young_waste) {
3392   size_t tally_old_regions = 0;
3393   size_t tally_old_bytes = 0;
3394   size_t tally_old_waste = 0;
3395   size_t tally_young_regions = 0;
3396   size_t tally_young_bytes = 0;
3397   size_t tally_young_waste = 0;
3398 
3399   shenandoah_assert_heaplocked_or_safepoint();
3400   for (size_t i = 0; i < num_regions(); i++) {
3401     ShenandoahHeapRegion* r = get_region(i);
3402     if (r->is_old()) {
3403       tally_old_regions++;
3404       tally_old_bytes += r->used();
3405       if (r->is_humongous()) {
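             // Humongous waste is the unused tail of the last region spanned by the humongous object; the
             // end_addr <= r->end() guard ensures the tail is tallied exactly once, in that final region.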
3406         ShenandoahHeapRegion* start = r->humongous_start_region();
3407         HeapWord* obj_addr = start->bottom();
3408         oop obj = cast_to_oop(obj_addr);
3409         size_t word_size = obj->size();
3410         HeapWord* end_addr = obj_addr + word_size;
3411         if (end_addr <= r->end()) {
3412           tally_old_waste += (r->end() - end_addr) * HeapWordSize;
3413         }
3414       }
3415     } else if (r->is_young()) {
3416       tally_young_regions++;
3417       tally_young_bytes += r->used();
3418       if (r->is_humongous()) {
3419         ShenandoahHeapRegion* start = r->humongous_start_region();
3420         HeapWord* obj_addr = start->bottom();
3421         oop obj = cast_to_oop(obj_addr);
3422         size_t word_size = obj->size();
3423         HeapWord* end_addr = obj_addr + word_size;
3424         if (end_addr <= r->end()) {
3425           tally_young_waste += (r->end() - end_addr) * HeapWordSize;
3426         }
3427       }
3428     }
3429   }
3430   if (verify_young &&
3431       ((young_regions != tally_young_regions) || (young_bytes != tally_young_bytes) || (young_waste != tally_young_waste))) {
3432     return false;
3433   } else if (verify_old &&
3434              ((old_regions != tally_old_regions) || (old_bytes != tally_old_bytes) || (old_waste != tally_old_waste))) {
3435     return false;
3436   } else {
3437     return true;
3438   }
3439 }
3440 
3441 ShenandoahGeneration* ShenandoahHeap::generation_for(ShenandoahAffiliation affiliation) const {
3442   if (!mode()->is_generational()) {
3443     return global_generation();
3444   } else if (affiliation == YOUNG_GENERATION) {
3445     return young_generation();
3446   } else if (affiliation == OLD_GENERATION) {
3447     return old_generation();
3448   }
3449 
3450   ShouldNotReachHere();
3451   return nullptr;
3452 }
3453 
3454 void ShenandoahHeap::log_heap_status(const char* msg) const {
3455   if (mode()->is_generational()) {
3456     young_generation()->log_status(msg);
3457     old_generation()->log_status(msg);
3458   } else {
3459     global_generation()->log_status(msg);
3460   }
3461 }