src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp

   1 /*
   2  * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "memory/allocation.hpp"
  28 #include "memory/universe.hpp"
  29 
  30 #include "gc/shared/classUnloadingContext.hpp"
  31 #include "gc/shared/gcArguments.hpp"
  32 #include "gc/shared/gcTimer.hpp"
  33 #include "gc/shared/gcTraceTime.inline.hpp"
  34 #include "gc/shared/locationPrinter.inline.hpp"
  35 #include "gc/shared/memAllocator.hpp"
  36 #include "gc/shared/plab.hpp"
  37 #include "gc/shared/tlab_globals.hpp"
  38 
  39 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  40 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  41 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  42 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  43 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
  45 #include "gc/shenandoah/shenandoahControlThread.hpp"
  46 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  47 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  48 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  49 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  50 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  51 #include "gc/shenandoah/shenandoahInitLogger.hpp"
  52 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  53 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
  54 #include "gc/shenandoah/shenandoahMetrics.hpp"
  55 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"

  56 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  57 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
  58 #include "gc/shenandoah/shenandoahPadding.hpp"
  59 #include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
  60 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  61 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"

  62 #include "gc/shenandoah/shenandoahSTWMark.hpp"
  63 #include "gc/shenandoah/shenandoahUtils.hpp"
  64 #include "gc/shenandoah/shenandoahVerifier.hpp"
  65 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  66 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  67 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  68 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"


  69 #include "gc/shenandoah/mode/shenandoahIUMode.hpp"
  70 #include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
  71 #include "gc/shenandoah/mode/shenandoahSATBMode.hpp"


  72 #if INCLUDE_JFR
  73 #include "gc/shenandoah/shenandoahJfrSupport.hpp"
  74 #endif
  75 
  76 #include "classfile/systemDictionary.hpp"
  77 #include "code/codeCache.hpp"
  78 #include "memory/classLoaderMetaspace.hpp"
  79 #include "memory/metaspaceUtils.hpp"
  80 #include "oops/compressedOops.inline.hpp"
  81 #include "prims/jvmtiTagMap.hpp"
  82 #include "runtime/atomic.hpp"
  83 #include "runtime/globals.hpp"
  84 #include "runtime/interfaceSupport.inline.hpp"
  85 #include "runtime/java.hpp"
  86 #include "runtime/orderAccess.hpp"
  87 #include "runtime/safepointMechanism.hpp"
  88 #include "runtime/vmThread.hpp"
  89 #include "services/mallocTracker.hpp"
  90 #include "services/memTracker.hpp"
  91 #include "utilities/events.hpp"

 143 jint ShenandoahHeap::initialize() {
 144   //
 145   // Figure out heap sizing
 146   //
 147 
 148   size_t init_byte_size = InitialHeapSize;
 149   size_t min_byte_size  = MinHeapSize;
 150   size_t max_byte_size  = MaxHeapSize;
 151   size_t heap_alignment = HeapAlignment;
 152 
 153   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 154 
 155   Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
 156   Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
 157 
 158   _num_regions = ShenandoahHeapRegion::region_count();
 159   assert(_num_regions == (max_byte_size / reg_size_bytes),
 160          "Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT,
 161          _num_regions, max_byte_size, reg_size_bytes);
 162 
 163   // Now we know the number of regions, initialize the heuristics.
 164   initialize_heuristics();
 165 
 166   size_t num_committed_regions = init_byte_size / reg_size_bytes;
 167   num_committed_regions = MIN2(num_committed_regions, _num_regions);
 168   assert(num_committed_regions <= _num_regions, "sanity");
 169   _initial_size = num_committed_regions * reg_size_bytes;
 170 
 171   size_t num_min_regions = min_byte_size / reg_size_bytes;
 172   num_min_regions = MIN2(num_min_regions, _num_regions);
 173   assert(num_min_regions <= _num_regions, "sanity");
 174   _minimum_size = num_min_regions * reg_size_bytes;
 175 
 176   // Default to max heap size.
 177   _soft_max_size = _num_regions * reg_size_bytes;
 178 
 179   _committed = _initial_size;
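
  // Worked sizing example (illustrative numbers, not from the original source):
  // with -Xmx1g, -Xms256m and a hypothetical 256 KiB region size, this yields
  // _num_regions = 1 GiB / 256 KiB = 4096, num_committed_regions =
  // 256 MiB / 256 KiB = 1024, hence _initial_size = 256 MiB, while
  // _soft_max_size defaults to the full 1 GiB.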
 180 
 181   size_t heap_page_size   = UseLargePages ? os::large_page_size() : os::vm_page_size();
 182   size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 183   size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 184 
 185   //
 186   // Reserve and commit memory for heap
 187   //
 188 
 189   ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
 190   initialize_reserved_region(heap_rs);
 191   _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
 192   _heap_region_special = heap_rs.special();
 193 
 194   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
 195          "Misaligned heap: " PTR_FORMAT, p2i(base()));
 196 
 197 #if SHENANDOAH_OPTIMIZED_MARKTASK
 198   // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
 199   // Fail if we ever attempt to address more than we can.
 200   if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
 201     FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
 202                           "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
 203                           "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
 204                 p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
 205     vm_exit_during_initialization("Fatal Error", buf);
 206   }
 207 #endif
 208 
 209   ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
 210   if (!_heap_region_special) {
 211     os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
 212                               "Cannot commit heap memory");
 213   }
 214 
 215   //
 216   // Reserve and commit memory for bitmap(s)
 217   //
 218 
 219   _bitmap_size = ShenandoahMarkBitMap::compute_size(heap_rs.size());
 220   _bitmap_size = align_up(_bitmap_size, bitmap_page_size);
 221 
 222   size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();
 223 
 224   guarantee(bitmap_bytes_per_region != 0,
 225             "Bitmap bytes per region should not be zero");
 226   guarantee(is_power_of_2(bitmap_bytes_per_region),
 227             "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
 228 
 229   if (bitmap_page_size > bitmap_bytes_per_region) {
 230     _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
 231     _bitmap_bytes_per_slice = bitmap_page_size;
 232   } else {
 233     _bitmap_regions_per_slice = 1;
 234     _bitmap_bytes_per_slice = bitmap_bytes_per_region;
 235   }
 236 
 237   guarantee(_bitmap_regions_per_slice >= 1,
 238             "Should have at least one region per slice: " SIZE_FORMAT,
 239             _bitmap_regions_per_slice);
 240 
 241   guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
 242             "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
 243             _bitmap_bytes_per_slice, bitmap_page_size);
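
  // Slicing example (hypothetical numbers): assuming heap_map_factor() = 64
  // heap bytes per bitmap byte, a 256 KiB region needs 4 KiB of bitmap. With
  // 4 KiB pages, one slice then covers exactly one region; with 2 MiB large
  // pages, one slice covers 2 MiB / 4 KiB = 512 regions, and bitmap memory
  // is committed in 2 MiB chunks.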
 244 
 245   ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
 246   MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
 247   _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
 248   _bitmap_region_special = bitmap.special();
 249 
 250   size_t bitmap_init_commit = _bitmap_bytes_per_slice *
 251                               align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
 252   bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
 253   if (!_bitmap_region_special) {
 254     os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
 255                               "Cannot commit bitmap memory");
 256   }
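
  // Continuing the example above: committing 1024 regions at one region per
  // 4 KiB slice commits 1024 * 4 KiB = 4 MiB of bitmap up front, clamped so
  // it never exceeds _bitmap_size.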
 257 
 258   _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions, _max_workers);
 259 
 260   if (ShenandoahVerify) {
 261     ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
 262     if (!verify_bitmap.special()) {
 263       os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
 264                                 "Cannot commit verification bitmap memory");
 265     }
 266     MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
 267     MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
 268     _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
 269     _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
 270   }
 271 
 272   // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
 273   ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
 274   MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
 275   _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
 276   _aux_bitmap_region_special = aux_bitmap.special();
 277   _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
 278 
 279   //
 280   // Create regions and region sets
 281   //
 282   size_t region_align = align_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE);
 283   size_t region_storage_size = align_up(region_align * _num_regions, region_page_size);
 284   region_storage_size = align_up(region_storage_size, os::vm_allocation_granularity());

 285 
 286   ReservedSpace region_storage(region_storage_size, region_page_size);
 287   MemTracker::record_virtual_memory_type(region_storage.base(), mtGC);
 288   if (!region_storage.special()) {
 289     os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false,
 290                               "Cannot commit region memory");
 291   }
 292 
 293   // Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks.
 294   // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there.
 295   // If not successful, bite the bullet and allocate at whatever address is available.
 296   {
 297     size_t cset_align = MAX2<size_t>(os::vm_page_size(), os::vm_allocation_granularity());
 298     size_t cset_size = align_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align);

 299 
 300     uintptr_t min = round_up_power_of_2(cset_align);
 301     uintptr_t max = (1u << 30u);

 302 
 303     for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
 304       char* req_addr = (char*)addr;
 305       assert(is_aligned(req_addr, cset_align), "Should be aligned");
 306       ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size(), req_addr);
 307       if (cset_rs.is_reserved()) {
 308         assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
 309         _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 310         break;
 311       }
 312     }
 313 
 314     if (_collection_set == nullptr) {
 315       ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size());
 316       _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 317     }
 318   }
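
  // Sketch of the probing above (hypothetical numbers): for a heap ending at
  // 5 GiB with region_size_bytes_shift() = 18 (256 KiB regions), the cset map
  // needs 5 GiB >> 18 = 20 KiB, one byte per potential region index. The loop
  // probes power-of-two addresses from cset_align up to 1 GiB, because a low
  // biased map address allows compilers to emit more compact cset checks.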
 319 
 320   _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);

 321   _free_set = new ShenandoahFreeSet(this, _num_regions);
 322 
 323   {
 324     ShenandoahHeapLocker locker(lock());
 325 
 326     for (size_t i = 0; i < _num_regions; i++) {
 327       HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
 328       bool is_committed = i < num_committed_regions;
 329       void* loc = region_storage.base() + i * region_align;
 330 
 331       ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
 332       assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");
 333 
 334       _marking_context->initialize_top_at_mark_start(r);
 335       _regions[i] = r;
 336       assert(!collection_set()->is_in(i), "New region should not be in collection set");


 337     }
 338 
 339     // Initialize to complete
 340     _marking_context->mark_complete();

 341 
 342     _free_set->rebuild();
 343   }
 344 
 345   if (AlwaysPreTouch) {
 346     // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
 347     // before initialize() below zeroes it with initializing thread. For any given region,
 348     // we touch the region and the corresponding bitmaps from the same thread.
 349     ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
 350 
 351     _pretouch_heap_page_size = heap_page_size;
 352     _pretouch_bitmap_page_size = bitmap_page_size;
 353 
 354 #ifdef LINUX
 355     // UseTransparentHugePages would madvise that backing memory can be coalesced into huge
 356     // pages. But the kernel needs to know that every small page is used, in order to coalesce
 357     // them into a huge one. Therefore, we need to pretouch with the smaller pages.
 358     if (UseTransparentHugePages) {
 359       _pretouch_heap_page_size = (size_t)os::vm_page_size();
 360       _pretouch_bitmap_page_size = (size_t)os::vm_page_size();
 361     }
 362 #endif
 363 
 364     // OS memory managers may want to coalesce back-to-back pages. Make their jobs
 365   // simpler by pre-touching contiguous spaces (heap and bitmap) separately.
 366 
 367     ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, _pretouch_bitmap_page_size);
 368     _workers->run_task(&bcl);
 369 
 370     ShenandoahPretouchHeapTask hcl(_pretouch_heap_page_size);
 371     _workers->run_task(&hcl);
 372   }
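
  // Pretouch sketch (illustrative only; the tasks above rely on OS support):
  // touching one word in every small page forces the kernel to back the whole
  // range, which is what later allows THP to coalesce the populated pages:
  //
  //   for (char* p = start; p < end; p += page_size) {
  //     Atomic::add((int*)p, 0); // write-touch without changing contents
  //   }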
 373 
 374   //
 375   // Initialize the rest of GC subsystems
 376   //
 377 
 378   _liveness_cache = NEW_C_HEAP_ARRAY(ShenandoahLiveData*, _max_workers, mtGC);
 379   for (uint worker = 0; worker < _max_workers; worker++) {
 380     _liveness_cache[worker] = NEW_C_HEAP_ARRAY(ShenandoahLiveData, _num_regions, mtGC);
 381     Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
 382   }
 383 
 384   // There should probably be Shenandoah-specific options for these,
 385   // just as there are G1-specific options.
 386   {
 387     ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
 388     satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
 389     satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
 390   }
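
  // Illustrative effect of the thresholds above: a retired mutator SATB
  // buffer is handed over to the collector only if filtering leaves it at
  // least 60% full (otherwise the mutator reuses it), and draining of
  // completed buffers is triggered once 20 of them are pending.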
 391 
 392   _monitoring_support = new ShenandoahMonitoringSupport(this);
 393   _phase_timings = new ShenandoahPhaseTimings(max_workers());
 394   ShenandoahCodeRoots::initialize();
 395 
 396   if (ShenandoahPacing) {
 397     _pacer = new ShenandoahPacer(this);
 398     _pacer->setup_for_idle();
 399   } else {
 400     _pacer = nullptr;
 401   }
 402 
 403   _control_thread = new ShenandoahControlThread();
 404 
 405   ShenandoahInitLogger::print();
 406 
 407   return JNI_OK;
 408 }
 409 
 410 void ShenandoahHeap::initialize_mode() {
 411   if (ShenandoahGCMode != nullptr) {
 412     if (strcmp(ShenandoahGCMode, "satb") == 0) {
 413       _gc_mode = new ShenandoahSATBMode();
 414     } else if (strcmp(ShenandoahGCMode, "iu") == 0) {
 415       _gc_mode = new ShenandoahIUMode();
 416     } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
 417       _gc_mode = new ShenandoahPassiveMode();


 418     } else {
 419       vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
 420     }
 421   } else {
 422     vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
 423   }
 424   _gc_mode->initialize_flags();
 425   if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 426     vm_exit_during_initialization(
 427             err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 428                     _gc_mode->name()));
 429   }
 430   if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
 431     vm_exit_during_initialization(
 432             err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 433                     _gc_mode->name()));
 434   }
 435 }
 436 
 437 void ShenandoahHeap::initialize_heuristics() {
 438   assert(_gc_mode != nullptr, "Must be initialized");
 439   _heuristics = _gc_mode->initialize_heuristics();
 440 
 441   if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 442     vm_exit_during_initialization(
 443             err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 444                     _heuristics->name()));
 445   }
 446   if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
 447     vm_exit_during_initialization(
 448             err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 449                     _heuristics->name()));
 450   }
 451 }
 452 
 453 #ifdef _MSC_VER
 454 #pragma warning( push )
 455 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 456 #endif
 457 
 458 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 459   CollectedHeap(),


 460   _initial_size(0),
 461   _used(0),
 462   _committed(0),
 463   _bytes_allocated_since_gc_start(0),
 464   _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
 465   _workers(nullptr),
 466   _safepoint_workers(nullptr),
 467   _heap_region_special(false),
 468   _num_regions(0),
 469   _regions(nullptr),
 470   _update_refs_iterator(this),
 471   _gc_state_changed(false),
 472   _control_thread(nullptr),


 473   _shenandoah_policy(policy),
 474   _gc_mode(nullptr),
 475   _heuristics(nullptr),
 476   _free_set(nullptr),
 477   _pacer(nullptr),
 478   _verifier(nullptr),
 479   _phase_timings(nullptr),

 480   _monitoring_support(nullptr),
 481   _memory_pool(nullptr),
 482   _stw_memory_manager("Shenandoah Pauses"),
 483   _cycle_memory_manager("Shenandoah Cycles"),
 484   _gc_timer(new ConcurrentGCTimer()),
 485   _soft_ref_policy(),
 486   _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
 487   _ref_processor(new ShenandoahReferenceProcessor(MAX2(_max_workers, 1U))),
 488   _marking_context(nullptr),
 489   _bitmap_size(0),
 490   _bitmap_regions_per_slice(0),
 491   _bitmap_bytes_per_slice(0),
 492   _bitmap_region_special(false),
 493   _aux_bitmap_region_special(false),
 494   _liveness_cache(nullptr),
 495   _collection_set(nullptr)
 496 {
 497   // Initialize GC mode early, so we can adjust barrier support
 498   initialize_mode();
 499   BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));
 500 
 501   _max_workers = MAX2(_max_workers, 1U);
 502   _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
 503   if (_workers == nullptr) {
 504     vm_exit_during_initialization("Failed necessary allocation.");
 505   } else {
 506     _workers->initialize_workers();
 507   }
 508 
 509   if (ParallelGCThreads > 1) {
 510     _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread",
 511                                                      ParallelGCThreads);
 512     _safepoint_workers->initialize_workers();
 513   }
 514 }
 515 
 516 #ifdef _MSC_VER
 517 #pragma warning( pop )
 518 #endif
 519 
 520 class ShenandoahResetBitmapTask : public WorkerTask {
 521 private:
 522   ShenandoahRegionIterator _regions;
 523 
 524 public:
 525   ShenandoahResetBitmapTask() :
 526     WorkerTask("Shenandoah Reset Bitmap") {}
 527 
 528   void work(uint worker_id) {
 529     ShenandoahHeapRegion* region = _regions.next();
 530     ShenandoahHeap* heap = ShenandoahHeap::heap();
 531     ShenandoahMarkingContext* const ctx = heap->marking_context();
 532     while (region != nullptr) {
 533       if (heap->is_bitmap_slice_committed(region)) {
 534         ctx->clear_bitmap(region);
 535       }
 536       region = _regions.next();
 537     }
 538   }
 539 };
 540 
 541 void ShenandoahHeap::reset_mark_bitmap() {
 542   assert_gc_workers(_workers->active_workers());
 543   mark_incomplete_marking_context();
 544 
 545   ShenandoahResetBitmapTask task;
 546   _workers->run_task(&task);
 547 }
 548 
 549 void ShenandoahHeap::print_on(outputStream* st) const {
 550   st->print_cr("Shenandoah Heap");
 551   st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
 552                byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
 553                byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
 554                byte_size_in_proper_unit(committed()),    proper_unit_for_byte_size(committed()),
 555                byte_size_in_proper_unit(used()),         proper_unit_for_byte_size(used()));
 556   st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT "%s regions",
 557                num_regions(),
 558                byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
 559                proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));
 560 
 561   st->print("Status: ");
 562   if (has_forwarded_objects())                 st->print("has forwarded objects, ");
 563   if (is_concurrent_mark_in_progress())        st->print("marking, ");

 564   if (is_evacuation_in_progress())             st->print("evacuating, ");
 565   if (is_update_refs_in_progress())            st->print("updating refs, ");
 566   if (is_degenerated_gc_in_progress())         st->print("degenerated gc, ");
 567   if (is_full_gc_in_progress())                st->print("full gc, ");
 568   if (is_full_gc_move_in_progress())           st->print("full gc move, ");
 569   if (is_concurrent_weak_root_in_progress())   st->print("concurrent weak roots, ");
 570   if (is_concurrent_strong_root_in_progress() &&
 571       !is_concurrent_weak_root_in_progress())  st->print("concurrent strong roots, ");
 572 
 573   if (cancelled_gc()) {
 574     st->print("cancelled");
 575   } else {
 576     st->print("not cancelled");
 577   }
 578   st->cr();
 579 
 580   st->print_cr("Reserved region:");
 581   st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
 582                p2i(reserved_region().start()),
 583                p2i(reserved_region().end()));

 594   st->cr();
 595   MetaspaceUtils::print_on(st);
 596 
 597   if (Verbose) {
 598     st->cr();
 599     print_heap_regions_on(st);
 600   }
 601 }
 602 
 603 class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
 604 public:
 605   void do_thread(Thread* thread) {
 606     assert(thread != nullptr, "Sanity");
 607     assert(thread->is_Worker_thread(), "Only worker thread expected");
 608     ShenandoahThreadLocalData::initialize_gclab(thread);
 609   }
 610 };
 611 
 612 void ShenandoahHeap::post_initialize() {
 613   CollectedHeap::post_initialize();


 614   MutexLocker ml(Threads_lock);
 615 
 616   ShenandoahInitWorkerGCLABClosure init_gclabs;
 617   _workers->threads_do(&init_gclabs);
 618 
 619   // The gclab cannot be initialized early during VM startup, as it cannot determine its max_size.
 620   // Instead, we let WorkerThreads initialize the gclab when a new worker is created.
 621   _workers->set_initialize_gclab();
 622   if (_safepoint_workers != nullptr) {
 623     _safepoint_workers->threads_do(&init_gclabs);
 624     _safepoint_workers->set_initialize_gclab();
 625   }
 626 
 627   _heuristics->initialize();
 628 
 629   JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers());
 630 }
 631 
 632 size_t ShenandoahHeap::used() const {
 633   return Atomic::load(&_used);
 634 }
 635 
 636 size_t ShenandoahHeap::committed() const {
 637   return Atomic::load(&_committed);
 638 }
 639 
 640 void ShenandoahHeap::increase_committed(size_t bytes) {
 641   shenandoah_assert_heaplocked_or_safepoint();
 642   _committed += bytes;
 643 }
 644 
 645 void ShenandoahHeap::decrease_committed(size_t bytes) {
 646   shenandoah_assert_heaplocked_or_safepoint();
 647   _committed -= bytes;
 648 }
 649 
 650 void ShenandoahHeap::increase_used(size_t bytes) {
 651   Atomic::add(&_used, bytes, memory_order_relaxed);
 652 }
 653 
 654 void ShenandoahHeap::set_used(size_t bytes) {
 655   Atomic::store(&_used, bytes);
 656 }
 657 
 658 void ShenandoahHeap::decrease_used(size_t bytes) {
 659   assert(used() >= bytes, "never decrease heap size by more than we've left");
 660   Atomic::sub(&_used, bytes, memory_order_relaxed);


 661 }
 662 
 663 void ShenandoahHeap::increase_allocated(size_t bytes) {
 664   Atomic::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed);
 665 }
 666 
 667 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
 668   size_t bytes = words * HeapWordSize;
 669   if (!waste) {
 670     increase_used(bytes);
 671   }
 672   increase_allocated(bytes);


 673   if (ShenandoahPacing) {
 674     control_thread()->pacing_notify_alloc(words);
 675     if (waste) {
 676       pacer()->claim_for_alloc(words, true);
 677     }
 678   }
 679 }
 680 
 681 size_t ShenandoahHeap::capacity() const {
 682   return committed();
 683 }
 684 
 685 size_t ShenandoahHeap::max_capacity() const {
 686   return _num_regions * ShenandoahHeapRegion::region_size_bytes();
 687 }
 688 
 689 size_t ShenandoahHeap::soft_max_capacity() const {
 690   size_t v = Atomic::load(&_soft_max_size);
 691   assert(min_capacity() <= v && v <= max_capacity(),
 692          "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
 693          min_capacity(), v, max_capacity());
 694   return v;
 695 }
 696 
 697 void ShenandoahHeap::set_soft_max_capacity(size_t v) {
 698   assert(min_capacity() <= v && v <= max_capacity(),
 699          "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
 700          min_capacity(), v, max_capacity());
 701   Atomic::store(&_soft_max_size, v);
 702 }
 703 
 704 size_t ShenandoahHeap::min_capacity() const {
 705   return _minimum_size;
 706 }
 707 
 708 size_t ShenandoahHeap::initial_capacity() const {
 709   return _initial_size;
 710 }
 711 
 712 bool ShenandoahHeap::is_in(const void* p) const {
 713   HeapWord* heap_base = (HeapWord*) base();
 714   HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
 715   return p >= heap_base && p < last_region_end;
 716 }
 717 
 718 void ShenandoahHeap::op_uncommit(double shrink_before, size_t shrink_until) {
 719   assert (ShenandoahUncommit, "should be enabled");
 720 
 721   // Application allocates from the beginning of the heap, and GC allocates at
 722   // the end of it. It is more efficient to uncommit from the end, so that the application
 723   // can keep enjoying the committed regions near the bottom. GC allocations are much less
 724   // frequent, and can therefore accept the re-committing costs.
 725 
 726   size_t count = 0;
 727   for (size_t i = num_regions(); i > 0; i--) { // beware of size_t underflow
 728     ShenandoahHeapRegion* r = get_region(i - 1);
 729     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
 730       ShenandoahHeapLocker locker(lock());
 731       if (r->is_empty_committed()) {
 732         if (committed() < shrink_until + ShenandoahHeapRegion::region_size_bytes()) {
 733           break;
 734         }
 735 
 736         r->make_uncommitted();
 737         count++;
 738       }
 739     }
 740     SpinPause(); // allow allocators to take the lock
 741   }
 742 
 743   if (count > 0) {
 744     control_thread()->notify_heap_changed();
 745   }
 746 }
 747 
 748 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
 749   // New object should fit the GCLAB size
 750   size_t min_size = MAX2(size, PLAB::min_size());
 751 
 752   // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
 753   size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;

 754   new_size = MIN2(new_size, PLAB::max_size());
 755   new_size = MAX2(new_size, PLAB::min_size());
 756 
 757   // Record the new heuristic value even if we take a shortcut below. This captures
 758   // the case when moderately-sized objects always take the shortcut. At some point,
 759   // heuristics should catch up with them.

 760   ShenandoahThreadLocalData::set_gclab_size(thread, new_size);
 761 
 762   if (new_size < size) {
 763     // New size still does not fit the object. Fall back to shared allocation.
 764     // This avoids retiring perfectly good GCLABs, when we encounter a large object.

 765     return nullptr;
 766   }
 767 
 768   // Retire current GCLAB, and allocate a new one.
 769   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
 770   gclab->retire();
 771 
 772   size_t actual_size = 0;
 773   HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
 774   if (gclab_buf == nullptr) {
 775     return nullptr;
 776   }
 777 
 778   assert (size <= actual_size, "allocation should fit");
 779 
 780   // ...and clear or zap just allocated TLAB, if needed.
 781   if (ZeroTLAB) {
 782     Copy::zero_to_words(gclab_buf, actual_size);
 783   } else if (ZapTLAB) {
 784     // Skip mangling the space corresponding to the object header to
 785     // ensure that the returned space is not considered parsable by
 786     // any concurrent GC thread.
 787     size_t hdr_size = oopDesc::header_size();
 788     Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
 789   }
 790   gclab->set_buf(gclab_buf, actual_size);
 791   return gclab->allocate(size);
 792 }
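
// Worked example for the sizing above (hypothetical numbers): a thread whose
// GCLAB was 64 K words asks for 128 K words, clamped into [PLAB::min_size(),
// PLAB::max_size()]. If the clamped size still cannot fit the object, we
// return nullptr and the caller falls back to a shared allocation rather
// than retiring a perfectly good GCLAB for one large object.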
 793 

 794 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
 795                                             size_t requested_size,
 796                                             size_t* actual_size) {
 797   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
 798   HeapWord* res = allocate_memory(req);
 799   if (res != nullptr) {
 800     *actual_size = req.actual_size();
 801   } else {
 802     *actual_size = 0;
 803   }
 804   return res;
 805 }
 806 
 807 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
 808                                              size_t word_size,
 809                                              size_t* actual_size) {
 810   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
 811   HeapWord* res = allocate_memory(req);
 812   if (res != nullptr) {
 813     *actual_size = req.actual_size();
 814   } else {
 815     *actual_size = 0;
 816   }
 817   return res;
 818 }
 819 
 820 HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
 821   intptr_t pacer_epoch = 0;
 822   bool in_new_region = false;
 823   HeapWord* result = nullptr;
 824 
 825   if (req.is_mutator_alloc()) {
 826     if (ShenandoahPacing) {
 827       pacer()->pace_for_alloc(req.size());
 828       pacer_epoch = pacer()->epoch();
 829     }
 830 
 831     if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
 832       result = allocate_memory_under_lock(req, in_new_region);
 833     }
 834 
 835   // Allocation failed, block until the control thread has reacted, then retry allocation.
 836     //
 837     // It might happen that one of the threads requesting allocation would unblock
 838     // way later after GC happened, only to fail the second allocation, because
 839     // other threads have already depleted the free storage. In this case, a better
 840     // strategy is to try again, as long as GC makes progress (or until at least
 841     // one full GC has completed).
 842     size_t original_count = shenandoah_policy()->full_gc_count();
 843     while (result == nullptr
 844         && (_progress_last_gc.is_set() || original_count == shenandoah_policy()->full_gc_count())) {
 845       control_thread()->handle_alloc_failure(req);
 846       result = allocate_memory_under_lock(req, in_new_region);
 847     }
 848   } else {
 849     assert(req.is_gc_alloc(), "Can only accept GC allocs here");
 850     result = allocate_memory_under_lock(req, in_new_region);
 851     // Do not call handle_alloc_failure() here, because we cannot block.
 852     // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
 853   }
 854 
 855   if (in_new_region) {
 856     control_thread()->notify_heap_changed();
 857   }
 858 
 859   if (result != nullptr) {
 860     size_t requested = req.size();
 861     size_t actual = req.actual_size();
 862 
 863     assert (req.is_lab_alloc() || (requested == actual),
 864             "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
 865             ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);
 866 
 867     if (req.is_mutator_alloc()) {
 868       notify_mutator_alloc_words(actual, false);
 869 
 870       // If we requested more than we were granted, give the rest back to pacer.
 871       // This only matters if we are in the same pacing epoch: do not try to unpace
 872       // over the budget for the other phase.
 873       if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
 874         pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
 875       }
 876     } else {
 877       increase_used(actual * HeapWordSize);
 878     }
 879   }
 880 
 881   return result;
 882 }
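
// Retry invariant for the mutator path above: a blocked allocator keeps
// retrying while either the previous GC made progress or no full GC has
// completed since its first failure (original_count still equals
// shenandoah_policy()->full_gc_count()). Once a full GC completes without
// satisfying the request, nullptr is returned and surfaces as an OOME.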
 883 
 884 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
 885   // If we are dealing with mutator allocation, then we may need to block for safepoint.
 886   // We cannot block for safepoint for GC allocations, because there is a high chance
 887   // we are already running at safepoint or from stack watermark machinery, and we cannot
 888   // block again.
 889   ShenandoahHeapLocker locker(lock(), req.is_mutator_alloc());
 890   return _free_set->allocate(req, in_new_region);
 891 }
 892 
 893 HeapWord* ShenandoahHeap::mem_allocate(size_t size,
 894                                         bool*  gc_overhead_limit_was_exceeded) {
 895   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
 896   return allocate_memory(req);
 897 }
 898 
 899 MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
 900                                                              size_t size,
 901                                                              Metaspace::MetadataType mdtype) {
 902   MetaWord* result;
 903 
 904   // Inform metaspace OOM to GC heuristics if class unloading is possible.
 905   if (heuristics()->can_unload_classes()) {
 906     ShenandoahHeuristics* h = heuristics();
 907     h->record_metaspace_oom();
 908   }
 909 
 910   // Expand and retry allocation
 911   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
 912   if (result != nullptr) {
 913     return result;
 914   }
 915 
 916   // Start full GC
 917   collect(GCCause::_metadata_GC_clear_soft_refs);
 918 
 919   // Retry allocation
 920   result = loader_data->metaspace_non_null()->allocate(size, mdtype);
 921   if (result != nullptr) {
 922     return result;
 923   }
 924 
 925   // Expand and retry allocation
 926   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);

 983     while ((r = _cs->claim_next()) != nullptr) {
 984       assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
 985       _sh->marked_object_iterate(r, &cl);
 986 
 987       if (ShenandoahPacing) {
 988         _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
 989       }
 990 
 991       if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
 992         break;
 993       }
 994     }
 995   }
 996 };
 997 
 998 void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
 999   ShenandoahEvacuationTask task(this, _collection_set, concurrent);
1000   workers()->run_task(&task);
1001 }
1002 
1003 void ShenandoahHeap::trash_cset_regions() {
1004   ShenandoahHeapLocker locker(lock());
1005 
1006   ShenandoahCollectionSet* set = collection_set();
1007   ShenandoahHeapRegion* r;
1008   set->clear_current_index();
1009   while ((r = set->next()) != nullptr) {
1010     r->make_trash();
1011   }
1012   collection_set()->clear();
1013 }
1014 
1015 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
1016   st->print_cr("Heap Regions:");
1017   st->print_cr("Region state: EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HP=pinned humongous start");
1018   st->print_cr("              HC=humongous continuation, CS=collection set, TR=trash, P=pinned, CSP=pinned collection set");
1019   st->print_cr("BTE=bottom/top/end, TAMS=top-at-mark-start");
1020   st->print_cr("UWM=update watermark, U=used");
1021   st->print_cr("T=TLAB allocs, G=GCLAB allocs");
1022   st->print_cr("S=shared allocs, L=live data");
1023   st->print_cr("CP=critical pins");
1024 
1025   for (size_t i = 0; i < num_regions(); i++) {
1026     get_region(i)->print_on(st);
1027   }
1028 }
1029 
1030 void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
1031   assert(start->is_humongous_start(), "reclaim regions starting with the first one");
1032 
1033   oop humongous_obj = cast_to_oop(start->bottom());
1034   size_t size = humongous_obj->size();
1035   size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
1036   size_t index = start->index() + required_regions - 1;
1037 
1038   assert(!start->has_live(), "liveness must be zero");
1039 
1040   for (size_t i = 0; i < required_regions; i++) {
1041     // Reclaim from the tail. Otherwise, an assertion fails when printing the region to the trace log,
1042     // as printing expects that every region belongs to a humongous range starting with a humongous start region.
1043     ShenandoahHeapRegion* region = get_region(index--);
1044 
1045     assert(region->is_humongous(), "expect correct humongous start or continuation");
1046     assert(!region->is_cset(), "Humongous region should not be in collection set");
1047 
1048     region->make_trash_immediate();
1049   }

1050 }
1051 
1052 class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
1053 public:
1054   ShenandoahCheckCleanGCLABClosure() {}
1055   void do_thread(Thread* thread) {
1056     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1057     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1058     assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
1059   }
1060 };
1061 
1062 class ShenandoahRetireGCLABClosure : public ThreadClosure {
1063 private:
1064   bool const _resize;
1065 public:
1066   ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
1067   void do_thread(Thread* thread) {
1068     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1069     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1070     gclab->retire();
1071     if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1072       ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1073     }
1074   }
1075 };
1076 
1077 void ShenandoahHeap::labs_make_parsable() {
1078   assert(UseTLAB, "Only call with UseTLAB");
1079 
1080   ShenandoahRetireGCLABClosure cl(false);
1081 
1082   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1083     ThreadLocalAllocBuffer& tlab = t->tlab();
1084     tlab.make_parsable();
1085     cl.do_thread(t);
1086   }
1087 
1088   workers()->threads_do(&cl);
1089 }
1090 
1091 void ShenandoahHeap::tlabs_retire(bool resize) {
1092   assert(UseTLAB, "Only call with UseTLAB");
1093   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");

1155   }
1156   return nullptr;
1157 }
1158 
1159 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1160   ShenandoahHeapRegion* r = heap_region_containing(addr);
1161   return r->block_is_obj(addr);
1162 }
1163 
1164 bool ShenandoahHeap::print_location(outputStream* st, void* addr) const {
1165   return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr);
1166 }
1167 
1168 void ShenandoahHeap::prepare_for_verify() {
1169   if (SafepointSynchronize::is_at_safepoint() && UseTLAB) {
1170     labs_make_parsable();
1171   }
1172 }
1173 
1174 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1175   tcl->do_thread(_control_thread);
1176   workers()->threads_do(tcl);
1177   if (_safepoint_workers != nullptr) {
1178     _safepoint_workers->threads_do(tcl);
1179   }
1180 }
1181 
1182 void ShenandoahHeap::print_tracing_info() const {
1183   LogTarget(Info, gc, stats) lt;
1184   if (lt.is_enabled()) {
1185     ResourceMark rm;
1186     LogStream ls(lt);
1187 
1188     phase_timings()->print_global_on(&ls);
1189 
1190     ls.cr();
1191     ls.cr();
1192 
1193     shenandoah_policy()->print_gc_stats(&ls);
1194 
1195     ls.cr();
1196     ls.cr();
1197   }
1198 }
1199 
1200 void ShenandoahHeap::verify(VerifyOption vo) {
1201   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1202     if (ShenandoahVerify) {
1203       verifier()->verify_generic(vo);
1204     } else {
1205       // TODO: Consider allocating verification bitmaps on demand,
1206       // and turn this on unconditionally.
1207     }
1208   }
1209 }

1210 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1211   return _free_set->capacity();
1212 }
1213 
1214 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1215 private:
1216   MarkBitMap* _bitmap;
1217   ShenandoahScanObjectStack* _oop_stack;
1218   ShenandoahHeap* const _heap;
1219   ShenandoahMarkingContext* const _marking_context;

1462 }
1463 
1464 // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
1465 void ShenandoahHeap::keep_alive(oop obj) {
1466   if (is_concurrent_mark_in_progress() && (obj != nullptr)) {
1467     ShenandoahBarrierSet::barrier_set()->enqueue(obj);
1468   }
1469 }
1470 
1471 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1472   for (size_t i = 0; i < num_regions(); i++) {
1473     ShenandoahHeapRegion* current = get_region(i);
1474     blk->heap_region_do(current);
1475   }
1476 }
1477 
1478 class ShenandoahParallelHeapRegionTask : public WorkerTask {
1479 private:
1480   ShenandoahHeap* const _heap;
1481   ShenandoahHeapRegionClosure* const _blk;

1482 
1483   shenandoah_padding(0);
1484   volatile size_t _index;
1485   shenandoah_padding(1);
1486 
1487 public:
1488   ShenandoahParallelHeapRegionTask(ShenandoahHeapRegionClosure* blk) :
1489           WorkerTask("Shenandoah Parallel Region Operation"),
1490           _heap(ShenandoahHeap::heap()), _blk(blk), _index(0) {}
1491 
1492   void work(uint worker_id) {
1493     ShenandoahParallelWorkerSession worker_session(worker_id);
1494     size_t stride = ShenandoahParallelRegionStride;
1495 
1496     size_t max = _heap->num_regions();
1497     while (Atomic::load(&_index) < max) {
1498       size_t cur = Atomic::fetch_then_add(&_index, stride, memory_order_relaxed);
1499       size_t start = cur;
1500       size_t end = MIN2(cur + stride, max);
1501       if (start >= max) break;
1502 
1503       for (size_t i = cur; i < end; i++) {
1504         ShenandoahHeapRegion* current = _heap->get_region(i);
1505         _blk->heap_region_do(current);
1506       }
1507     }
1508   }
1509 };
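
// Claiming sketch for the task above: each worker grabs a batch of
// ShenandoahParallelRegionStride region indexes with a single relaxed
// fetch_then_add, so contention is one atomic operation per stride rather
// than one per region; the stride trades claiming overhead for load balance.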
1510 
1511 void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1512   assert(blk->is_thread_safe(), "Only thread-safe closures here");
1513   if (num_regions() > ShenandoahParallelRegionStride) {
1514     ShenandoahParallelHeapRegionTask task(blk);
1515     workers()->run_task(&task);
1516   } else {
1517     heap_region_iterate(blk);
1518   }
1519 }
1520 
1521 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1522 private:
1523   ShenandoahMarkingContext* const _ctx;
1524 public:
1525   ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
1526 
1527   void heap_region_do(ShenandoahHeapRegion* r) {
1528     assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
1529     if (r->is_active()) {
1530       // Check if the region needs its TAMS updated. We have already updated it during concurrent
1531       // reset, so it is very likely we don't need to do another write here.
1532       if (_ctx->top_at_mark_start(r) != r->top()) {
1533         _ctx->capture_top_at_mark_start(r);
1534       }
1535     } else {
1536       assert(_ctx->top_at_mark_start(r) == r->top(),
1537              "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
1538     }
1539   }
1540 
1541   bool is_thread_safe() { return true; }
1542 };
1543 
1544 class ShenandoahRendezvousClosure : public HandshakeClosure {
1545 public:
1546   inline ShenandoahRendezvousClosure() : HandshakeClosure("ShenandoahRendezvous") {}
1547   inline void do_thread(Thread* thread) {}
1548 };
1549 
1550 void ShenandoahHeap::rendezvous_threads() {
1551   ShenandoahRendezvousClosure cl;
1552   Handshake::execute(&cl);
1553 }
1554 
1555 void ShenandoahHeap::recycle_trash() {
1556   free_set()->recycle_trash();
1557 }
1558 
1559 class ShenandoahResetUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1560 private:
1561   ShenandoahMarkingContext* const _ctx;
1562 public:
1563   ShenandoahResetUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
1564 
1565   void heap_region_do(ShenandoahHeapRegion* r) {
1566     if (r->is_active()) {
1567       // Reset live data and set TAMS optimistically. We would recheck these under the pause
1568       // anyway to capture any updates that happened in the meantime.
1569       r->clear_live_data();
1570       _ctx->capture_top_at_mark_start(r);
1571     }
1572   }
1573 
1574   bool is_thread_safe() { return true; }
1575 };
1576 
1577 void ShenandoahHeap::prepare_gc() {
1578   reset_mark_bitmap();
1579 
1580   ShenandoahResetUpdateRegionStateClosure cl;
1581   parallel_heap_region_iterate(&cl);
1582 }
1583 
1584 class ShenandoahFinalMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1585 private:
1586   ShenandoahMarkingContext* const _ctx;
1587   ShenandoahHeapLock* const _lock;
1588 
1589 public:
1590   ShenandoahFinalMarkUpdateRegionStateClosure() :
1591     _ctx(ShenandoahHeap::heap()->complete_marking_context()), _lock(ShenandoahHeap::heap()->lock()) {}
1592 
1593   void heap_region_do(ShenandoahHeapRegion* r) {
1594     if (r->is_active()) {
1595       // All allocations past TAMS are implicitly live, adjust the region data.
1596       // Bitmaps/TAMS are swapped at this point, so we need to poll complete bitmap.
1597       HeapWord *tams = _ctx->top_at_mark_start(r);
1598       HeapWord *top = r->top();
1599       if (top > tams) {
1600         r->increase_live_data_alloc_words(pointer_delta(top, tams));
1601       }
1602 
1603       // We are about to select the collection set, make sure it knows about
1604       // current pinning status. Also, this allows trashing more regions that
1605       // now have their pinning status dropped.
1606       if (r->is_pinned()) {
1607         if (r->pin_count() == 0) {
1608           ShenandoahHeapLocker locker(_lock);
1609           r->make_unpinned();
1610         }
1611       } else {
1612         if (r->pin_count() > 0) {
1613           ShenandoahHeapLocker locker(_lock);
1614           r->make_pinned();
1615         }
1616       }
1617 
1618       // Remember the limit for updating refs. It is guaranteed that no
1619       // from-space refs are written from here on.
1620       r->set_update_watermark_at_safepoint(r->top());
1621     } else {
1622       assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
1623       assert(_ctx->top_at_mark_start(r) == r->top(),
1624              "Region " SIZE_FORMAT " should have correct TAMS", r->index());
1625     }
1626   }
1627 
1628   bool is_thread_safe() { return true; }
1629 };
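
// TAMS accounting example (illustrative): if TAMS was captured while a
// region's top was at 3 MiB, and mutators then allocated up to 3.5 MiB
// during marking, the 0.5 MiB above TAMS is added to the live data via
// increase_live_data_alloc_words(), since allocations made during marking
// are implicitly live.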
1630 
1631 void ShenandoahHeap::prepare_regions_and_collection_set(bool concurrent) {
1632   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
1633   {
1634     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_update_region_states :
1635                                          ShenandoahPhaseTimings::degen_gc_final_update_region_states);
1636     ShenandoahFinalMarkUpdateRegionStateClosure cl;
1637     parallel_heap_region_iterate(&cl);
1638 
1639     assert_pinned_region_status();
1640   }
1641 
1642   {
1643     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::choose_cset :
1644                                          ShenandoahPhaseTimings::degen_gc_choose_cset);
1645     ShenandoahHeapLocker locker(lock());
1646     _collection_set->clear();
1647     heuristics()->choose_collection_set(_collection_set);
1648   }
1649 
1650   {
1651     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset :
1652                                          ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
1653     ShenandoahHeapLocker locker(lock());
1654     _free_set->rebuild();
1655   }
1656 }
1657 
1658 void ShenandoahHeap::do_class_unloading() {
1659   _unloader.unload();
1660 }
1661 
1662 void ShenandoahHeap::stw_weak_refs(bool full_gc) {
1663   // Weak refs processing
1664   ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
1665                                                 : ShenandoahPhaseTimings::degen_gc_weakrefs;
1666   ShenandoahTimingsTracker t(phase);
1667   ShenandoahGCWorkerPhase worker_phase(phase);
1668   ref_processor()->process_references(phase, workers(), false /* concurrent */);

1669 }
1670 
1671 void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
1672   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
1673 
1674   // Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to
1675   // make them parsable for update code to work correctly. Plus, we can compute new sizes
1676   // for future GCLABs here.
1677   if (UseTLAB) {
1678     ShenandoahGCPhase phase(concurrent ?
1679                             ShenandoahPhaseTimings::init_update_refs_manage_gclabs :
1680                             ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
1681     gclabs_retire(ResizeTLAB);
1682   }
1683 
1684   _update_refs_iterator.reset();
1685 }
1686 
1687 void ShenandoahHeap::propagate_gc_state_to_java_threads() {
1688   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
1689   if (_gc_state_changed) {
1690     _gc_state_changed = false;
1691     char state = gc_state();
1692     for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1693       ShenandoahThreadLocalData::set_gc_state(t, state);
1694     }
1695   }
1696 }
1697 
1698 void ShenandoahHeap::set_gc_state(uint mask, bool value) {
1699   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
1700   _gc_state.set_cond(mask, value);
1701   _gc_state_changed = true;
1702 }
1703 
1704 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1705   assert(!has_forwarded_objects(), "Not expected before/after mark phase");
1706   set_gc_state(MARKING, in_progress);
1707   ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1708 }
1709 
1710 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1711   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
1712   set_gc_state(EVACUATION, in_progress);
1713 }
1714 
1715 void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
1716   if (in_progress) {
1717     _concurrent_strong_root_in_progress.set();
1718   } else {
1719     _concurrent_strong_root_in_progress.unset();
1720   }
1721 }
1722 
1723 void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
1724   set_gc_state(WEAK_ROOTS, cond);
1725 }
1726 
1727 GCTracer* ShenandoahHeap::tracer() {
1728   return shenandoah_policy()->tracer();
1729 }
1730 
1731 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
1732   return _free_set->used();
1733 }
1734 
1735 bool ShenandoahHeap::try_cancel_gc() {
1736   jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
1737   return prev == CANCELLABLE;
1738 }
1739 
1740 void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
1741   if (try_cancel_gc()) {
1742     FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
1743     log_info(gc)("%s", msg.buffer());
1744     Events::log(Thread::current(), "%s", msg.buffer());

1745   }
1746 }
1747 
1748 uint ShenandoahHeap::max_workers() {
1749   return _max_workers;
1750 }
1751 
1752 void ShenandoahHeap::stop() {
1753   // The shutdown sequence should be able to terminate when GC is running.
1754 
1755   // Step 0. Notify policy to disable event recording.
1756   _shenandoah_policy->record_shutdown();
1757 
1758   // Step 1. Notify control thread that we are in shutdown.
1759   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
1760   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
1761   control_thread()->prepare_for_graceful_shutdown();
1762 
1763   // Step 2. Notify GC workers that we are cancelling GC.
1764   cancel_gc(GCCause::_shenandoah_stop_vm);

1848 }
1849 
1850 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
1851   set_gc_state(HAS_FORWARDED, cond);
1852 }
1853 
1854 void ShenandoahHeap::set_unload_classes(bool uc) {
1855   _unload_classes.set_cond(uc);
1856 }
1857 
1858 bool ShenandoahHeap::unload_classes() const {
1859   return _unload_classes.is_set();
1860 }
1861 
1862 address ShenandoahHeap::in_cset_fast_test_addr() {
1863   ShenandoahHeap* heap = ShenandoahHeap::heap();
1864   assert(heap->collection_set() != nullptr, "Sanity");
1865   return (address) heap->collection_set()->biased_map_address();
1866 }
1867 
1868 size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
1869   return Atomic::load(&_bytes_allocated_since_gc_start);
1870 }
1871 
1872 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
1873   Atomic::store(&_bytes_allocated_since_gc_start, (size_t)0);
1874 }
1875 
1876 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
1877   _degenerated_gc_in_progress.set_cond(in_progress);
1878 }
1879 
1880 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
1881   _full_gc_in_progress.set_cond(in_progress);
1882 }
1883 
1884 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
1885   assert (is_full_gc_in_progress(), "should be");
1886   _full_gc_move_in_progress.set_cond(in_progress);
1887 }
1888 
1889 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
1890   set_gc_state(UPDATEREFS, in_progress);
1891 }
1892 
1893 void ShenandoahHeap::register_nmethod(nmethod* nm) {

1917     if (r->is_active()) {
1918       if (r->is_pinned()) {
1919         if (r->pin_count() == 0) {
1920           r->make_unpinned();
1921         }
1922       } else {
1923         if (r->pin_count() > 0) {
1924           r->make_pinned();
1925         }
1926       }
1927     }
1928   }
1929 
1930   assert_pinned_region_status();
1931 }
1932 
1933 #ifdef ASSERT
1934 void ShenandoahHeap::assert_pinned_region_status() {
1935   for (size_t i = 0; i < num_regions(); i++) {
1936     ShenandoahHeapRegion* r = get_region(i);
1937     assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
1938            "Region " SIZE_FORMAT " pinning status is inconsistent", i);
1939   }
1940 }
1941 #endif
1942 
1943 ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
1944   return _gc_timer;
1945 }
1946 
1947 void ShenandoahHeap::prepare_concurrent_roots() {
1948   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
1949   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
1950   set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
1951   set_concurrent_weak_root_in_progress(true);
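  // Strong-root processing is needed only when there is a collection set to
  // evacuate; weak-root processing is enabled unconditionally here.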
1952   if (unload_classes()) {
1953     _unloader.prepare();
1954   }
1955 }
1956 
1957 void ShenandoahHeap::finish_concurrent_roots() {
1958   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
1959   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
1960   if (unload_classes()) {
1961     _unloader.finish();
1962   }
1963 }
1964 
1965 #ifdef ASSERT
1966 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
1967   assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
1968 
1969   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1970     if (UseDynamicNumberOfGCThreads) {
1971       assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");
1972     } else {
1973       // Use ParallelGCThreads inside safepoints
1974       assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints");
1975     }
1976   } else {
1977     if (UseDynamicNumberOfGCThreads) {
1978       assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
1979     } else {
1980       // Use ConcGCThreads outside safepoints
1981       assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
1982     }
1983   }
1984 }
1985 #endif
1986 
1987 ShenandoahVerifier* ShenandoahHeap::verifier() {
1988   guarantee(ShenandoahVerify, "Should be enabled");
1989   assert (_verifier != nullptr, "sanity");
1990   return _verifier;
1991 }
1992 
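// CONCURRENT selects the closure and worker-session kind at compile time: the
// concurrent variant joins the suspendible thread set so the workers can yield
// to safepoints, while the STW variant runs as a parallel worker inside one.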
1993 template<bool CONCURRENT>
1994 class ShenandoahUpdateHeapRefsTask : public WorkerTask {
1995 private:
1996   ShenandoahHeap* _heap;
1997   ShenandoahRegionIterator* _regions;
1998 public:
1999   ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions) :
2000     WorkerTask("Shenandoah Update References"),
2001     _heap(ShenandoahHeap::heap()),
2002     _regions(regions) {
2003   }
2004 
2005   void work(uint worker_id) {
2006     if (CONCURRENT) {
2007       ShenandoahConcurrentWorkerSession worker_session(worker_id);
2008       ShenandoahSuspendibleThreadSetJoiner stsj;
2009       do_work<ShenandoahConcUpdateRefsClosure>();
2010     } else {
2011       ShenandoahParallelWorkerSession worker_session(worker_id);
2012       do_work<ShenandoahSTWUpdateRefsClosure>();
2013     }
2014   }
2015 
2016 private:
2017   template<class T>
2018   void do_work() {
2019     T cl;
2020     ShenandoahHeapRegion* r = _regions->next();
2021     ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
2022     while (r != nullptr) {
2023       HeapWord* update_watermark = r->get_update_watermark();
2024       assert (update_watermark >= r->bottom(), "sanity");
2025       if (r->is_active() && !r->is_cset()) {
2026         _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2027       }
2028       if (ShenandoahPacing) {
2029         _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
2030       }
2031       if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
2032         return;
2033       }
2034       r = _regions->next();
2035     }
2036   }
2037 };
2038 
2039 void ShenandoahHeap::update_heap_references(bool concurrent) {
2040   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2041 
2042   if (concurrent) {
2043     ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator);
2044     workers()->run_task(&task);
2045   } else {
2046     ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator);
2047     workers()->run_task(&task);
2048   }
2049 }
2050 
2051 
2052 class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
2053 private:
2054   ShenandoahHeapLock* const _lock;
2055 
2056 public:
2057   ShenandoahFinalUpdateRefsUpdateRegionStateClosure() : _lock(ShenandoahHeap::heap()->lock()) {}
2058 
2059   void heap_region_do(ShenandoahHeapRegion* r) {
2060     // Drop the unnecessary "pinned" state from regions that no longer have CP marks,
2061     // as this allows trashing them.
2062 
2063     if (r->is_active()) {
2064       if (r->is_pinned()) {
2065         if (r->pin_count() == 0) {
2066           ShenandoahHeapLocker locker(_lock);
2067           r->make_unpinned();
2068         }
2069       } else {
2070         if (r->pin_count() > 0) {
2071           ShenandoahHeapLocker locker(_lock);
2072           r->make_pinned();
2073         }
2074       }
2075     }
2076   }
2077 
2078   bool is_thread_safe() { return true; }
2079 };
2080 
2081 void ShenandoahHeap::update_heap_region_states(bool concurrent) {
2082   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2083   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2084 
2085   {
2086     ShenandoahGCPhase phase(concurrent ?
2087                             ShenandoahPhaseTimings::final_update_refs_update_region_states :
2088                             ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
2089     ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl;
2090     parallel_heap_region_iterate(&cl);
2091 
2092     assert_pinned_region_status();
2093   }
2094 
2095   {
2096     ShenandoahGCPhase phase(concurrent ?
2097                             ShenandoahPhaseTimings::final_update_refs_trash_cset :
2098                             ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
2099     trash_cset_regions();
2100   }
2101 }
2102 
2103 void ShenandoahHeap::rebuild_free_set(bool concurrent) {
2104   {
2105     ShenandoahGCPhase phase(concurrent ?
2106                             ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
2107                             ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
2108     ShenandoahHeapLocker locker(lock());
2109     _free_set->rebuild();
2110   }
2111 }
2112 
2113 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2114   print_on(st);
2115   st->cr();
2116   print_heap_regions_on(st);
2117 }
2118 
2119 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2120   size_t slice = r->index() / _bitmap_regions_per_slice;
2121 
2122   size_t regions_from = _bitmap_regions_per_slice * slice;
2123   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2124   for (size_t g = regions_from; g < regions_to; g++) {
2125     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2126     if (skip_self && g == r->index()) continue;
2127     if (get_region(g)->is_committed()) {
2128       return true;
2129     }

2177   }
2178 
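  // For example, assuming _bitmap_regions_per_slice == 4: region 10 maps to slice 2,
  // which backs the bitmap for regions 8..11, so the slice may only be uncommitted
  // once none of those regions is committed (see is_bitmap_slice_committed() above).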
2179   // Uncommit the bitmap slice:
2180   size_t slice = r->index() / _bitmap_regions_per_slice;
2181   size_t off = _bitmap_bytes_per_slice * slice;
2182   size_t len = _bitmap_bytes_per_slice;
2183   if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
2184     return false;
2185   }
2186   return true;
2187 }
2188 
2189 void ShenandoahHeap::safepoint_synchronize_begin() {
2190   SuspendibleThreadSet::synchronize();
2191 }
2192 
2193 void ShenandoahHeap::safepoint_synchronize_end() {
2194   SuspendibleThreadSet::desynchronize();
2195 }
2196 
2197 void ShenandoahHeap::entry_uncommit(double shrink_before, size_t shrink_until) {
2198   static const char *msg = "Concurrent uncommit";
2199   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_uncommit, true /* log_heap_usage */);
2200   EventMark em("%s", msg);
2201 
2202   op_uncommit(shrink_before, shrink_until);
2203 }
2204 
2205 void ShenandoahHeap::try_inject_alloc_failure() {
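  // (os::random() % 1000) > 950 holds for 49 of the 1000 possible values,
  // i.e. roughly a 5% chance per call.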
2206   if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2207     _inject_alloc_failure.set();
2208     os::naked_short_sleep(1);
2209     if (cancelled_gc()) {
2210       log_info(gc)("Allocation failure was successfully injected");
2211     }
2212   }
2213 }
2214 
2215 bool ShenandoahHeap::should_inject_alloc_failure() {
2216   return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2217 }
2218 
2219 void ShenandoahHeap::initialize_serviceability() {
2220   _memory_pool = new ShenandoahMemoryPool(this);
2221   _cycle_memory_manager.add_pool(_memory_pool);
2222   _stw_memory_manager.add_pool(_memory_pool);
2223 }
2224 
2225 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2226   GrowableArray<GCMemoryManager*> memory_managers(2);
2227   memory_managers.append(&_cycle_memory_manager);
2228   memory_managers.append(&_stw_memory_manager);
2229   return memory_managers;
2230 }
2231 
2232 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2233   GrowableArray<MemoryPool*> memory_pools(1);
2234   memory_pools.append(_memory_pool);
2235   return memory_pools;
2236 }
2237 
2238 MemoryUsage ShenandoahHeap::memory_usage() {
2239   return _memory_pool->get_memory_usage();
2240 }
2241 
2242 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2243   _heap(ShenandoahHeap::heap()),
2244   _index(0) {}
2245 
2246 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2247   _heap(heap),
2248   _index(0) {}
2249 
2250 void ShenandoahRegionIterator::reset() {
2251   _index = 0;
2252 }
2253 
2254 bool ShenandoahRegionIterator::has_next() const {
2255   return _index < _heap->num_regions();
2256 }
2257 
2258 char ShenandoahHeap::gc_state() const {
2259   return _gc_state.raw_value();

2284   }
2285 }
2286 
2287 bool ShenandoahHeap::requires_barriers(stackChunkOop obj) const {
2288   if (is_idle()) return false;
2289 
2290   // Objects allocated after marking start are implicitly alive and don't need any
2291   // barriers during the marking phase.
2292   if (is_concurrent_mark_in_progress() &&
2293      !marking_context()->allocated_after_mark_start(obj)) {
2294     return true;
2295   }
2296 
2297   // Cannot guarantee the object is deeply good.
2298   if (has_forwarded_objects()) {
2299     return true;
2300   }
2301 
2302   return false;
2303 }

   1 /*
   2  * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.
   4  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
   5  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   6  *
   7  * This code is free software; you can redistribute it and/or modify it
   8  * under the terms of the GNU General Public License version 2 only, as
   9  * published by the Free Software Foundation.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 #include "precompiled.hpp"
  28 #include "memory/allocation.hpp"
  29 #include "memory/universe.hpp"
  30 
  31 #include "gc/shared/classUnloadingContext.hpp"
  32 #include "gc/shared/gcArguments.hpp"
  33 #include "gc/shared/gcTimer.hpp"
  34 #include "gc/shared/gcTraceTime.inline.hpp"
  35 #include "gc/shared/locationPrinter.inline.hpp"
  36 #include "gc/shared/memAllocator.hpp"
  37 #include "gc/shared/plab.hpp"
  38 #include "gc/shared/tlab_globals.hpp"
  39 
  40 #include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
  41 #include "gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp"
  42 #include "gc/shenandoah/shenandoahAllocRequest.hpp"
  43 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  44 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  45 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  46 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  47 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
  48 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  49 #include "gc/shenandoah/shenandoahControlThread.hpp"
  50 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  51 #include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
  52 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
  53 #include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
  54 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  55 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  56 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  57 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  58 #include "gc/shenandoah/shenandoahInitLogger.hpp"
  59 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  60 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
  61 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  62 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  63 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  64 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
  65 #include "gc/shenandoah/shenandoahPadding.hpp"
  66 #include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
  67 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  68 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  69 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
  70 #include "gc/shenandoah/shenandoahSTWMark.hpp"
  71 #include "gc/shenandoah/shenandoahUtils.hpp"
  72 #include "gc/shenandoah/shenandoahVerifier.hpp"
  73 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  74 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  75 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  76 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  77 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  78 #include "gc/shenandoah/mode/shenandoahGenerationalMode.hpp"
  79 #include "gc/shenandoah/mode/shenandoahIUMode.hpp"
  80 #include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
  81 #include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
  82 #include "utilities/globalDefinitions.hpp"
  83 
  84 #if INCLUDE_JFR
  85 #include "gc/shenandoah/shenandoahJfrSupport.hpp"
  86 #endif
  87 
  88 #include "classfile/systemDictionary.hpp"
  89 #include "code/codeCache.hpp"
  90 #include "memory/classLoaderMetaspace.hpp"
  91 #include "memory/metaspaceUtils.hpp"
  92 #include "oops/compressedOops.inline.hpp"
  93 #include "prims/jvmtiTagMap.hpp"
  94 #include "runtime/atomic.hpp"
  95 #include "runtime/globals.hpp"
  96 #include "runtime/interfaceSupport.inline.hpp"
  97 #include "runtime/java.hpp"
  98 #include "runtime/orderAccess.hpp"
  99 #include "runtime/safepointMechanism.hpp"
 100 #include "runtime/vmThread.hpp"
 101 #include "services/mallocTracker.hpp"
 102 #include "services/memTracker.hpp"
 103 #include "utilities/events.hpp"

 155 jint ShenandoahHeap::initialize() {
 156   //
 157   // Figure out heap sizing
 158   //
 159 
 160   size_t init_byte_size = InitialHeapSize;
 161   size_t min_byte_size  = MinHeapSize;
 162   size_t max_byte_size  = MaxHeapSize;
 163   size_t heap_alignment = HeapAlignment;
 164 
 165   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 166 
 167   Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
 168   Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
 169 
 170   _num_regions = ShenandoahHeapRegion::region_count();
 171   assert(_num_regions == (max_byte_size / reg_size_bytes),
 172          "Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT,
 173          _num_regions, max_byte_size, reg_size_bytes);
 174 
 175   size_t num_committed_regions = init_byte_size / reg_size_bytes;
 176   num_committed_regions = MIN2(num_committed_regions, _num_regions);
 177   assert(num_committed_regions <= _num_regions, "sanity");
 178   _initial_size = num_committed_regions * reg_size_bytes;
 179 
 180   size_t num_min_regions = min_byte_size / reg_size_bytes;
 181   num_min_regions = MIN2(num_min_regions, _num_regions);
 182   assert(num_min_regions <= _num_regions, "sanity");
 183   _minimum_size = num_min_regions * reg_size_bytes;
 184 
 185   // Default to max heap size.
 186   _soft_max_size = _num_regions * reg_size_bytes;
 187 
 188   _committed = _initial_size;
 189 
 190   size_t heap_page_size   = UseLargePages ? os::large_page_size() : os::vm_page_size();
 191   size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 192   size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 193 
 194   //
 195   // Reserve and commit memory for heap
 196   //
 197 
 198   ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
 199   initialize_reserved_region(heap_rs);
 200   _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
 201   _heap_region_special = heap_rs.special();
 202 
 203   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
 204          "Misaligned heap: " PTR_FORMAT, p2i(base()));
 205   os::trace_page_sizes_for_requested_size("Heap",
 206                                           max_byte_size, heap_rs.page_size(), heap_alignment,
 207                                           heap_rs.base(), heap_rs.size());
 208 
 209 #if SHENANDOAH_OPTIMIZED_MARKTASK
 210   // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
 211   // Fail if we ever attempt to address more than we can.
 212   if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
 213     FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
 214                           "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
 215                           "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
 216                 p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
 217     vm_exit_during_initialization("Fatal Error", buf);
 218   }
 219 #endif
 220 
 221   ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
 222   if (!_heap_region_special) {
 223     os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
 224                               "Cannot commit heap memory");
 225   }
 226 
 227   BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this, _heap_region));
 228 
 229   // Now we know the number of regions and heap sizes, initialize the heuristics.
 230   initialize_heuristics();
 231 
 232   assert(_heap_region.byte_size() == heap_rs.size(), "Need to know reserved size for card table");
 233 
 234   //
 235   // Worker threads must be initialized after the barrier is configured
 236   //
 237   _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
 238   if (_workers == nullptr) {
 239     vm_exit_during_initialization("Failed necessary allocation.");
 240   } else {
 241     _workers->initialize_workers();
 242   }
 243 
 244   if (ParallelGCThreads > 1) {
 245     _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread", ParallelGCThreads);
 246     _safepoint_workers->initialize_workers();
 247   }
 248 
 249   //
 250   // Reserve and commit memory for bitmap(s)
 251   //
 252 
 253   size_t bitmap_size_orig = ShenandoahMarkBitMap::compute_size(heap_rs.size());
 254   _bitmap_size = align_up(bitmap_size_orig, bitmap_page_size);
 255 
 256   size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();
 257 
 258   guarantee(bitmap_bytes_per_region != 0,
 259             "Bitmap bytes per region should not be zero");
 260   guarantee(is_power_of_2(bitmap_bytes_per_region),
 261             "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
 262 
 263   if (bitmap_page_size > bitmap_bytes_per_region) {
 264     _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
 265     _bitmap_bytes_per_slice = bitmap_page_size;
 266   } else {
 267     _bitmap_regions_per_slice = 1;
 268     _bitmap_bytes_per_slice = bitmap_bytes_per_region;
 269   }
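  // Illustrative example (sizes assumed, not prescribed): with 2M large pages and
  // 256K of bitmap per region, a slice covers 8 regions and 2M of bitmap; with
  // 4K pages, every region gets its own slice.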
 270 
 271   guarantee(_bitmap_regions_per_slice >= 1,
 272             "Should have at least one region per slice: " SIZE_FORMAT,
 273             _bitmap_regions_per_slice);
 274 
 275   guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
 276             "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
 277             _bitmap_bytes_per_slice, bitmap_page_size);
 278 
 279   ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
 280   os::trace_page_sizes_for_requested_size("Mark Bitmap",
 281                                           bitmap_size_orig, bitmap.page_size(), bitmap_page_size,
 282                                           bitmap.base(),
 283                                           bitmap.size());
 284   MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
 285   _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
 286   _bitmap_region_special = bitmap.special();
 287 
 288   size_t bitmap_init_commit = _bitmap_bytes_per_slice *
 289     align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
 290   bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
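  // Example, assuming 100 committed regions and 8 regions per slice: rounding up
  // gives 104 regions, i.e. 13 slices, so 13 * _bitmap_bytes_per_slice bytes are
  // committed, capped at the full _bitmap_size.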
 291   if (!_bitmap_region_special) {
 292     os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
 293                               "Cannot commit bitmap memory");
 294   }
 295 
 296   _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);
 297 
 298   if (ShenandoahVerify) {
 299     ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
 300     os::trace_page_sizes_for_requested_size("Verify Bitmap",
 301                                             bitmap_size_orig, verify_bitmap.page_size(), bitmap_page_size,
 302                                             verify_bitmap.base(),
 303                                             verify_bitmap.size());
 304     if (!verify_bitmap.special()) {
 305       os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
 306                                 "Cannot commit verification bitmap memory");
 307     }
 308     MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
 309     MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
 310     _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
 311     _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
 312   }
 313 
 314   // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
 315   size_t aux_bitmap_page_size = bitmap_page_size;
 316 
 317   ReservedSpace aux_bitmap(_bitmap_size, aux_bitmap_page_size);
 318   os::trace_page_sizes_for_requested_size("Aux Bitmap",
 319                                           bitmap_size_orig, aux_bitmap.page_size(), aux_bitmap_page_size,
 320                                           aux_bitmap.base(), aux_bitmap.size());
 321   MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
 322   _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
 323   _aux_bitmap_region_special = aux_bitmap.special();
 324   _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
 325 
 326   //
 327   // Create regions and region sets
 328   //
 329   size_t region_align = align_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE);
 330   size_t region_storage_size_orig = region_align * _num_regions;
 331   size_t region_storage_size = align_up(region_storage_size_orig,
 332                                         MAX2(region_page_size, os::vm_allocation_granularity()));
 333 
 334   ReservedSpace region_storage(region_storage_size, region_page_size);
 335   os::trace_page_sizes_for_requested_size("Region Storage",
 336                                           region_storage_size_orig, region_storage.page_size(), region_page_size,
 337                                           region_storage.base(), region_storage.size());
 338   MemTracker::record_virtual_memory_type(region_storage.base(), mtGC);
 339   if (!region_storage.special()) {
 340     os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false,
 341                               "Cannot commit region memory");
 342   }
 343 
 344   // Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks.
 345   // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there.
 346   // If not successful, bite the bullet and allocate at whatever address is available.
 347   {
 348     const size_t cset_align = MAX2<size_t>(os::vm_page_size(), os::vm_allocation_granularity());
 349     const size_t cset_size = align_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align);
 350     const size_t cset_page_size = os::vm_page_size();
 351 
 352     uintptr_t min = round_up_power_of_2(cset_align);
 353     uintptr_t max = (1u << 30u);
 354     ReservedSpace cset_rs;
 355 
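    // Probe power-of-two addresses from the alignment up to 1 GB; the first
    // successful reservation wins. The fallback below reserves at an arbitrary
    // address if every probe fails.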
 356     for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
 357       char* req_addr = (char*)addr;
 358       assert(is_aligned(req_addr, cset_align), "Should be aligned");
 359       cset_rs = ReservedSpace(cset_size, cset_align, cset_page_size, req_addr);
 360       if (cset_rs.is_reserved()) {
 361         assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
 362         _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 363         break;
 364       }
 365     }
 366 
 367     if (_collection_set == nullptr) {
 368       cset_rs = ReservedSpace(cset_size, cset_align, os::vm_page_size());
 369       _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 370     }
 371     os::trace_page_sizes_for_requested_size("Collection Set",
 372                                             cset_size, cset_rs.page_size(), cset_page_size,
 373                                             cset_rs.base(),
 374                                             cset_rs.size());
 375   }
 376 
 377   _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
 378   _affiliations = NEW_C_HEAP_ARRAY(uint8_t, _num_regions, mtGC);
 379   _free_set = new ShenandoahFreeSet(this, _num_regions);
 380 
 381   {
 382     ShenandoahHeapLocker locker(lock());
 383 
 384     for (size_t i = 0; i < _num_regions; i++) {
 385       HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
 386       bool is_committed = i < num_committed_regions;
 387       void* loc = region_storage.base() + i * region_align;
 388 
 389       ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
 390       assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");
 391 
 392       _marking_context->initialize_top_at_mark_start(r);
 393       _regions[i] = r;
 394       assert(!collection_set()->is_in(i), "New region should not be in collection set");
 395 
 396       _affiliations[i] = ShenandoahAffiliation::FREE;
 397     }
 398 
 399     // Initialize to complete
 400     _marking_context->mark_complete();
 401     size_t young_cset_regions, old_cset_regions;
 402 
 403     // We are initializing the free set; cset region tallies are ignored here.
 404     size_t first_old, last_old, num_old;
 405     _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
 406     _free_set->finish_rebuild(young_cset_regions, old_cset_regions, num_old);
 407   }
 408 
 409   if (AlwaysPreTouch) {
 410     // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
 411     // before initialize() below zeroes it with the initializing thread. For any given region,
 412     // we touch the region and the corresponding bitmaps from the same thread.
 413     ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
 414 
 415     _pretouch_heap_page_size = heap_page_size;
 416     _pretouch_bitmap_page_size = bitmap_page_size;
 417 
 418     // OS memory managers may want to coalesce back-to-back pages. Make their job
 419     // simpler by pre-touching contiguous spaces (heap and bitmap) separately.
 420 
 421     ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, _pretouch_bitmap_page_size);
 422     _workers->run_task(&bcl);
 423 
 424     ShenandoahPretouchHeapTask hcl(_pretouch_heap_page_size);
 425     _workers->run_task(&hcl);
 426   }
 427 
 428   //
 429   // Initialize the rest of GC subsystems
 430   //
 431 
 432   _liveness_cache = NEW_C_HEAP_ARRAY(ShenandoahLiveData*, _max_workers, mtGC);
 433   for (uint worker = 0; worker < _max_workers; worker++) {
 434     _liveness_cache[worker] = NEW_C_HEAP_ARRAY(ShenandoahLiveData, _num_regions, mtGC);
 435     Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
 436   }
 437 
 438   // There should probably be Shenandoah-specific options for these,
 439   // just as there are G1-specific options.
 440   {
 441     ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
 442     satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
 443     satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
 444   }
 445 
 446   _monitoring_support = new ShenandoahMonitoringSupport(this);
 447   _phase_timings = new ShenandoahPhaseTimings(max_workers());
 448   ShenandoahCodeRoots::initialize();
 449 
 450   if (ShenandoahPacing) {
 451     _pacer = new ShenandoahPacer(this);
 452     _pacer->setup_for_idle();
 453   }
 454 
 455   initialize_controller();
 456 
 457   print_init_logger();
 458 
 459   return JNI_OK;
 460 }
 461 
 462 void ShenandoahHeap::initialize_controller() {
 463   _control_thread = new ShenandoahControlThread();
 464 }
 465 
 466 void ShenandoahHeap::print_init_logger() const {
 467   ShenandoahInitLogger::print();
 468 }
 469 
 470 void ShenandoahHeap::initialize_mode() {
 471   if (ShenandoahGCMode != nullptr) {
 472     if (strcmp(ShenandoahGCMode, "satb") == 0) {
 473       _gc_mode = new ShenandoahSATBMode();
 474     } else if (strcmp(ShenandoahGCMode, "iu") == 0) {
 475       _gc_mode = new ShenandoahIUMode();
 476     } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
 477       _gc_mode = new ShenandoahPassiveMode();
 478     } else if (strcmp(ShenandoahGCMode, "generational") == 0) {
 479       _gc_mode = new ShenandoahGenerationalMode();
 480     } else {
 481       vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
 482     }
 483   } else {
 484     vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
 485   }
 486   _gc_mode->initialize_flags();
 487   if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 488     vm_exit_during_initialization(
 489             err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 490                     _gc_mode->name()));
 491   }
 492   if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
 493     vm_exit_during_initialization(
 494             err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 495                     _gc_mode->name()));
 496   }
 497 }
 498 
 499 void ShenandoahHeap::initialize_heuristics() {
 500   _global_generation = new ShenandoahGlobalGeneration(mode()->is_generational(), max_workers(), max_capacity(), max_capacity());
 501   _global_generation->initialize_heuristics(mode());
 502 }
 503 
 504 #ifdef _MSC_VER
 505 #pragma warning( push )
 506 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 507 #endif
 508 
 509 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 510   CollectedHeap(),
 511   _gc_generation(nullptr),
 512   _active_generation(nullptr),
 513   _initial_size(0),
 514   _committed(0),
 515   _max_workers(MAX3(ConcGCThreads, ParallelGCThreads, 1U)),
 516   _workers(nullptr),
 517   _safepoint_workers(nullptr),
 518   _heap_region_special(false),
 519   _num_regions(0),
 520   _regions(nullptr),
 521   _affiliations(nullptr),
 522   _gc_state_changed(false),
 523   _gc_no_progress_count(0),
 524   _cancel_requested_time(0),
 525   _update_refs_iterator(this),
 526   _global_generation(nullptr),
 527   _control_thread(nullptr),
 528   _young_generation(nullptr),
 529   _old_generation(nullptr),
 530   _shenandoah_policy(policy),
 531   _gc_mode(nullptr),
 532   _free_set(nullptr),
 533   _pacer(nullptr),
 534   _verifier(nullptr),
 535   _phase_timings(nullptr),
 536   _mmu_tracker(),
 537   _monitoring_support(nullptr),
 538   _memory_pool(nullptr),
 539   _stw_memory_manager("Shenandoah Pauses"),
 540   _cycle_memory_manager("Shenandoah Cycles"),
 541   _gc_timer(new ConcurrentGCTimer()),
 542   _soft_ref_policy(),
 543   _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
 544   _marking_context(nullptr),
 545   _bitmap_size(0),
 546   _bitmap_regions_per_slice(0),
 547   _bitmap_bytes_per_slice(0),
 548   _bitmap_region_special(false),
 549   _aux_bitmap_region_special(false),
 550   _liveness_cache(nullptr),
 551   _collection_set(nullptr)
 552 {
 553   // Initialize GC mode early, many subsequent initialization procedures depend on it
 554   initialize_mode();
 555 }
 556 
 557 #ifdef _MSC_VER
 558 #pragma warning( pop )
 559 #endif
 560 
 561 void ShenandoahHeap::print_on(outputStream* st) const {
 562   st->print_cr("Shenandoah Heap");
 563   st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
 564                byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
 565                byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
 566                byte_size_in_proper_unit(committed()),    proper_unit_for_byte_size(committed()),
 567                byte_size_in_proper_unit(used()),         proper_unit_for_byte_size(used()));
 568   st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions",
 569                num_regions(),
 570                byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
 571                proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));
 572 
 573   st->print("Status: ");
 574   if (has_forwarded_objects())                 st->print("has forwarded objects, ");
 575   if (is_concurrent_old_mark_in_progress())    st->print("old marking, ");
 576   if (is_concurrent_young_mark_in_progress())  st->print("young marking, ");
 577   if (is_evacuation_in_progress())             st->print("evacuating, ");
 578   if (is_update_refs_in_progress())            st->print("updating refs, ");
 579   if (is_degenerated_gc_in_progress())         st->print("degenerated gc, ");
 580   if (is_full_gc_in_progress())                st->print("full gc, ");
 581   if (is_full_gc_move_in_progress())           st->print("full gc move, ");
 582   if (is_concurrent_weak_root_in_progress())   st->print("concurrent weak roots, ");
 583   if (is_concurrent_strong_root_in_progress() &&
 584       !is_concurrent_weak_root_in_progress())  st->print("concurrent strong roots, ");
 585 
 586   if (cancelled_gc()) {
 587     st->print("cancelled");
 588   } else {
 589     st->print("not cancelled");
 590   }
 591   st->cr();
 592 
 593   st->print_cr("Reserved region:");
 594   st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
 595                p2i(reserved_region().start()),
 596                p2i(reserved_region().end()));

 607   st->cr();
 608   MetaspaceUtils::print_on(st);
 609 
 610   if (Verbose) {
 611     st->cr();
 612     print_heap_regions_on(st);
 613   }
 614 }
 615 
 616 class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
 617 public:
 618   void do_thread(Thread* thread) {
 619     assert(thread != nullptr, "Sanity");
 620     assert(thread->is_Worker_thread(), "Only worker thread expected");
 621     ShenandoahThreadLocalData::initialize_gclab(thread);
 622   }
 623 };
 624 
 625 void ShenandoahHeap::post_initialize() {
 626   CollectedHeap::post_initialize();
 627   _mmu_tracker.initialize();
 628 
 629   MutexLocker ml(Threads_lock);
 630 
 631   ShenandoahInitWorkerGCLABClosure init_gclabs;
 632   _workers->threads_do(&init_gclabs);
 633 
 634   // The gclab cannot be initialized early during VM startup, as it cannot determine its max_size.
 635   // Instead, we let WorkerThreads initialize the gclab when a new worker is created.
 636   _workers->set_initialize_gclab();
 637   if (_safepoint_workers != nullptr) {
 638     _safepoint_workers->threads_do(&init_gclabs);
 639     _safepoint_workers->set_initialize_gclab();
 640   }
 641 
 642   JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers());
 643 }
 644 
 645 ShenandoahHeuristics* ShenandoahHeap::heuristics() {
 646   return _global_generation->heuristics();
 647 }
 648 
 649 size_t ShenandoahHeap::used() const {
 650   return global_generation()->used();
 651 }
 652 
 653 size_t ShenandoahHeap::committed() const {
 654   return Atomic::load(&_committed);
 655 }
 656 
 657 void ShenandoahHeap::increase_committed(size_t bytes) {
 658   shenandoah_assert_heaplocked_or_safepoint();
 659   _committed += bytes;
 660 }
 661 
 662 void ShenandoahHeap::decrease_committed(size_t bytes) {
 663   shenandoah_assert_heaplocked_or_safepoint();
 664   _committed -= bytes;
 665 }
 666 
 667 // For tracking usage based on allocations, it should be the case that:
 668 // * The sum of regions::used == heap::used
 669 // * The sum of a generation's regions::used == generation::used
 670 // * The sum of a generation's humongous regions::free == generation::humongous_waste
 671 // These invariants are checked by the verifier on GC safepoints.
 672 //
 673 // Additional notes:
 674 // * When a mutator's allocation request causes a region to be retired, the
 675 //   free memory left in that region is considered waste. It does not contribute
 676 //   to the usage, but it _does_ contribute to allocation rate.
 677 // * The bottom of a PLAB must be aligned on card size. In some cases this will
 678 //   require padding in front of the PLAB (a filler object). Because this padding
 679 //   is included in the region's used memory we include the padding in the usage
 680 //   accounting as waste.
 681 // * Mutator allocations are used to compute an allocation rate; they are also
 682 //   reported to the Pacer for pacing decisions.
 683 // * There are three sources of waste:
 684 //  1. The padding used to align a PLAB on card size
 685 //  2. A region whose remaining free space is less than the minimum TLAB size is retired
 686 //  3. The unused portion of memory in the last region of a humongous object
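// * Net effect in increase_used(req) below: a mutator allocation adds actual + waste
//   to the allocation counter but only the actual size to usage (humongous waste is
//   tracked separately), while a GC (PLAB) allocation adds actual + waste to usage.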
 687 void ShenandoahHeap::increase_used(const ShenandoahAllocRequest& req) {
 688   size_t actual_bytes = req.actual_size() * HeapWordSize;
 689   size_t wasted_bytes = req.waste() * HeapWordSize;
 690   ShenandoahGeneration* generation = generation_for(req.affiliation());
 691 
 692   if (req.is_gc_alloc()) {
 693     assert(wasted_bytes == 0 || req.type() == ShenandoahAllocRequest::_alloc_plab, "Only PLABs have waste");
 694     increase_used(generation, actual_bytes + wasted_bytes);
 695   } else {
 696     assert(req.is_mutator_alloc(), "Expected mutator alloc here");
 697     // padding and actual size both count towards allocation counter
 698     generation->increase_allocated(actual_bytes + wasted_bytes);
 699 
 700     // only actual size counts toward usage for mutator allocations
 701     increase_used(generation, actual_bytes);
 702 
 703     // notify pacer of both actual size and waste
 704     notify_mutator_alloc_words(req.actual_size(), req.waste());
 705 
 706     if (wasted_bytes > 0 && ShenandoahHeapRegion::requires_humongous(req.actual_size())) {
 707       increase_humongous_waste(generation, wasted_bytes);
 708     }
 709   }
 710 }
 711 
 712 void ShenandoahHeap::increase_humongous_waste(ShenandoahGeneration* generation, size_t bytes) {
 713   generation->increase_humongous_waste(bytes);
 714   if (!generation->is_global()) {
 715     global_generation()->increase_humongous_waste(bytes);
 716   }
 717 }
 718 
 719 void ShenandoahHeap::decrease_humongous_waste(ShenandoahGeneration* generation, size_t bytes) {
 720   generation->decrease_humongous_waste(bytes);
 721   if (!generation->is_global()) {
 722     global_generation()->decrease_humongous_waste(bytes);
 723   }
 724 }
 725 
 726 void ShenandoahHeap::increase_used(ShenandoahGeneration* generation, size_t bytes) {
 727   generation->increase_used(bytes);
 728   if (!generation->is_global()) {
 729     global_generation()->increase_used(bytes);
 730   }
 731 }
 732 
 733 void ShenandoahHeap::decrease_used(ShenandoahGeneration* generation, size_t bytes) {
 734   generation->decrease_used(bytes);
 735   if (!generation->is_global()) {
 736     global_generation()->decrease_used(bytes);
 737   }
 738 }
 739 
 740 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, size_t waste) {
 741   if (ShenandoahPacing) {
 742     control_thread()->pacing_notify_alloc(words);
 743     if (waste > 0) {
 744       pacer()->claim_for_alloc<true>(waste);
 745     }
 746   }
 747 }
 748 
 749 size_t ShenandoahHeap::capacity() const {
 750   return committed();
 751 }
 752 
 753 size_t ShenandoahHeap::max_capacity() const {
 754   return _num_regions * ShenandoahHeapRegion::region_size_bytes();
 755 }
 756 
 757 size_t ShenandoahHeap::soft_max_capacity() const {
 758   size_t v = Atomic::load(&_soft_max_size);
 759   assert(min_capacity() <= v && v <= max_capacity(),
 760          "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
 761          min_capacity(), v, max_capacity());
 762   return v;
 763 }
 764 
 765 void ShenandoahHeap::set_soft_max_capacity(size_t v) {
 766   assert(min_capacity() <= v && v <= max_capacity(),
 767          "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
 768          min_capacity(), v, max_capacity());
 769   Atomic::store(&_soft_max_size, v);
 770 }
 771 
 772 size_t ShenandoahHeap::min_capacity() const {
 773   return _minimum_size;
 774 }
 775 
 776 size_t ShenandoahHeap::initial_capacity() const {
 777   return _initial_size;
 778 }
 779 
 780 void ShenandoahHeap::maybe_uncommit(double shrink_before, size_t shrink_until) {
 781   assert (ShenandoahUncommit, "should be enabled");
 782 
 783   // Determine if there is work to do. This avoids taking the heap lock if there is
 784   // no work available, avoids spamming the log with superfluous messages,
 785   // and minimizes the amount of work done while the lock is held.
 786 
 787   if (committed() <= shrink_until) return;
 788 
 789   bool has_work = false;
 790   for (size_t i = 0; i < num_regions(); i++) {
 791     ShenandoahHeapRegion* r = get_region(i);
 792     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
 793       has_work = true;
 794       break;
 795     }
 796   }
 797 
 798   if (has_work) {
 799     static const char* msg = "Concurrent uncommit";
 800     ShenandoahConcurrentPhase gcPhase(msg, ShenandoahPhaseTimings::conc_uncommit, true /* log_heap_usage */);
 801     EventMark em("%s", msg);
 802 
 803     op_uncommit(shrink_before, shrink_until);
 804   }
 805 }
 806 
 807 void ShenandoahHeap::op_uncommit(double shrink_before, size_t shrink_until) {
 808   assert (ShenandoahUncommit, "should be enabled");
 809 
 810   // The application allocates from the beginning of the heap, while the GC allocates at
 811   // the end of it. It is more efficient to uncommit from the end, so that the application
 812   // can enjoy the nearby committed regions. GC allocations are much less frequent,
 813   // and can therefore accept the commit costs.
 814 
 815   size_t count = 0;
 816   for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow
 817     ShenandoahHeapRegion* r = get_region(i - 1);
 818     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
 819       ShenandoahHeapLocker locker(lock());
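      // Re-check under the heap lock: the region may have ceased to be
      // empty-committed between the unlocked check above and taking the lock.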
 820       if (r->is_empty_committed()) {
 821         if (committed() < shrink_until + ShenandoahHeapRegion::region_size_bytes()) {
 822           break;
 823         }
 824 
 825         r->make_uncommitted();
 826         count++;
 827       }
 828     }
 829     SpinPause(); // allow allocators to take the lock
 830   }
 831 
 832   if (count > 0) {
 833     notify_heap_changed();
 834   }
 835 }
 836 
 837 bool ShenandoahHeap::check_soft_max_changed() {
 838   size_t new_soft_max = Atomic::load(&SoftMaxHeapSize);
 839   size_t old_soft_max = soft_max_capacity();
 840   if (new_soft_max != old_soft_max) {
 841     new_soft_max = MAX2(min_capacity(), new_soft_max);
 842     new_soft_max = MIN2(max_capacity(), new_soft_max);
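    // Clamp the requested value into [min_capacity(), max_capacity()]; only an
    // effective change after clamping is logged and applied.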
 843     if (new_soft_max != old_soft_max) {
 844       log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
 845                    byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
 846                    byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
 847       );
 848       set_soft_max_capacity(new_soft_max);
 849       return true;
 850     }
 851   }
 852   return false;
 853 }
 854 
 855 void ShenandoahHeap::notify_heap_changed() {
 856   // Update monitoring counters when we take a new region. This amortizes the
 857   // update costs on the slow path.
 858   monitoring_support()->notify_heap_changed();
 859   _heap_changed.set();
 860 }
 861 
 862 void ShenandoahHeap::set_forced_counters_update(bool value) {
 863   monitoring_support()->set_forced_counters_update(value);
 864 }
 865 
 866 void ShenandoahHeap::handle_force_counters_update() {
 867   monitoring_support()->handle_force_counters_update();
 868 }
 869 
 870 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
 871   // New object should fit the GCLAB size
 872   size_t min_size = MAX2(size, PLAB::min_size());
 873 
 874   // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
 875   size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
 876 
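  // The doubled size is clamped to [PLAB::min_size(), PLAB::max_size()] below,
  // so a thread that keeps refilling quickly converges on the maximum GCLAB size.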
 877   new_size = MIN2(new_size, PLAB::max_size());
 878   new_size = MAX2(new_size, PLAB::min_size());
 879 
 880   // Record the new heuristic value even if we take a shortcut. This captures
 881   // the case when moderately-sized objects always take the shortcut. At some point,
 882   // the heuristics should catch up with them.
 883   log_debug(gc, free)("Set new GCLAB size: " SIZE_FORMAT, new_size);
 884   ShenandoahThreadLocalData::set_gclab_size(thread, new_size);
 885 
 886   if (new_size < size) {
 887     // New size still does not fit the object. Fall back to shared allocation.
 888     // This avoids retiring perfectly good GCLABs when we encounter a large object.
 889     log_debug(gc, free)("New gclab size (" SIZE_FORMAT ") is too small for " SIZE_FORMAT, new_size, size);
 890     return nullptr;
 891   }
 892 
 893   // Retire current GCLAB, and allocate a new one.
 894   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
 895   gclab->retire();
 896 
 897   size_t actual_size = 0;
 898   HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
 899   if (gclab_buf == nullptr) {
 900     return nullptr;
 901   }
 902 
 903   assert (size <= actual_size, "allocation should fit");
 904 
 905   // ...and clear or zap the just-allocated TLAB, if needed.
 906   if (ZeroTLAB) {
 907     Copy::zero_to_words(gclab_buf, actual_size);
 908   } else if (ZapTLAB) {
 909     // Skip mangling the space corresponding to the object header to
 910     // ensure that the returned space is not considered parsable by
 911     // any concurrent GC thread.
 912     size_t hdr_size = oopDesc::header_size();
 913     Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
 914   }
 915   gclab->set_buf(gclab_buf, actual_size);
 916   return gclab->allocate(size);
 917 }
 918 
 919 // Called from stubs in JIT code or interpreter
 920 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
 921                                             size_t requested_size,
 922                                             size_t* actual_size) {
 923   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
 924   HeapWord* res = allocate_memory(req);
 925   if (res != nullptr) {
 926     *actual_size = req.actual_size();
 927   } else {
 928     *actual_size = 0;
 929   }
 930   return res;
 931 }
 932 
 933 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
 934                                              size_t word_size,
 935                                              size_t* actual_size) {
 936   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
 937   HeapWord* res = allocate_memory(req);
 938   if (res != nullptr) {
 939     *actual_size = req.actual_size();
 940   } else {
 941     *actual_size = 0;
 942   }
 943   return res;
 944 }
 945 
 946 HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
 947   intptr_t pacer_epoch = 0;
 948   bool in_new_region = false;
 949   HeapWord* result = nullptr;
 950 
 951   if (req.is_mutator_alloc()) {
 952     if (ShenandoahPacing) {
 953       pacer()->pace_for_alloc(req.size());
 954       pacer_epoch = pacer()->epoch();
 955     }
 956 
 957     if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
 958       result = allocate_memory_under_lock(req, in_new_region);
 959     }
 960 
 961     // Check that GC overhead is not exceeded.
 962     //
 963     // Shenandoah will grind along for quite a while allocating one
 964     // object at a time using shared (non-TLAB) allocations. This check
 965     // tests that the GC overhead limit has not been exceeded.
 966     // If it has, this notifies the collector to start a cycle, but raises
 967     // an OOME to the mutator if the last Full GCs have not made progress.
 968     // gc_no_progress_count is incremented after each degenerated or Full GC that fails to achieve is_good_progress().
 969     if ((result == nullptr) && !req.is_lab_alloc() && (get_gc_no_progress_count() > ShenandoahNoProgressThreshold)) {
 970       control_thread()->handle_alloc_failure(req, false);
 971       req.set_actual_size(0);
 972       return nullptr;
 973     }
 974 
 975     if (result == nullptr) {
 976       // Block until control thread reacted, then retry allocation.
 977       //
 978       // It might happen that one of the threads requesting allocation unblocks
 979       // much later, after the GC has happened, only to fail its second allocation
 980       // because other threads have already depleted the free storage. In this case,
 981       // a better strategy is to try again, until at least one full GC has completed.
 982       //
 983       // Stop retrying and return nullptr to raise an OOME if our allocation failed even after:
 984       //   a) We experienced a GC that had good progress, or
 985       //   b) We experienced at least one Full GC (whether or not it had good progress)
 986 
 987       size_t original_count = shenandoah_policy()->full_gc_count();
 988       while ((result == nullptr) && (original_count == shenandoah_policy()->full_gc_count())) {
 989         control_thread()->handle_alloc_failure(req, true);
 990         result = allocate_memory_under_lock(req, in_new_region);
 991       }
 992       if (result != nullptr) {
 993         // If our allocation request has been satisfied after it initially failed, we count this as good GC progress.
 994         notify_gc_progress();
 995       }
 996       if (log_develop_is_enabled(Debug, gc, alloc)) {
 997         ResourceMark rm;
 998         log_debug(gc, alloc)("Thread: %s, Result: " PTR_FORMAT ", Request: %s, Size: " SIZE_FORMAT
 999                              ", Original: " SIZE_FORMAT ", Latest: " SIZE_FORMAT,
1000                              Thread::current()->name(), p2i(result), req.type_string(), req.size(),
1001                              original_count, get_gc_no_progress_count());
1002       }
1003     }
1004   } else {
1005     assert(req.is_gc_alloc(), "Can only accept GC allocs here");
1006     result = allocate_memory_under_lock(req, in_new_region);
1007     // Do not call handle_alloc_failure() here, because we cannot block.
1008     // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
1009   }
1010 
1011   if (in_new_region) {
1012     notify_heap_changed();
1013   }
1014 
1015   if (result == nullptr) {
1016     req.set_actual_size(0);
1017   }
1018 
1019   // This is called regardless of the outcome of the allocation to account
1020   // for any waste created by retiring regions with this request.
1021   increase_used(req);
1022 
1023   if (result != nullptr) {
1024     size_t requested = req.size();
1025     size_t actual = req.actual_size();
1026 
1027     assert (req.is_lab_alloc() || (requested == actual),
1028             "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
1029             ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);
1030 
1031     if (req.is_mutator_alloc()) {
1032       // If we requested more than we were granted, give the rest back to pacer.
1033       // This only matters if we are in the same pacing epoch: do not try to unpace
1034       // over the budget for the other phase.
1035       if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
1036         pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
1037       }
1038     }
1039   }
1040 
1041   return result;
1042 }
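
A minimal standalone sketch (not Shenandoah code; names and numbers are assumptions) of the unpace bookkeeping above: a mutator that paced for a 256-word TLAB but was granted only 128 words hands the unused budget back, provided the pacing epoch has not changed in the meantime.

    #include <atomic>
    #include <cstddef>

    std::atomic<long>   pacer_budget{0};   // hypothetical shared pacing budget
    std::atomic<size_t> pacer_epoch{1};    // hypothetical epoch, bumped per pacing phase

    void unpace_sketch(size_t epoch_at_claim, size_t requested, size_t actual) {
      // Return budget only within the same epoch; never credit another phase.
      if (epoch_at_claim == pacer_epoch.load() && requested > actual) {
        pacer_budget.fetch_add(static_cast<long>(requested - actual));
      }
    }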
1043 
1044 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
1045   // If we are dealing with mutator allocation, then we may need to block for safepoint.
1046   // We cannot block for safepoint for GC allocations, because there is a high chance
1047   // we are already running at safepoint or from stack watermark machinery, and we cannot
1048   // block again.
1049   ShenandoahHeapLocker locker(lock(), req.is_mutator_alloc());
1050 
1051   // Make sure the old generation has room for either evacuations or promotions before trying to allocate.
1052   if (req.is_old() && !old_generation()->can_allocate(req)) {
1053     return nullptr;
1054   }
1055 
1056   // If the TLAB request size is greater than the available memory, allocate() will attempt to downsize the
1057   // request to fit within the available memory.
1058   HeapWord* result = _free_set->allocate(req, in_new_region);
1059 
1060   // Record the plab configuration for this result and register the object.
1061   if (result != nullptr && req.is_old()) {
1062     old_generation()->configure_plab_for_current_thread(req);
1063     if (req.type() == ShenandoahAllocRequest::_alloc_shared_gc) {
1064       // Register the newly allocated object while we're holding the global lock since there's no synchronization
1065       // built in to the implementation of register_object().  There are potential races when multiple independent
1066       // threads are allocating objects, some of which might span the same card region.  For example, consider
1067       // a card table's memory region within which three objects are being allocated by three different threads:
1068       //
1069       // objects being "concurrently" allocated:
1070       //    [-----a------][-----b-----][--------------c------------------]
1071       //            [---- card table memory range --------------]
1072       //
1073       // Before any objects are allocated, this card's memory range holds no objects.  Note that allocation of object a
1074       // wants to set the starts-object, first-start, and last-start attributes of the preceding card region.
1075       // Allocation of object b wants to set the starts-object, first-start, and last-start attributes of this card region.
1076       // Allocation of object c also wants to set the starts-object, first-start, and last-start attributes of this
1077       // card region.
1078       //
1079       // The thread allocating b and the thread allocating c can "race" in various ways, resulting in confusion, such as
1080       // last-start representing object b while first-start represents object c.  This is why we need to require all
1081       // register_object() invocations to be "mutually exclusive" with respect to each card's memory range.
1082       old_generation()->card_scan()->register_object(result);
1083     }
1084   }
1085 
1086   return result;
1087 }
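
To illustrate the mutual-exclusion requirement described above, here is a standalone sketch (CardMeta and card_lock are invented for illustration, not the real card-scan API) of the per-card metadata updates that would race without a lock.

    #include <cstdint>
    #include <mutex>

    struct CardMeta {                         // hypothetical per-card metadata
      bool     starts_object = false;
      uint32_t first_start   = UINT32_MAX;    // lowest object start in this card
      uint32_t last_start    = 0;             // highest object start in this card
    };

    std::mutex card_lock;                     // stands in for the global heap lock

    void register_object_sketch(CardMeta& card, uint32_t offset_in_card) {
      // Without the lock, two threads could interleave the three updates below,
      // leaving first_start and last_start describing two different objects.
      std::lock_guard<std::mutex> guard(card_lock);
      card.starts_object = true;
      if (offset_in_card < card.first_start) card.first_start = offset_in_card;
      if (offset_in_card > card.last_start)  card.last_start  = offset_in_card;
    }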
1088 
1089 HeapWord* ShenandoahHeap::mem_allocate(size_t size,
1090                                         bool*  gc_overhead_limit_was_exceeded) {
1091   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
1092   return allocate_memory(req);
1093 }
1094 
1095 MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
1096                                                              size_t size,
1097                                                              Metaspace::MetadataType mdtype) {
1098   MetaWord* result;
1099 
1100   // Inform metaspace OOM to GC heuristics if class unloading is possible.
1101   ShenandoahHeuristics* h = global_generation()->heuristics();
1102   if (h->can_unload_classes()) {
1103     h->record_metaspace_oom();
1104   }
1105 
1106   // Expand and retry allocation
1107   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1108   if (result != nullptr) {
1109     return result;
1110   }
1111 
1112   // Start full GC
1113   collect(GCCause::_metadata_GC_clear_soft_refs);
1114 
1115   // Retry allocation
1116   result = loader_data->metaspace_non_null()->allocate(size, mdtype);
1117   if (result != nullptr) {
1118     return result;
1119   }
1120 
1121   // Expand and retry allocation
1122   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);

1179     while ((r = _cs->claim_next()) != nullptr) {
1180       assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
1181       _sh->marked_object_iterate(r, &cl);
1182 
1183       if (ShenandoahPacing) {
1184         _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
1185       }
1186 
1187       if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
1188         break;
1189       }
1190     }
1191   }
1192 };
1193 
1194 void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
1195   ShenandoahEvacuationTask task(this, _collection_set, concurrent);
1196   workers()->run_task(&task);
1197 }
1198 
1199 oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
1200   assert(thread == Thread::current(), "Expected thread parameter to be current thread.");
1201   if (ShenandoahThreadLocalData::is_oom_during_evac(thread)) {
1202     // This thread went through the OOM-during-evacuation protocol. It is safe to return
1203     // the forwarding pointer. It must not attempt to evacuate any other objects.
1204     return ShenandoahBarrierSet::resolve_forwarded(p);
1205   }
1206 
1207   assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
1208 
1209   ShenandoahHeapRegion* r = heap_region_containing(p);
1210   assert(!r->is_humongous(), "never evacuate humongous objects");
1211 
1212   ShenandoahAffiliation target_gen = r->affiliation();
1213   return try_evacuate_object(p, thread, r, target_gen);
1214 }
1215 
1216 oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapRegion* from_region,
1217                                                ShenandoahAffiliation target_gen) {
1218   assert(target_gen == YOUNG_GENERATION, "Only expect evacuations to young in this mode");
1219   assert(from_region->is_young(), "Only expect evacuations from young in this mode");
1220   bool alloc_from_lab = true;
1221   HeapWord* copy = nullptr;
1222   size_t size = p->size();
1223 
1224 #ifdef ASSERT
1225   if (ShenandoahOOMDuringEvacALot &&
1226       (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
1227     copy = nullptr;
1228   } else {
1229 #endif
1230     if (UseTLAB) {
1231       copy = allocate_from_gclab(thread, size);
1232     }
1233     if (copy == nullptr) {
1234       // If we failed to allocate in LAB, we'll try a shared allocation.
1235       ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, target_gen);
1236       copy = allocate_memory(req);
1237       alloc_from_lab = false;
1238     }
1239 #ifdef ASSERT
1240   }
1241 #endif
1242 
1243   if (copy == nullptr) {
1244     control_thread()->handle_alloc_failure_evac(size);
1245 
1246     _oom_evac_handler.handle_out_of_memory_during_evacuation();
1247 
1248     return ShenandoahBarrierSet::resolve_forwarded(p);
1249   }
1250 
1251   // Copy the object:
1252   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
1253 
1254   // Try to install the new forwarding pointer.
1255   oop copy_val = cast_to_oop(copy);
1256   oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
1257   if (result == copy_val) {
1258     // Successfully evacuated. Our copy is now the public one!
1259     ContinuationGCSupport::relativize_stack_chunk(copy_val);
1260     shenandoah_assert_correct(nullptr, copy_val);
1261     return copy_val;
1262   } else {
1263     // Failed to evacuate. We need to deal with the object that is left behind. Since this
1264     // new allocation is certainly after TAMS, it will be considered live in the next cycle.
1265     // But if it happens to contain references to evacuated regions, those references would
1266     // not get updated for this stale copy during this cycle, and we will crash while scanning
1267     // it the next cycle.
1268     if (alloc_from_lab) {
1269       // For LAB allocations, it is enough to roll back the allocation pointer. Either the next
1270       // object will overwrite this stale copy, or the filler object on LAB retirement will
1271       // do this.
1272       ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
1273     } else {
1274       // For non-LAB allocations, we have no way to retract the allocation, and
1275       // have to explicitly overwrite the copy with the filler object. With that overwrite,
1276       // we have to keep the fwdptr initialized and pointing to our (stale) copy.
1277       assert(size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
1278       fill_with_object(copy, size);
1279       shenandoah_assert_correct(nullptr, copy_val);
1280       // For non-LAB allocations, the object has already been registered
1281     }
1282     shenandoah_assert_correct(nullptr, result);
1283     return result;
1284   }
1285 }
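
The function above follows the classic speculative-copy-then-CAS evacuation pattern. A distilled standalone sketch, with std::atomic standing in for the real forwarding-word machinery and Obj invented for illustration:

    #include <atomic>
    #include <cstring>

    struct Obj {
      std::atomic<Obj*> fwd{nullptr};   // nullptr encodes "not yet forwarded"
      char payload[56];
    };

    Obj* evacuate_sketch(Obj* from, Obj* to) {
      std::memcpy(to->payload, from->payload, sizeof from->payload);  // speculative copy
      Obj* expected = nullptr;
      if (from->fwd.compare_exchange_strong(expected, to)) {
        return to;        // CAS won: our copy is now the public one
      }
      // CAS lost: another thread installed its copy first. Our copy must be
      // rolled back (LAB pointer rewind) or overwritten with a filler, as above.
      return expected;    // the winning copy
    }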
1286 
1287 void ShenandoahHeap::trash_cset_regions() {
1288   ShenandoahHeapLocker locker(lock());
1289 
1290   ShenandoahCollectionSet* set = collection_set();
1291   ShenandoahHeapRegion* r;
1292   set->clear_current_index();
1293   while ((r = set->next()) != nullptr) {
1294     r->make_trash();
1295   }
1296   collection_set()->clear();
1297 }
1298 
1299 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
1300   st->print_cr("Heap Regions:");
1301   st->print_cr("Region state: EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HP=pinned humongous start");
1302   st->print_cr("              HC=humongous continuation, CS=collection set, TR=trash, P=pinned, CSP=pinned collection set");
1303   st->print_cr("BTE=bottom/top/end, TAMS=top-at-mark-start");
1304   st->print_cr("UWM=update watermark, U=used");
1305   st->print_cr("T=TLAB allocs, G=GCLAB allocs");
1306   st->print_cr("S=shared allocs, L=live data");
1307   st->print_cr("CP=critical pins");
1308 
1309   for (size_t i = 0; i < num_regions(); i++) {
1310     get_region(i)->print_on(st);
1311   }
1312 }
1313 
1314 size_t ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
1315   assert(start->is_humongous_start(), "reclaim regions starting with the first one");
1316 
1317   oop humongous_obj = cast_to_oop(start->bottom());
1318   size_t size = humongous_obj->size();
1319   size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
1320   size_t index = start->index() + required_regions - 1;
1321 
1322   assert(!start->has_live(), "liveness must be zero");
1323 
1324   for (size_t i = 0; i < required_regions; i++) {
1325     // Reclaim from the tail. Otherwise, an assertion fails when printing a region to the trace log,
1326     // because printing expects every humongous region to belong to a range starting with a humongous start region.
1327     ShenandoahHeapRegion* region = get_region(index--);
1328 
1329     assert(region->is_humongous(), "expect correct humongous start or continuation");
1330     assert(!region->is_cset(), "Humongous region should not be in collection set");
1331 
1332     region->make_trash_immediate();
1333   }
1334   return required_regions;
1335 }
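
A worked example of the region arithmetic above, assuming 2 MB regions: a 5 MB humongous object needs ceil(5 MB / 2 MB) = 3 regions, which are then reclaimed tail first. A sketch of the ceiling division presumably behind required_regions():

    #include <cstddef>
    #include <cstdio>

    static size_t required_regions_sketch(size_t object_bytes, size_t region_bytes) {
      return (object_bytes + region_bytes - 1) / region_bytes;   // ceiling division
    }

    int main() {
      const size_t region_bytes = 2u * 1024 * 1024;   // assumed region size
      std::printf("%zu\n", required_regions_sketch(5u * 1024 * 1024, region_bytes));  // prints 3
      return 0;
    }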
1336 
1337 class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
1338 public:
1339   ShenandoahCheckCleanGCLABClosure() {}
1340   void do_thread(Thread* thread) {
1341     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1342     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1343     assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
1344 
1345     if (ShenandoahHeap::heap()->mode()->is_generational()) {
1346       PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1347       assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
1348       assert(plab->words_remaining() == 0, "PLAB should not need retirement");
1349     }
1350   }
1351 };
1352 
1353 class ShenandoahRetireGCLABClosure : public ThreadClosure {
1354 private:
1355   bool const _resize;
1356 public:
1357   ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
1358   void do_thread(Thread* thread) {
1359     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1360     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1361     gclab->retire();
1362     if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1363       ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1364     }
1365 
1366     if (ShenandoahHeap::heap()->mode()->is_generational()) {
1367       PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1368       assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
1369 
1370       // There are two reasons to retire all PLABs between old-gen evacuation passes.
1371       //  1. We need to make the PLAB memory parsable by remembered-set scanning.
1372       //  2. We need to establish a trustworthy UpdateWaterMark value within each old-gen heap region.
1373       ShenandoahGenerationalHeap::heap()->retire_plab(plab, thread);
1374       if (_resize && ShenandoahThreadLocalData::plab_size(thread) > 0) {
1375         ShenandoahThreadLocalData::set_plab_size(thread, 0);
1376       }
1377     }
1378   }
1379 };
1380 
1381 void ShenandoahHeap::labs_make_parsable() {
1382   assert(UseTLAB, "Only call with UseTLAB");
1383 
1384   ShenandoahRetireGCLABClosure cl(false);
1385 
1386   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1387     ThreadLocalAllocBuffer& tlab = t->tlab();
1388     tlab.make_parsable();
1389     cl.do_thread(t);
1390   }
1391 
1392   workers()->threads_do(&cl);
1393 }
1394 
1395 void ShenandoahHeap::tlabs_retire(bool resize) {
1396   assert(UseTLAB, "Only call with UseTLAB");
1397   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");

1459   }
1460   return nullptr;
1461 }
1462 
1463 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1464   ShenandoahHeapRegion* r = heap_region_containing(addr);
1465   return r->block_is_obj(addr);
1466 }
1467 
1468 bool ShenandoahHeap::print_location(outputStream* st, void* addr) const {
1469   return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr);
1470 }
1471 
1472 void ShenandoahHeap::prepare_for_verify() {
1473   if (SafepointSynchronize::is_at_safepoint() && UseTLAB) {
1474     labs_make_parsable();
1475   }
1476 }
1477 
1478 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1479   if (_shenandoah_policy->is_at_shutdown()) {
1480     return;
1481   }
1482 
1483   if (_control_thread != nullptr) {
1484     tcl->do_thread(_control_thread);
1485   }
1486 
1487   workers()->threads_do(tcl);
1488   if (_safepoint_workers != nullptr) {
1489     _safepoint_workers->threads_do(tcl);
1490   }
1491 }
1492 
1493 void ShenandoahHeap::print_tracing_info() const {
1494   LogTarget(Info, gc, stats) lt;
1495   if (lt.is_enabled()) {
1496     ResourceMark rm;
1497     LogStream ls(lt);
1498 
1499     phase_timings()->print_global_on(&ls);
1500 
1501     ls.cr();
1502     ls.cr();
1503 
1504     shenandoah_policy()->print_gc_stats(&ls);
1505 
1506     ls.cr();
1507     ls.cr();
1508   }
1509 }
1510 
1511 void ShenandoahHeap::set_gc_generation(ShenandoahGeneration* generation) {
1512   shenandoah_assert_control_or_vm_thread_at_safepoint();
1513   _gc_generation = generation;
1514 }
1515 
1516 // Active generation may only be set by the VM thread at a safepoint.
1517 void ShenandoahHeap::set_active_generation() {
1518   assert(Thread::current()->is_VM_thread(), "Only the VM Thread");
1519   assert(SafepointSynchronize::is_at_safepoint(), "Only at a safepoint!");
1520   assert(_gc_generation != nullptr, "Will set _active_generation to nullptr");
1521   _active_generation = _gc_generation;
1522 }
1523 
1524 void ShenandoahHeap::on_cycle_start(GCCause::Cause cause, ShenandoahGeneration* generation) {
1525   shenandoah_policy()->record_collection_cause(cause);
1526 
1527   assert(gc_cause() == GCCause::_no_gc, "Overwriting cause");
1528   assert(_gc_generation == nullptr, "Overwriting _gc_generation");
1529 
1530   set_gc_cause(cause);
1531   set_gc_generation(generation);
1532 
1533   generation->heuristics()->record_cycle_start();
1534 }
1535 
1536 void ShenandoahHeap::on_cycle_end(ShenandoahGeneration* generation) {
1537   assert(gc_cause() != GCCause::_no_gc, "cause wasn't set");
1538   assert(_gc_generation != nullptr, "_gc_generation wasn't set");
1539 
1540   generation->heuristics()->record_cycle_end();
1541   if (mode()->is_generational() && generation->is_global()) {
1542     // If we just completed a GLOBAL GC, claim credit for completion of young-gen and old-gen GC as well
1543     young_generation()->heuristics()->record_cycle_end();
1544     old_generation()->heuristics()->record_cycle_end();
1545   }
1546 
1547   set_gc_generation(nullptr);
1548   set_gc_cause(GCCause::_no_gc);
1549 }
1550 
1551 void ShenandoahHeap::verify(VerifyOption vo) {
1552   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1553     if (ShenandoahVerify) {
1554       verifier()->verify_generic(vo);
1555     } else {
1556       // TODO: Consider allocating verification bitmaps on demand,
1557       // and turn this on unconditionally.
1558     }
1559   }
1560 }
1561 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1562   return _free_set->capacity();
1563 }
1564 
1565 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1566 private:
1567   MarkBitMap* _bitmap;
1568   ShenandoahScanObjectStack* _oop_stack;
1569   ShenandoahHeap* const _heap;
1570   ShenandoahMarkingContext* const _marking_context;

1813 }
1814 
1815 // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
1816 void ShenandoahHeap::keep_alive(oop obj) {
1817   if (is_concurrent_mark_in_progress() && (obj != nullptr)) {
1818     ShenandoahBarrierSet::barrier_set()->enqueue(obj);
1819   }
1820 }
1821 
1822 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1823   for (size_t i = 0; i < num_regions(); i++) {
1824     ShenandoahHeapRegion* current = get_region(i);
1825     blk->heap_region_do(current);
1826   }
1827 }
1828 
1829 class ShenandoahParallelHeapRegionTask : public WorkerTask {
1830 private:
1831   ShenandoahHeap* const _heap;
1832   ShenandoahHeapRegionClosure* const _blk;
1833   size_t const _stride;
1834 
1835   shenandoah_padding(0);
1836   volatile size_t _index;
1837   shenandoah_padding(1);
1838 
1839 public:
1840   ShenandoahParallelHeapRegionTask(ShenandoahHeapRegionClosure* blk, size_t stride) :
1841           WorkerTask("Shenandoah Parallel Region Operation"),
1842           _heap(ShenandoahHeap::heap()), _blk(blk), _stride(stride), _index(0) {}
1843 
1844   void work(uint worker_id) {
1845     ShenandoahParallelWorkerSession worker_session(worker_id);
1846     size_t stride = _stride;
1847 
1848     size_t max = _heap->num_regions();
1849     while (Atomic::load(&_index) < max) {
1850       size_t cur = Atomic::fetch_then_add(&_index, stride, memory_order_relaxed);
1851       size_t start = cur;
1852       size_t end = MIN2(cur + stride, max);
1853       if (start >= max) break;
1854 
1855       for (size_t i = cur; i < end; i++) {
1856         ShenandoahHeapRegion* current = _heap->get_region(i);
1857         _blk->heap_region_do(current);
1858       }
1859     }
1860   }
1861 };
1862 
1863 void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1864   assert(blk->is_thread_safe(), "Only thread-safe closures here");
1865   const uint active_workers = workers()->active_workers();
1866   const size_t n_regions = num_regions();
1867   size_t stride = ShenandoahParallelRegionStride;
1868   if (stride == 0 && active_workers > 1) {
1869     // Automatically derive the stride to balance the work between threads
1870     // evenly. Do not try to split work if below the reasonable threshold.
1871     constexpr size_t threshold = 4096;
1872     stride = n_regions <= threshold ?
1873             threshold :
1874             (n_regions + active_workers - 1) / active_workers;
1875   }
1876 
1877   if (n_regions > stride && active_workers > 1) {
1878     ShenandoahParallelHeapRegionTask task(blk, stride);
1879     workers()->run_task(&task);
1880   } else {
1881     heap_region_iterate(blk);
1882   }
1883 }
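
A worked example of the stride derivation above: with 8192 regions and 8 active workers, n_regions exceeds the 4096 threshold, so stride = ceil(8192 / 8) = 1024 and each atomic claim hands a worker a batch of 1024 regions. A standalone sketch:

    #include <cstddef>
    #include <cstdio>

    static size_t derive_stride(size_t n_regions, size_t active_workers) {
      constexpr size_t threshold = 4096;   // same threshold as above
      return n_regions <= threshold
             ? threshold                                             // do not split small work
             : (n_regions + active_workers - 1) / active_workers;    // ceiling division
    }

    int main() {
      std::printf("%zu\n", derive_stride(8192, 8));   // prints 1024
      return 0;
    }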
1884 
1885 class ShenandoahRendezvousClosure : public HandshakeClosure {
1886 public:
1887   inline ShenandoahRendezvousClosure() : HandshakeClosure("ShenandoahRendezvous") {}
1888   inline void do_thread(Thread* thread) {}
1889 };
1890 
1891 void ShenandoahHeap::rendezvous_threads() {
1892   ShenandoahRendezvousClosure cl;
1893   Handshake::execute(&cl);
1894 }
1895 
1896 void ShenandoahHeap::recycle_trash() {
1897   free_set()->recycle_trash();
1898 }
1899 
1900 void ShenandoahHeap::do_class_unloading() {
1901   _unloader.unload();
1902   if (mode()->is_generational()) {
1903     old_generation()->set_parsable(false);
1904   }
1905 }
1906 
1907 void ShenandoahHeap::stw_weak_refs(bool full_gc) {
1908   // Weak refs processing
1909   ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
1910                                                 : ShenandoahPhaseTimings::degen_gc_weakrefs;
1911   ShenandoahTimingsTracker t(phase);
1912   ShenandoahGCWorkerPhase worker_phase(phase);
1913   shenandoah_assert_generations_reconciled();
1914   gc_generation()->ref_processor()->process_references(phase, workers(), false /* concurrent */);
1915 }
1916 
1917 void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
1918   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
1919 
1920   // Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to
1921   // make them parsable for update code to work correctly. Plus, we can compute new sizes
1922   // for future GCLABs here.
1923   if (UseTLAB) {
1924     ShenandoahGCPhase phase(concurrent ?
1925                             ShenandoahPhaseTimings::init_update_refs_manage_gclabs :
1926                             ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
1927     gclabs_retire(ResizeTLAB);
1928   }
1929 
1930   _update_refs_iterator.reset();
1931 }
1932 
1933 void ShenandoahHeap::propagate_gc_state_to_java_threads() {
1934   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
1935   if (_gc_state_changed) {
1936     _gc_state_changed = false;
1937     char state = gc_state();
1938     for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1939       ShenandoahThreadLocalData::set_gc_state(t, state);
1940     }
1941   }
1942 }
1943 
1944 void ShenandoahHeap::set_gc_state(uint mask, bool value) {
1945   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
1946   _gc_state.set_cond(mask, value);
1947   _gc_state_changed = true;
1948   // Check that if the concurrent weak roots flag is set, then the active generation isn't null
1949   assert(!is_concurrent_weak_root_in_progress() || active_generation() != nullptr, "Error");
1950   shenandoah_assert_generations_reconciled();
1951 }
1952 
1953 void ShenandoahHeap::set_concurrent_young_mark_in_progress(bool in_progress) {
1954   uint mask;
1955   assert(!has_forwarded_objects(), "Young marking is not concurrent with evacuation");
1956   if (!in_progress && is_concurrent_old_mark_in_progress()) {
1957     assert(mode()->is_generational(), "Only generational GC has old marking");
1958     assert(_gc_state.is_set(MARKING), "concurrent_old_marking_in_progress implies MARKING");
1959     // If old-marking is in progress when we turn off YOUNG_MARKING, leave MARKING (and OLD_MARKING) on
1960     mask = YOUNG_MARKING;
1961   } else {
1962     mask = MARKING | YOUNG_MARKING;
1963   }
1964   set_gc_state(mask, in_progress);
1965   manage_satb_barrier(in_progress);
1966 }
1967 
1968 void ShenandoahHeap::set_concurrent_old_mark_in_progress(bool in_progress) {
1969 #ifdef ASSERT
1970   // has_forwarded_objects() iff UPDATEREFS or EVACUATION
1971   bool has_forwarded = has_forwarded_objects();
1972   bool updating_or_evacuating = _gc_state.is_set(UPDATEREFS | EVACUATION);
1973   bool evacuating = _gc_state.is_set(EVACUATION);
1974   assert ((has_forwarded == updating_or_evacuating) || (evacuating && !has_forwarded && collection_set()->is_empty()),
1975           "Updating or evacuating iff has forwarded objects, or if evacuation phase is promoting in place without forwarding");
1976 #endif
1977   if (!in_progress && is_concurrent_young_mark_in_progress()) {
1978     // If young-marking is in progress when we turn off OLD_MARKING, leave MARKING (and YOUNG_MARKING) on
1979     assert(_gc_state.is_set(MARKING), "concurrent_young_marking_in_progress implies MARKING");
1980     set_gc_state(OLD_MARKING, in_progress);
1981   } else {
1982     set_gc_state(MARKING | OLD_MARKING, in_progress);
1983   }
1984   manage_satb_barrier(in_progress);
1985 }
1986 
1987 bool ShenandoahHeap::is_prepare_for_old_mark_in_progress() const {
1988   return old_generation()->is_preparing_for_mark();
1989 }
1990 
1991 void ShenandoahHeap::manage_satb_barrier(bool active) {
1992   if (is_concurrent_mark_in_progress()) {
1993     // Ignore request to deactivate barrier while concurrent mark is in progress.
1994     // Do not attempt to re-activate the barrier if it is already active.
1995     if (active && !ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
1996       ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
1997     }
1998   } else {
1999     // No concurrent marking is in progress so honor request to deactivate,
2000     // but only if the barrier is already active.
2001     if (!active && ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
2002       ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
2003     }
2004   }
2005 }
2006 
2007 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
2008   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
2009   set_gc_state(EVACUATION, in_progress);
2010 }
2011 
2012 void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
2013   if (in_progress) {
2014     _concurrent_strong_root_in_progress.set();
2015   } else {
2016     _concurrent_strong_root_in_progress.unset();
2017   }
2018 }
2019 
2020 void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
2021   set_gc_state(WEAK_ROOTS, cond);
2022 }
2023 
2024 GCTracer* ShenandoahHeap::tracer() {
2025   return shenandoah_policy()->tracer();
2026 }
2027 
2028 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
2029   return _free_set->used();
2030 }
2031 
2032 bool ShenandoahHeap::try_cancel_gc() {
2033   jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
2034   return prev == CANCELLABLE;
2035 }
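
The compare-and-swap above lets at most one caller observe the CANCELLABLE -> CANCELLED transition. A standalone sketch of the same handshake (enum values are assumptions):

    #include <atomic>

    enum CancelState : signed char { CANCELLABLE = 0, CANCELLED = 1 };
    std::atomic<signed char> cancelled_gc{CANCELLABLE};

    bool try_cancel_sketch() {
      signed char expected = CANCELLABLE;
      // Exactly one thread wins the transition; later callers see CANCELLED and fail.
      return cancelled_gc.compare_exchange_strong(expected, CANCELLED);
    }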
2036 
2037 void ShenandoahHeap::cancel_concurrent_mark() {
2038   if (mode()->is_generational()) {
2039     young_generation()->cancel_marking();
2040     old_generation()->cancel_marking();
2041   }
2042 
2043   global_generation()->cancel_marking();
2044 
2045   ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
2046 }
2047 
2048 void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
2049   if (try_cancel_gc()) {
2050     FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
2051     log_info(gc)("%s", msg.buffer());
2052     Events::log(Thread::current(), "%s", msg.buffer());
2053     _cancel_requested_time = os::elapsedTime();
2054   }
2055 }
2056 
2057 uint ShenandoahHeap::max_workers() {
2058   return _max_workers;
2059 }
2060 
2061 void ShenandoahHeap::stop() {
2062   // The shutdown sequence should be able to terminate when GC is running.
2063 
2064   // Step 0. Notify policy to disable event recording.
2065   _shenandoah_policy->record_shutdown();
2066 
2067   // Step 1. Notify control thread that we are in shutdown.
2068   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
2069   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
2070   control_thread()->prepare_for_graceful_shutdown();
2071 
2072   // Step 2. Notify GC workers that we are cancelling GC.
2073   cancel_gc(GCCause::_shenandoah_stop_vm);

2157 }
2158 
2159 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
2160   set_gc_state(HAS_FORWARDED, cond);
2161 }
2162 
2163 void ShenandoahHeap::set_unload_classes(bool uc) {
2164   _unload_classes.set_cond(uc);
2165 }
2166 
2167 bool ShenandoahHeap::unload_classes() const {
2168   return _unload_classes.is_set();
2169 }
2170 
2171 address ShenandoahHeap::in_cset_fast_test_addr() {
2172   ShenandoahHeap* heap = ShenandoahHeap::heap();
2173   assert(heap->collection_set() != nullptr, "Sanity");
2174   return (address) heap->collection_set()->biased_map_address();
2175 }
2176 
2177 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
2178   if (mode()->is_generational()) {
2179     young_generation()->reset_bytes_allocated_since_gc_start();
2180     old_generation()->reset_bytes_allocated_since_gc_start();
2181   }
2182 
2183   global_generation()->reset_bytes_allocated_since_gc_start();
2184 }
2185 
2186 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
2187   _degenerated_gc_in_progress.set_cond(in_progress);
2188 }
2189 
2190 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2191   _full_gc_in_progress.set_cond(in_progress);
2192 }
2193 
2194 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2195   assert (is_full_gc_in_progress(), "should be");
2196   _full_gc_move_in_progress.set_cond(in_progress);
2197 }
2198 
2199 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2200   set_gc_state(UPDATEREFS, in_progress);
2201 }
2202 
2203 void ShenandoahHeap::register_nmethod(nmethod* nm) {

2227     if (r->is_active()) {
2228       if (r->is_pinned()) {
2229         if (r->pin_count() == 0) {
2230           r->make_unpinned();
2231         }
2232       } else {
2233         if (r->pin_count() > 0) {
2234           r->make_pinned();
2235         }
2236       }
2237     }
2238   }
2239 
2240   assert_pinned_region_status();
2241 }
2242 
2243 #ifdef ASSERT
2244 void ShenandoahHeap::assert_pinned_region_status() {
2245   for (size_t i = 0; i < num_regions(); i++) {
2246     ShenandoahHeapRegion* r = get_region(i);
2247     shenandoah_assert_generations_reconciled();
2248     if (gc_generation()->contains(r)) {
2249       assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
2250              "Region " SIZE_FORMAT " pinning status is inconsistent", i);
2251     }
2252   }
2253 }
2254 #endif
2255 
2256 ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
2257   return _gc_timer;
2258 }
2259 
2260 void ShenandoahHeap::prepare_concurrent_roots() {
2261   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2262   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2263   set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
2264   set_concurrent_weak_root_in_progress(true);
2265   if (unload_classes()) {
2266     _unloader.prepare();
2267   }
2268 }
2269 
2270 void ShenandoahHeap::finish_concurrent_roots() {
2271   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2272   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2273   if (unload_classes()) {
2274     _unloader.finish();
2275   }
2276 }
2277 
2278 #ifdef ASSERT
2279 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
2280   assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
2281 
2282   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
2283     // Use ParallelGCThreads inside safepoints
2284     assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads (%u) within safepoint, not %u",
2285            ParallelGCThreads, nworkers);
2286   } else {
2287     // Use ConcGCThreads outside safepoints
2288     assert(nworkers == ConcGCThreads, "Use ConcGCThreads (%u) outside safepoints, not %u",
2289            ConcGCThreads, nworkers);
2290   }
2291 }
2292 #endif
2293 
2294 ShenandoahVerifier* ShenandoahHeap::verifier() {
2295   guarantee(ShenandoahVerify, "Should be enabled");
2296   assert (_verifier != nullptr, "sanity");
2297   return _verifier;
2298 }
2299 
2300 template<bool CONCURRENT>
2301 class ShenandoahUpdateHeapRefsTask : public WorkerTask {
2302 private:
2303   ShenandoahHeap* _heap;
2304   ShenandoahRegionIterator* _regions;
2305 public:
2306   explicit ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions) :
2307     WorkerTask("Shenandoah Update References"),
2308     _heap(ShenandoahHeap::heap()),
2309     _regions(regions) {
2310   }
2311 
2312   void work(uint worker_id) {
2313     if (CONCURRENT) {
2314       ShenandoahConcurrentWorkerSession worker_session(worker_id);
2315       ShenandoahSuspendibleThreadSetJoiner stsj;
2316       do_work<ShenandoahConcUpdateRefsClosure>(worker_id);
2317     } else {
2318       ShenandoahParallelWorkerSession worker_session(worker_id);
2319       do_work<ShenandoahSTWUpdateRefsClosure>(worker_id);
2320     }
2321   }
2322 
2323 private:
2324   template<class T>
2325   void do_work(uint worker_id) {
2326     if (CONCURRENT && (worker_id == 0)) {
2327       // We ask the first worker to replenish the Mutator free set by moving regions previously reserved to hold the
2328       // results of evacuation.  These reserves are no longer necessary because evacuation has completed.
2329       size_t cset_regions = _heap->collection_set()->count();
2330 
2331       // Now that evacuation is done, we can reassign any regions that had been reserved to hold the results of evacuation
2332       // to the mutator free set.  At the end of GC, we will have cset_regions newly evacuated fully empty regions from
2333       // which we will be able to replenish the Collector free set and the OldCollector free set in preparation for the
2334       // next GC cycle.
2335       _heap->free_set()->move_regions_from_collector_to_mutator(cset_regions);
2336     }
2337     // If !CONCURRENT, there's no value in expanding Mutator free set
2338     T cl;
2339     ShenandoahHeapRegion* r = _regions->next();
2340     while (r != nullptr) {
2341       HeapWord* update_watermark = r->get_update_watermark();
2342       assert (update_watermark >= r->bottom(), "sanity");
2343       if (r->is_active() && !r->is_cset()) {
2344         _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2345         if (ShenandoahPacing) {
2346           _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
2347         }
2348       }
2349       if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
2350         return;
2351       }
2352       r = _regions->next();
2353     }
2354   }
2355 };
2356 
2357 void ShenandoahHeap::update_heap_references(bool concurrent) {
2358   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2359 
2360   if (concurrent) {
2361     ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator);
2362     workers()->run_task(&task);
2363   } else {
2364     ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator);
2365     workers()->run_task(&task);
2366   }
2367 }
2368 
2369 ShenandoahSynchronizePinnedRegionStates::ShenandoahSynchronizePinnedRegionStates() : _lock(ShenandoahHeap::heap()->lock()) { }
2370 
2371 void ShenandoahSynchronizePinnedRegionStates::heap_region_do(ShenandoahHeapRegion* r) {
2372   // Drop "pinned" state from regions that no longer have a pinned count. Put
2373   // regions with a pinned count into the "pinned" state.
2374   if (r->is_active()) {
2375     if (r->is_pinned()) {
2376       if (r->pin_count() == 0) {
2377         ShenandoahHeapLocker locker(_lock);
2378         r->make_unpinned();
2379       }
2380     } else {
2381       if (r->pin_count() > 0) {
2382         ShenandoahHeapLocker locker(_lock);
2383         r->make_pinned();
2384       }
2385     }
2386   }
2387 }
2388 
2389 void ShenandoahHeap::update_heap_region_states(bool concurrent) {
2390   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2391   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2392 
2393   {
2394     ShenandoahGCPhase phase(concurrent ?
2395                             ShenandoahPhaseTimings::final_update_refs_update_region_states :
2396                             ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
2397 
2398     final_update_refs_update_region_states();
2399 
2400     assert_pinned_region_status();
2401   }
2402 
2403   {
2404     ShenandoahGCPhase phase(concurrent ?
2405                             ShenandoahPhaseTimings::final_update_refs_trash_cset :
2406                             ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
2407     trash_cset_regions();
2408   }
2409 }
2410 
2411 void ShenandoahHeap::final_update_refs_update_region_states() {
2412   ShenandoahSynchronizePinnedRegionStates cl;
2413   parallel_heap_region_iterate(&cl);
2414 }
2415 
2416 void ShenandoahHeap::rebuild_free_set(bool concurrent) {
2417   ShenandoahGCPhase phase(concurrent ?
2418                           ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
2419                           ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
2420   ShenandoahHeapLocker locker(lock());
2421   size_t young_cset_regions, old_cset_regions;
2422   size_t first_old_region, last_old_region, old_region_count;
2423   _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count);
2424   // If there are no old regions, first_old_region will be greater than last_old_region
2425   assert((first_old_region > last_old_region) ||
2426          ((last_old_region + 1 - first_old_region >= old_region_count) &&
2427           get_region(first_old_region)->is_old() && get_region(last_old_region)->is_old()),
2428          "sanity: old_region_count: " SIZE_FORMAT ", first_old_region: " SIZE_FORMAT ", last_old_region: " SIZE_FORMAT,
2429          old_region_count, first_old_region, last_old_region);
2430 
2431   if (mode()->is_generational()) {
2432 #ifdef ASSERT
2433     if (ShenandoahVerify) {
2434       verifier()->verify_before_rebuilding_free_set();
2435     }
2436 #endif
2437 
2438     // The computation of bytes_of_allocation_runway_before_gc_trigger is quite conservative, so we consider all of this
2439     // available for transfer to old. Note that transfer of humongous regions does not impact available.
2440     ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
2441     size_t allocation_runway = gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions);
2442     gen_heap->compute_old_generation_balance(allocation_runway, old_cset_regions);
2443 
2444     // Total old_available may have been expanded to hold anticipated promotions.  We trigger if the fragmented available
2445     // memory represents more than 16 regions worth of data.  Note that fragmentation may increase when we promote regular
2446     // regions in place when many of these regular regions have an abundant amount of available memory within them.  Fragmentation
2447     // will decrease as promote-by-copy consumes the available memory within these partially consumed regions.
2448     //
2449     // We consider old-gen to have excessive fragmentation if more than 12.5% of old-gen is free memory that resides
2450     // within partially consumed regions of memory.
2451   }
2452   // Rebuild free set based on adjusted generation sizes.
2453   _free_set->finish_rebuild(young_cset_regions, old_cset_regions, old_region_count);
2454 
2455   if (mode()->is_generational()) {
2456     ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
2457     ShenandoahOldGeneration* old_gen = gen_heap->old_generation();
2458     old_gen->heuristics()->evaluate_triggers(first_old_region, last_old_region, old_region_count, num_regions());
2459   }
2460 }
2461 
2462 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2463   print_on(st);
2464   st->cr();
2465   print_heap_regions_on(st);
2466 }
2467 
2468 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2469   size_t slice = r->index() / _bitmap_regions_per_slice;
2470 
2471   size_t regions_from = _bitmap_regions_per_slice * slice;
2472   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2473   for (size_t g = regions_from; g < regions_to; g++) {
2474     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2475     if (skip_self && g == r->index()) continue;
2476     if (get_region(g)->is_committed()) {
2477       return true;
2478     }

2526   }
2527 
2528   // Uncommit the bitmap slice:
2529   size_t slice = r->index() / _bitmap_regions_per_slice;
2530   size_t off = _bitmap_bytes_per_slice * slice;
2531   size_t len = _bitmap_bytes_per_slice;
2532   if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
2533     return false;
2534   }
2535   return true;
2536 }
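
A worked example of the slice addressing above, under assumed sizes: with 8 regions per bitmap slice and 64 KB of bitmap bytes per slice, region 21 belongs to slice 21 / 8 = 2, whose backing bytes start at offset 2 * 64 KB = 131072.

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t regions_per_slice = 8;           // assumed configuration
      const size_t bytes_per_slice   = 64 * 1024;   // assumed configuration
      const size_t region_index      = 21;

      size_t slice = region_index / regions_per_slice;   // -> 2
      size_t off   = bytes_per_slice * slice;            // -> 131072
      std::printf("slice=%zu off=%zu len=%zu\n", slice, off, bytes_per_slice);
      return 0;
    }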
2537 
2538 void ShenandoahHeap::safepoint_synchronize_begin() {
2539   SuspendibleThreadSet::synchronize();
2540 }
2541 
2542 void ShenandoahHeap::safepoint_synchronize_end() {
2543   SuspendibleThreadSet::desynchronize();
2544 }
2545 
2546 void ShenandoahHeap::try_inject_alloc_failure() {
2547   if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2548     _inject_alloc_failure.set();
2549     os::naked_short_sleep(1);
2550     if (cancelled_gc()) {
2551       log_info(gc)("Allocation failure was successfully injected");
2552     }
2553   }
2554 }
2555 
2556 bool ShenandoahHeap::should_inject_alloc_failure() {
2557   return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2558 }
2559 
2560 void ShenandoahHeap::initialize_serviceability() {
2561   _memory_pool = new ShenandoahMemoryPool(this);
2562   _cycle_memory_manager.add_pool(_memory_pool);
2563   _stw_memory_manager.add_pool(_memory_pool);
2564 }
2565 
2566 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2567   GrowableArray<GCMemoryManager*> memory_managers(2);
2568   memory_managers.append(&_cycle_memory_manager);
2569   memory_managers.append(&_stw_memory_manager);
2570   return memory_managers;
2571 }
2572 
2573 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2574   GrowableArray<MemoryPool*> memory_pools(1);
2575   memory_pools.append(_memory_pool);
2576   return memory_pools;
2577 }
2578 
2579 MemoryUsage ShenandoahHeap::memory_usage() {
2580   return MemoryUsage(_initial_size, used(), committed(), max_capacity());
2581 }
2582 
2583 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2584   _heap(ShenandoahHeap::heap()),
2585   _index(0) {}
2586 
2587 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2588   _heap(heap),
2589   _index(0) {}
2590 
2591 void ShenandoahRegionIterator::reset() {
2592   _index = 0;
2593 }
2594 
2595 bool ShenandoahRegionIterator::has_next() const {
2596   return _index < _heap->num_regions();
2597 }
2598 
2599 char ShenandoahHeap::gc_state() const {
2600   return _gc_state.raw_value();

2625   }
2626 }
2627 
2628 bool ShenandoahHeap::requires_barriers(stackChunkOop obj) const {
2629   if (is_idle()) return false;
2630 
2631   // Objects allocated after marking start are implicitly alive and don't need any barriers during
2632   // the marking phase.
2633   if (is_concurrent_mark_in_progress() &&
2634      !marking_context()->allocated_after_mark_start(obj)) {
2635     return true;
2636   }
2637 
2638   // Cannot guarantee that obj is deeply good.
2639   if (has_forwarded_objects()) {
2640     return true;
2641   }
2642 
2643   return false;
2644 }
2645 
2646 ShenandoahGeneration* ShenandoahHeap::generation_for(ShenandoahAffiliation affiliation) const {
2647   if (!mode()->is_generational()) {
2648     return global_generation();
2649   } else if (affiliation == YOUNG_GENERATION) {
2650     return young_generation();
2651   } else if (affiliation == OLD_GENERATION) {
2652     return old_generation();
2653   }
2654 
2655   ShouldNotReachHere();
2656   return nullptr;
2657 }
2658 
2659 void ShenandoahHeap::log_heap_status(const char* msg) const {
2660   if (mode()->is_generational()) {
2661     young_generation()->log_status(msg);
2662     old_generation()->log_status(msg);
2663   } else {
2664     global_generation()->log_status(msg);
2665   }
2666 }