src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp

   1 /*
   2  * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "memory/allocation.hpp"
  28 #include "memory/universe.hpp"
  29 
  30 #include "gc/shared/classUnloadingContext.hpp"
  31 #include "gc/shared/gcArguments.hpp"
  32 #include "gc/shared/gcTimer.hpp"
  33 #include "gc/shared/gcTraceTime.inline.hpp"
  34 #include "gc/shared/locationPrinter.inline.hpp"
  35 #include "gc/shared/memAllocator.hpp"
  36 #include "gc/shared/plab.hpp"
  37 #include "gc/shared/tlab_globals.hpp"
  38 
  39 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  40 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  41 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  42 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  43 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
  45 #include "gc/shenandoah/shenandoahControlThread.hpp"
  46 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  47 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  48 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  49 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  50 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  51 #include "gc/shenandoah/shenandoahInitLogger.hpp"
  52 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  53 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
  54 #include "gc/shenandoah/shenandoahMetrics.hpp"
  55 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  56 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  57 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
  58 #include "gc/shenandoah/shenandoahPadding.hpp"
  59 #include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
  60 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  61 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  62 #include "gc/shenandoah/shenandoahSTWMark.hpp"
  63 #include "gc/shenandoah/shenandoahUtils.hpp"
  64 #include "gc/shenandoah/shenandoahVerifier.hpp"
  65 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  66 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  67 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  68 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  69 #include "gc/shenandoah/mode/shenandoahIUMode.hpp"
  70 #include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
  71 #include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
  72 #if INCLUDE_JFR
  73 #include "gc/shenandoah/shenandoahJfrSupport.hpp"
  74 #endif
  75 
  76 #include "classfile/systemDictionary.hpp"
  77 #include "code/codeCache.hpp"
  78 #include "memory/classLoaderMetaspace.hpp"
  79 #include "memory/metaspaceUtils.hpp"
  80 #include "oops/compressedOops.inline.hpp"
  81 #include "prims/jvmtiTagMap.hpp"
  82 #include "runtime/atomic.hpp"
  83 #include "runtime/globals.hpp"
  84 #include "runtime/interfaceSupport.inline.hpp"
  85 #include "runtime/java.hpp"
  86 #include "runtime/orderAccess.hpp"
  87 #include "runtime/safepointMechanism.hpp"
  88 #include "runtime/vmThread.hpp"
  89 #include "services/mallocTracker.hpp"
  90 #include "services/memTracker.hpp"
  91 #include "utilities/events.hpp"
 143 jint ShenandoahHeap::initialize() {
 144   //
 145   // Figure out heap sizing
 146   //
 147 
 148   size_t init_byte_size = InitialHeapSize;
 149   size_t min_byte_size  = MinHeapSize;
 150   size_t max_byte_size  = MaxHeapSize;
 151   size_t heap_alignment = HeapAlignment;
 152 
 153   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 154 
 155   Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
 156   Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
 157 
 158   _num_regions = ShenandoahHeapRegion::region_count();
 159   assert(_num_regions == (max_byte_size / reg_size_bytes),
 160          "Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT,
 161          _num_regions, max_byte_size, reg_size_bytes);
 162 
 163   // Now we know the number of regions, initialize the heuristics.
 164   initialize_heuristics();
 165 
 166   size_t num_committed_regions = init_byte_size / reg_size_bytes;
 167   num_committed_regions = MIN2(num_committed_regions, _num_regions);
 168   assert(num_committed_regions <= _num_regions, "sanity");
 169   _initial_size = num_committed_regions * reg_size_bytes;
 170 
 171   size_t num_min_regions = min_byte_size / reg_size_bytes;
 172   num_min_regions = MIN2(num_min_regions, _num_regions);
 173   assert(num_min_regions <= _num_regions, "sanity");
 174   _minimum_size = num_min_regions * reg_size_bytes;
 175 
 176   // Default to max heap size.
 177   _soft_max_size = _num_regions * reg_size_bytes;
 178 
 179   _committed = _initial_size;
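  // A worked example of the sizing above (illustrative values, not defaults): with -Xms1g,
  // -Xmx4g and 2M regions, _num_regions = 4g/2m = 2048 and num_committed_regions = 1g/2m = 512,
  // so _initial_size = _committed = 1g; _minimum_size follows MinHeapSize the same way, and
  // _soft_max_size starts out at the full 4g.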
 180 
 181   size_t heap_page_size   = UseLargePages ? os::large_page_size() : os::vm_page_size();
 182   size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 183   size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 184 
 185   //
 186   // Reserve and commit memory for heap
 187   //
 188 
 189   ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
 190   initialize_reserved_region(heap_rs);
 191   _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
 192   _heap_region_special = heap_rs.special();
 193 
 194   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
 195          "Misaligned heap: " PTR_FORMAT, p2i(base()));
 196 
 197 #if SHENANDOAH_OPTIMIZED_MARKTASK
 198   // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
 199   // Fail if we ever attempt to address more than we can.
 200   if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
 201     FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
 202                           "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
 203                           "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
 204                 p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
 205     vm_exit_during_initialization("Fatal Error", buf);
 206   }
 207 #endif
 208 
 209   ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
 210   if (!_heap_region_special) {
 211     os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
 212                               "Cannot commit heap memory");
 213   }
 214 
 215   //
 216   // Reserve and commit memory for bitmap(s)
 217   //
 218 
 219   _bitmap_size = ShenandoahMarkBitMap::compute_size(heap_rs.size());
 220   _bitmap_size = align_up(_bitmap_size, bitmap_page_size);
 221 
 222   size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();
 223 
 224   guarantee(bitmap_bytes_per_region != 0,
 225             "Bitmap bytes per region should not be zero");
 226   guarantee(is_power_of_2(bitmap_bytes_per_region),
 227             "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
 228 
 229   if (bitmap_page_size > bitmap_bytes_per_region) {
 230     _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
 231     _bitmap_bytes_per_slice = bitmap_page_size;
 232   } else {
 233     _bitmap_regions_per_slice = 1;
 234     _bitmap_bytes_per_slice = bitmap_bytes_per_region;
 235   }
 236 
 237   guarantee(_bitmap_regions_per_slice >= 1,
 238             "Should have at least one region per slice: " SIZE_FORMAT,
 239             _bitmap_regions_per_slice);
 240 
 241   guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
 242             "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
 243             _bitmap_bytes_per_slice, bitmap_page_size);
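  // A worked example of the slicing above (illustrative numbers): if bitmap_bytes_per_region
  // is 64K and bitmap_page_size is 4K, the page is smaller than the per-region bitmap, so each
  // region gets its own 64K slice. With 2M large pages instead, one page covers 2M/64K = 32
  // regions, so slices are 2M and bitmap commits/uncommits happen 32 regions at a time.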
 244 
 245   ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
 246   MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
 247   _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
 248   _bitmap_region_special = bitmap.special();
 249 
 250   size_t bitmap_init_commit = _bitmap_bytes_per_slice *
 251                               align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
 252   bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
 253   if (!_bitmap_region_special) {
 254     os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
 255                               "Cannot commit bitmap memory");
 256   }
 257 
 258   _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions, _max_workers);
 259 
 260   if (ShenandoahVerify) {
 261     ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
 262     if (!verify_bitmap.special()) {
 263       os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
 264                                 "Cannot commit verification bitmap memory");
 265     }
 266     MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
 267     MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
 268     _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
 269     _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
 270   }
 271 
 272   // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
 273   ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
 274   MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
 275   _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
 276   _aux_bitmap_region_special = aux_bitmap.special();
 277   _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
 278 
 279   //
 280   // Create regions and region sets
 281   //
 282   size_t region_align = align_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE);
 283   size_t region_storage_size = align_up(region_align * _num_regions, region_page_size);
 284   region_storage_size = align_up(region_storage_size, os::vm_allocation_granularity());
 285 
 286   ReservedSpace region_storage(region_storage_size, region_page_size);
 287   MemTracker::record_virtual_memory_type(region_storage.base(), mtGC);
 288   if (!region_storage.special()) {
 289     os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false,
 290                               "Cannot commit region memory");
 291   }
 292 
 293   // Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks.
 294   // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there.
 295   // If not successful, bite a bullet and allocate at whatever address.
 296   {
 297     size_t cset_align = MAX2<size_t>(os::vm_page_size(), os::vm_allocation_granularity());
 298     size_t cset_size = align_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align);
 299 
 300     uintptr_t min = round_up_power_of_2(cset_align);
 301     uintptr_t max = (1u << 30u);
 302 
 303     for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
 304       char* req_addr = (char*)addr;
 305       assert(is_aligned(req_addr, cset_align), "Should be aligned");
 306       ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size(), req_addr);
 307       if (cset_rs.is_reserved()) {
 308         assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
 309         _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 310         break;
 311       }
 312     }
 313 
 314     if (_collection_set == nullptr) {
 315       ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size());
 316       _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 317     }
 318   }
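  // Why low addresses help: the fast in-cset test is a byte load from
  // biased_map_address() + (addr >> region_size_bytes_shift()), so a small constant map
  // base can be folded straight into the compiled addressing mode. The loop above probes
  // power-of-two addresses from round_up_power_of_2(cset_align) up to 1G (e.g. 4K, 8K,
  // 16K, ... with a 4K cset_align), and only falls back to an arbitrary address when
  // every probe fails.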
 319 
 320   _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
 321   _free_set = new ShenandoahFreeSet(this, _num_regions);
 322 
 323   {
 324     ShenandoahHeapLocker locker(lock());
 325 
 326     for (size_t i = 0; i < _num_regions; i++) {
 327       HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
 328       bool is_committed = i < num_committed_regions;
 329       void* loc = region_storage.base() + i * region_align;
 330 
 331       ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
 332       assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");
 333 
 334       _marking_context->initialize_top_at_mark_start(r);
 335       _regions[i] = r;
 336       assert(!collection_set()->is_in(i), "New region should not be in collection set");
 337     }
 338 
 339     // Initialize to complete
 340     _marking_context->mark_complete();
 341 
 342     _free_set->rebuild();
 343   }
 344 
 345   if (AlwaysPreTouch) {
 346     // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
 347     // before initialize() below zeroes it with the initializing thread. For any given region,
 348     // we touch the region and the corresponding bitmaps from the same thread.
 349     ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
 350 
 351     _pretouch_heap_page_size = heap_page_size;
 352     _pretouch_bitmap_page_size = bitmap_page_size;
 353 
 354 #ifdef LINUX
 355     // UseTransparentHugePages would madvise that backing memory can be coalesced into huge
 356     // pages. But the kernel needs to know that every small page is used, in order to coalesce
 357     // them into huge ones. Therefore, we need to pretouch with smaller pages.
 358     if (UseTransparentHugePages) {
 359       _pretouch_heap_page_size = (size_t)os::vm_page_size();
 360       _pretouch_bitmap_page_size = (size_t)os::vm_page_size();
 361     }
 362 #endif
 379   for (uint worker = 0; worker < _max_workers; worker++) {
 380     _liveness_cache[worker] = NEW_C_HEAP_ARRAY(ShenandoahLiveData, _num_regions, mtGC);
 381     Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
 382   }
 383 
 384   // There should probably be Shenandoah-specific options for these,
 385   // just as there are G1-specific options.
 386   {
 387     ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
 388     satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
 389     satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
 390   }
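  // Roughly, the two thresholds above mean: a mutator's SATB buffer is enqueued for
  // processing only once it is at least 60% full after filtering (otherwise it is
  // compacted and reused), and marking threads start draining completed buffers once
  // at least 20 of them have accumulated.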
 391 
 392   _monitoring_support = new ShenandoahMonitoringSupport(this);
 393   _phase_timings = new ShenandoahPhaseTimings(max_workers());
 394   ShenandoahCodeRoots::initialize();
 395 
 396   if (ShenandoahPacing) {
 397     _pacer = new ShenandoahPacer(this);
 398     _pacer->setup_for_idle();
 399   } else {
 400     _pacer = nullptr;
 401   }
 402 
 403   _control_thread = new ShenandoahControlThread();
 404 
 405   ShenandoahInitLogger::print();
 406 
 407   return JNI_OK;
 408 }
 409 
 410 void ShenandoahHeap::initialize_mode() {
 411   if (ShenandoahGCMode != nullptr) {
 412     if (strcmp(ShenandoahGCMode, "satb") == 0) {
 413       _gc_mode = new ShenandoahSATBMode();
 414     } else if (strcmp(ShenandoahGCMode, "iu") == 0) {
 415       _gc_mode = new ShenandoahIUMode();
 416     } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
 417       _gc_mode = new ShenandoahPassiveMode();
 418     } else {
 419       vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
 420     }
 421   } else {
 422     vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
 423   }
 424   _gc_mode->initialize_flags();
 425   if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 426     vm_exit_during_initialization(
 427             err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 428                     _gc_mode->name()));
 429   }
 430   if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
 431     vm_exit_during_initialization(
 432             err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 433                     _gc_mode->name()));
 434   }
 435 }
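// For reference, the dispatch above corresponds to command lines such as:
//   java -XX:+UseShenandoahGC -XX:ShenandoahGCMode=satb ...      (the default mode)
//   java -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu ...
//   java -XX:+UnlockDiagnosticVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive ...
// where the unlock flag each mode needs follows from its is_experimental()/is_diagnostic()
// answers, checked above.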
 436 
 437 void ShenandoahHeap::initialize_heuristics() {
 438   assert(_gc_mode != nullptr, "Must be initialized");
 439   _heuristics = _gc_mode->initialize_heuristics();
 440 
 441   if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 442     vm_exit_during_initialization(
 443             err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 444                     _heuristics->name()));
 445   }
 446   if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
 447     vm_exit_during_initialization(
 448             err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 449                     _heuristics->name()));
 450   }
 451 }
 452 
 453 #ifdef _MSC_VER
 454 #pragma warning( push )
 455 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 456 #endif
 457 
 458 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 459   CollectedHeap(),
 460   _initial_size(0),
 461   _used(0),
 462   _committed(0),
 463   _bytes_allocated_since_gc_start(0),
 464   _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
 465   _workers(nullptr),
 466   _safepoint_workers(nullptr),
 467   _heap_region_special(false),
 468   _num_regions(0),
 469   _regions(nullptr),
 470   _update_refs_iterator(this),
 471   _gc_state_changed(false),
 472   _control_thread(nullptr),
 473   _shenandoah_policy(policy),
 474   _gc_mode(nullptr),
 475   _heuristics(nullptr),
 476   _free_set(nullptr),
 477   _pacer(nullptr),
 478   _verifier(nullptr),
 479   _phase_timings(nullptr),
 480   _monitoring_support(nullptr),
 481   _memory_pool(nullptr),
 482   _stw_memory_manager("Shenandoah Pauses"),
 483   _cycle_memory_manager("Shenandoah Cycles"),
 484   _gc_timer(new ConcurrentGCTimer()),
 485   _soft_ref_policy(),
 486   _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
 487   _ref_processor(new ShenandoahReferenceProcessor(MAX2(_max_workers, 1U))),
 488   _marking_context(nullptr),
 489   _bitmap_size(0),
 490   _bitmap_regions_per_slice(0),
 491   _bitmap_bytes_per_slice(0),
 492   _bitmap_region_special(false),
 493   _aux_bitmap_region_special(false),
 494   _liveness_cache(nullptr),
 495   _collection_set(nullptr)
 496 {
 497   // Initialize GC mode early, so we can adjust barrier support
 498   initialize_mode();
 499   BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));
 500 
 501   _max_workers = MAX2(_max_workers, 1U);
 502   _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
 503   if (_workers == nullptr) {
 504     vm_exit_during_initialization("Failed necessary allocation.");
 505   } else {
 506     _workers->initialize_workers();
 507   }
 508 
 509   if (ParallelGCThreads > 1) {
 510     _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread",
 511                                                      ParallelGCThreads);
 512     _safepoint_workers->initialize_workers();
 513   }
 514 }
 515 
 516 #ifdef _MSC_VER
 517 #pragma warning( pop )
 518 #endif
 519 
 520 class ShenandoahResetBitmapTask : public WorkerTask {
 521 private:
 522   ShenandoahRegionIterator _regions;
 523 
 524 public:
 525   ShenandoahResetBitmapTask() :
 526     WorkerTask("Shenandoah Reset Bitmap") {}
 527 
 528   void work(uint worker_id) {
 529     ShenandoahHeapRegion* region = _regions.next();
 530     ShenandoahHeap* heap = ShenandoahHeap::heap();
 531     ShenandoahMarkingContext* const ctx = heap->marking_context();
 532     while (region != nullptr) {
 533       if (heap->is_bitmap_slice_committed(region)) {
 534         ctx->clear_bitmap(region);
 535       }
 536       region = _regions.next();
 537     }
 538   }
 539 };
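// Note on the task above: ShenandoahRegionIterator is a claim-based iterator shared by all
// workers, so each worker repeatedly claims "the next" region. This balances the load
// dynamically; a worker that hits cheap (uncommitted) slices simply claims more regions.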
 540 
 541 void ShenandoahHeap::reset_mark_bitmap() {
 542   assert_gc_workers(_workers->active_workers());
 543   mark_incomplete_marking_context();
 544 
 545   ShenandoahResetBitmapTask task;
 546   _workers->run_task(&task);
 547 }
 548 
 549 void ShenandoahHeap::print_on(outputStream* st) const {
 550   st->print_cr("Shenandoah Heap");
 551   st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
 552                byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
 553                byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
 554                byte_size_in_proper_unit(committed()),    proper_unit_for_byte_size(committed()),
 555                byte_size_in_proper_unit(used()),         proper_unit_for_byte_size(used()));
 556   st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions",
 557                num_regions(),
 558                byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
 559                proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));
 560 
 561   st->print("Status: ");
 562   if (has_forwarded_objects())                 st->print("has forwarded objects, ");
 563   if (is_concurrent_mark_in_progress())        st->print("marking, ");
 564   if (is_evacuation_in_progress())             st->print("evacuating, ");
 565   if (is_update_refs_in_progress())            st->print("updating refs, ");
 566   if (is_degenerated_gc_in_progress())         st->print("degenerated gc, ");
 567   if (is_full_gc_in_progress())                st->print("full gc, ");
 568   if (is_full_gc_move_in_progress())           st->print("full gc move, ");
 569   if (is_concurrent_weak_root_in_progress())   st->print("concurrent weak roots, ");
 570   if (is_concurrent_strong_root_in_progress() &&
 571       !is_concurrent_weak_root_in_progress())  st->print("concurrent strong roots, ");
 572 
 573   if (cancelled_gc()) {
 574     st->print("cancelled");
 575   } else {
 576     st->print("not cancelled");
 577   }
 578   st->cr();
 579 
 580   st->print_cr("Reserved region:");
 581   st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
 582                p2i(reserved_region().start()),
 583                p2i(reserved_region().end()));
 594   st->cr();
 595   MetaspaceUtils::print_on(st);
 596 
 597   if (Verbose) {
 598     st->cr();
 599     print_heap_regions_on(st);
 600   }
 601 }
 602 
 603 class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
 604 public:
 605   void do_thread(Thread* thread) {
 606     assert(thread != nullptr, "Sanity");
 607     assert(thread->is_Worker_thread(), "Only worker thread expected");
 608     ShenandoahThreadLocalData::initialize_gclab(thread);
 609   }
 610 };
 611 
 612 void ShenandoahHeap::post_initialize() {
 613   CollectedHeap::post_initialize();
 614   MutexLocker ml(Threads_lock);
 615 
 616   ShenandoahInitWorkerGCLABClosure init_gclabs;
 617   _workers->threads_do(&init_gclabs);
 618 
 619   // The GCLAB cannot be initialized early during VM startup, because it cannot determine its max_size yet.
 620   // Now, we let WorkerThreads initialize the GCLAB when a new worker is created.
 621   _workers->set_initialize_gclab();
 622   if (_safepoint_workers != nullptr) {
 623     _safepoint_workers->threads_do(&init_gclabs);
 624     _safepoint_workers->set_initialize_gclab();
 625   }
 626 
 627   _heuristics->initialize();
 628 
 629   JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers());
 630 }
 631 
 632 size_t ShenandoahHeap::used() const {
 633   return Atomic::load(&_used);
 634 }
 635 
 636 size_t ShenandoahHeap::committed() const {
 637   return Atomic::load(&_committed);
 638 }
 639 
 640 void ShenandoahHeap::increase_committed(size_t bytes) {
 641   shenandoah_assert_heaplocked_or_safepoint();
 642   _committed += bytes;
 643 }
 644 
 645 void ShenandoahHeap::decrease_committed(size_t bytes) {
 646   shenandoah_assert_heaplocked_or_safepoint();
 647   _committed -= bytes;
 648 }
 649 
 650 void ShenandoahHeap::increase_used(size_t bytes) {
 651   Atomic::add(&_used, bytes, memory_order_relaxed);
 652 }
 653 
 654 void ShenandoahHeap::set_used(size_t bytes) {
 655   Atomic::store(&_used, bytes);
 656 }
 657 
 658 void ShenandoahHeap::decrease_used(size_t bytes) {
 659   assert(used() >= bytes, "never decrease heap size by more than we've left");
 660   Atomic::sub(&_used, bytes, memory_order_relaxed);
 661 }
 662 
 663 void ShenandoahHeap::increase_allocated(size_t bytes) {
 664   Atomic::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed);
 665 }
 666 
 667 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
 668   size_t bytes = words * HeapWordSize;
 669   if (!waste) {
 670     increase_used(bytes);
 671   }
 672   increase_allocated(bytes);
 673   if (ShenandoahPacing) {
 674     control_thread()->pacing_notify_alloc(words);
 675     if (waste) {
 676       pacer()->claim_for_alloc(words, true);
 677     }
 678   }
 679 }
 680 
 681 size_t ShenandoahHeap::capacity() const {
 682   return committed();
 683 }
 684 
 685 size_t ShenandoahHeap::max_capacity() const {
 686   return _num_regions * ShenandoahHeapRegion::region_size_bytes();
 687 }
 688 
 689 size_t ShenandoahHeap::soft_max_capacity() const {
 690   size_t v = Atomic::load(&_soft_max_size);
 691   assert(min_capacity() <= v && v <= max_capacity(),
 692          "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
 693          min_capacity(), v, max_capacity());
 694   return v;
 695 }
 696 
 697 void ShenandoahHeap::set_soft_max_capacity(size_t v) {
 698   assert(min_capacity() <= v && v <= max_capacity(),
 699          "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
 700          min_capacity(), v, max_capacity());
 701   Atomic::store(&_soft_max_size, v);
 702 }
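// The soft max is typically driven by the manageable -XX:SoftMaxHeapSize flag, e.g.:
//   java -XX:+UseShenandoahGC -Xmx8g -XX:SoftMaxHeapSize=4g ...
// Being manageable, it can also be adjusted on a running VM (e.g. via jinfo), after which
// heuristics treat the soft max, rather than the hard max, as the working capacity.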
 703 
 704 size_t ShenandoahHeap::min_capacity() const {
 705   return _minimum_size;
 706 }
 707 
 708 size_t ShenandoahHeap::initial_capacity() const {
 709   return _initial_size;
 710 }
 711 
 712 bool ShenandoahHeap::is_in(const void* p) const {
 713   HeapWord* heap_base = (HeapWord*) base();
 714   HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
 715   return p >= heap_base && p < last_region_end;
 716 }
 717 
 718 void ShenandoahHeap::op_uncommit(double shrink_before, size_t shrink_until) {
 719   assert (ShenandoahUncommit, "should be enabled");
 720 
 721   // The application allocates from the beginning of the heap, while GC allocates at
 722   // the end of it. It is more efficient to uncommit from the end, so that the application
 723   // can keep using the committed regions near the bottom. GC allocations are much less
 724   // frequent and can therefore tolerate the commit costs.
 725 
 726   size_t count = 0;
 727   for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow
 728     ShenandoahHeapRegion* r = get_region(i - 1);
 729     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
 730       ShenandoahHeapLocker locker(lock());
 731       if (r->is_empty_committed()) {
 732         if (committed() < shrink_until + ShenandoahHeapRegion::region_size_bytes()) {
 733           break;
 734         }
 735 
 736         r->make_uncommitted();
 737         count++;
 738       }
 739     }
 740     SpinPause(); // allow allocators to take the lock
 741   }
 742 
 743   if (count > 0) {
 744     control_thread()->notify_heap_changed();
 745   }
 746 }
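// A sketch of how this is driven, assuming the usual control-thread setup: the control
// thread periodically calls op_uncommit() with shrink_before set to "now minus
// ShenandoahUncommitDelay", so only regions that have stayed empty for at least that many
// milliseconds are uncommitted, while shrink_until bounds how far the committed size may drop.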
 747 
 748 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
 749   // New object should fit the GCLAB size
 750   size_t min_size = MAX2(size, PLAB::min_size());
 751 
 752   // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
 753   size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
 754   new_size = MIN2(new_size, PLAB::max_size());
 755   new_size = MAX2(new_size, PLAB::min_size());
 756 
 757   // Record the new heuristic value even if we take a shortcut. This captures
 758   // the case when moderately-sized objects always take the shortcut. At some point,
 759   // heuristics should catch up with them.
 760   ShenandoahThreadLocalData::set_gclab_size(thread, new_size);
 761 
 762   if (new_size < size) {
 763     // The new size still does not fit the object. Fall back to shared allocation.
 764     // This avoids retiring perfectly good GCLABs when we encounter a large object.
 765     return nullptr;
 766   }
 767 
 768   // Retire current GCLAB, and allocate a new one.
 769   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
 770   gclab->retire();
 771 
 772   size_t actual_size = 0;
 773   HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
 774   if (gclab_buf == nullptr) {
 775     return nullptr;
 776   }
 777 
 778   assert (size <= actual_size, "allocation should fit");
 779 
 780   if (ZeroTLAB) {
 781     // ..and clear it.
 782     Copy::zero_to_words(gclab_buf, actual_size);
 783   } else {
 784     // ...and zap just allocated object.
 785 #ifdef ASSERT
 786     // Skip mangling the space corresponding to the object header to
 787     // ensure that the returned space is not considered parsable by
 788     // any concurrent GC thread.
 789     size_t hdr_size = oopDesc::header_size();
 790     Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
 791 #endif // ASSERT
 792   }
 793   gclab->set_buf(gclab_buf, actual_size);
 794   return gclab->allocate(size);
 795 }
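// A worked example of the policy above (illustrative sizes): a thread whose GCLAB was
// 64K words doubles to 128K, clamped into [PLAB::min_size(), PLAB::max_size()]. If the
// incoming object is still larger than the clamped new size, the function returns nullptr
// and the caller falls back to a shared allocation, leaving the current, still useful,
// GCLAB unretired.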
 796 
 797 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
 798                                             size_t requested_size,
 799                                             size_t* actual_size) {
 800   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
 801   HeapWord* res = allocate_memory(req);
 802   if (res != nullptr) {
 803     *actual_size = req.actual_size();
 804   } else {
 805     *actual_size = 0;
 806   }
 807   return res;
 808 }
 809 
 810 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
 811                                              size_t word_size,
 812                                              size_t* actual_size) {
 813   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
 814   HeapWord* res = allocate_memory(req);
 815   if (res != nullptr) {
 816     *actual_size = req.actual_size();
 817   } else {
 818     *actual_size = 0;
 819   }
 820   return res;
 821 }
 822 
 823 HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
 824   intptr_t pacer_epoch = 0;
 825   bool in_new_region = false;
 826   HeapWord* result = nullptr;
 827 
 828   if (req.is_mutator_alloc()) {
 829     if (ShenandoahPacing) {
 830       pacer()->pace_for_alloc(req.size());
 831       pacer_epoch = pacer()->epoch();
 832     }
 833 
 834     if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
 835       result = allocate_memory_under_lock(req, in_new_region);
 836     }
 837 
 838   // Allocation failed, block until the control thread has reacted, then retry the allocation.
 839     //
 840   // It might happen that a thread requesting an allocation unblocks
 841   // much later, after the GC has happened, only to fail its second allocation
 842   // because other threads have already depleted the free storage. In this case,
 843   // a better strategy is to try again, as long as GC makes progress (or until at
 844   // least one full GC has completed).
 845     size_t original_count = shenandoah_policy()->full_gc_count();
 846     while (result == nullptr
 847         && (_progress_last_gc.is_set() || original_count == shenandoah_policy()->full_gc_count())) {
 848       control_thread()->handle_alloc_failure(req);
 849       result = allocate_memory_under_lock(req, in_new_region);
 850     }
 851   } else {
 852     assert(req.is_gc_alloc(), "Can only accept GC allocs here");
 853     result = allocate_memory_under_lock(req, in_new_region);
 854     // Do not call handle_alloc_failure() here, because we cannot block.
 855     // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
 856   }
 857 
 858   if (in_new_region) {
 859     control_thread()->notify_heap_changed();
 860   }
 861 
 862   if (result != nullptr) {
 863     size_t requested = req.size();
 864     size_t actual = req.actual_size();
 865 
 866     assert (req.is_lab_alloc() || (requested == actual),
 867             "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
 868             ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);
 869 
 870     if (req.is_mutator_alloc()) {
 871       notify_mutator_alloc_words(actual, false);
 872 
 873       // If we requested more than we were granted, give the rest back to pacer.
 874       // This only matters if we are in the same pacing epoch: do not try to unpace
 875       // over the budget for the other phase.
 876       if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
 877         pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
 878       }
 879     } else {
 880       increase_used(actual*HeapWordSize);
 881     }
 882   }
 883 
 884   return result;
 885 }
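// The mutator path above composes three mechanisms: pacing (pace_for_alloc taxes the
// allocating thread up front; unpace_for_alloc refunds the unused part within the same
// epoch), failure handling (handle_alloc_failure blocks the thread until the control
// thread reacts), and the retry loop, which gives up only once GC stops making progress
// and at least one full GC has completed since the first failure.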
 886 
 887 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
 888   ShenandoahHeapLocker locker(lock());
 889   return _free_set->allocate(req, in_new_region);
 890 }
 891 
 892 HeapWord* ShenandoahHeap::mem_allocate(size_t size,
 893                                         bool*  gc_overhead_limit_was_exceeded) {
 894   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
 895   return allocate_memory(req);
 896 }
 897 
 898 MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
 899                                                              size_t size,
 900                                                              Metaspace::MetadataType mdtype) {
 901   MetaWord* result;
 902 
 903   // Inform metaspace OOM to GC heuristics if class unloading is possible.
 904   if (heuristics()->can_unload_classes()) {
 905     ShenandoahHeuristics* h = heuristics();
 906     h->record_metaspace_oom();
 907   }
 908 
 909   // Expand and retry allocation
 910   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
 911   if (result != nullptr) {
 912     return result;
 913   }
 914 
 915   // Start full GC
 916   collect(GCCause::_metadata_GC_clear_soft_refs);
 917 
 918   // Retry allocation
 919   result = loader_data->metaspace_non_null()->allocate(size, mdtype);
 920   if (result != nullptr) {
 921     return result;
 922   }
 923 
 924   // Expand and retry allocation
 925   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
 978 private:
 979   void do_work() {
 980     ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
 981     ShenandoahHeapRegion* r;
 982     while ((r = _cs->claim_next()) != nullptr) {
 983       assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
 984       _sh->marked_object_iterate(r, &cl);
 985 
 986       if (ShenandoahPacing) {
 987         _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
 988       }
 989 
 990       if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
 991         break;
 992       }
 993     }
 994   }
 995 };
 996 
 997 void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
 998   ShenandoahEvacuationTask task(this, _collection_set, concurrent);
 999   workers()->run_task(&task);
1000 }
1001 
1002 void ShenandoahHeap::trash_cset_regions() {
1003   ShenandoahHeapLocker locker(lock());
1004 
1005   ShenandoahCollectionSet* set = collection_set();
1006   ShenandoahHeapRegion* r;
1007   set->clear_current_index();
1008   while ((r = set->next()) != nullptr) {
1009     r->make_trash();
1010   }
1011   collection_set()->clear();
1012 }
1013 
1014 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
1015   st->print_cr("Heap Regions:");
1016   st->print_cr("Region state: EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HP=pinned humongous start");
1017   st->print_cr("              HC=humongous continuation, CS=collection set, TR=trash, P=pinned, CSP=pinned collection set");
1018   st->print_cr("BTE=bottom/top/end, TAMS=top-at-mark-start");
1019   st->print_cr("UWM=update watermark, U=used");
1020   st->print_cr("T=TLAB allocs, G=GCLAB allocs");
1021   st->print_cr("S=shared allocs, L=live data");
1022   st->print_cr("CP=critical pins");
1023 
1024   for (size_t i = 0; i < num_regions(); i++) {
1025     get_region(i)->print_on(st);
1026   }
1027 }
1028 
1029 void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
1030   assert(start->is_humongous_start(), "reclaim regions starting with the first one");
1031 
1032   oop humongous_obj = cast_to_oop(start->bottom());
1033   size_t size = humongous_obj->size();
1034   size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
1035   size_t index = start->index() + required_regions - 1;
1036 
1037   assert(!start->has_live(), "liveness must be zero");
1038 
1039   for(size_t i = 0; i < required_regions; i++) {
1040     // Reclaim from the tail. Otherwise, an assertion fails when printing a region to the trace log,
1041     // as printing expects that every humongous region belongs to a range that starts with a humongous start region.
1042     ShenandoahHeapRegion* region = get_region(index --);
1043 
1044     assert(region->is_humongous(), "expect correct humongous start or continuation");
1045     assert(!region->is_cset(), "Humongous region should not be in collection set");
1046 
1047     region->make_trash_immediate();
1048   }
1049 }
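// Worked example (illustrative, assuming 2M regions): a 9M humongous object needs
// required_regions = ceil(9M / 2M) = 5, so the loop trashes indices start+4, start+3,
// ..., start, i.e. tail-first, for the reason given in the comment above.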
1050 
1051 class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
1052 public:
1053   ShenandoahCheckCleanGCLABClosure() {}
1054   void do_thread(Thread* thread) {
1055     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1056     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1057     assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
1058   }
1059 };
1060 
1061 class ShenandoahRetireGCLABClosure : public ThreadClosure {
1062 private:
1063   bool const _resize;
1064 public:
1065   ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
1066   void do_thread(Thread* thread) {
1067     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1068     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1069     gclab->retire();
1070     if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1071       ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1072     }
1073   }
1074 };
1075 
1076 void ShenandoahHeap::labs_make_parsable() {
1077   assert(UseTLAB, "Only call with UseTLAB");
1078 
1079   ShenandoahRetireGCLABClosure cl(false);
1080 
1081   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1082     ThreadLocalAllocBuffer& tlab = t->tlab();
1083     tlab.make_parsable();
1084     cl.do_thread(t);
1085   }
1086 
1087   workers()->threads_do(&cl);
1088 }
1089 
1090 void ShenandoahHeap::tlabs_retire(bool resize) {
1091   assert(UseTLAB, "Only call with UseTLAB");
1092   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
1154   }
1155   return nullptr;
1156 }
1157 
1158 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1159   ShenandoahHeapRegion* r = heap_region_containing(addr);
1160   return r->block_is_obj(addr);
1161 }
1162 
1163 bool ShenandoahHeap::print_location(outputStream* st, void* addr) const {
1164   return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr);
1165 }
1166 
1167 void ShenandoahHeap::prepare_for_verify() {
1168   if (SafepointSynchronize::is_at_safepoint() && UseTLAB) {
1169     labs_make_parsable();
1170   }
1171 }
1172 
1173 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1174   tcl->do_thread(_control_thread);
1175   workers()->threads_do(tcl);
1176   if (_safepoint_workers != nullptr) {
1177     _safepoint_workers->threads_do(tcl);
1178   }
1179 }
1180 
1181 void ShenandoahHeap::print_tracing_info() const {
1182   LogTarget(Info, gc, stats) lt;
1183   if (lt.is_enabled()) {
1184     ResourceMark rm;
1185     LogStream ls(lt);
1186 
1187     phase_timings()->print_global_on(&ls);
1188 
1189     ls.cr();
1190     ls.cr();
1191 
1192     shenandoah_policy()->print_gc_stats(&ls);
1193 
1194     ls.cr();
1195     ls.cr();
1196   }
1197 }
1198 
1199 void ShenandoahHeap::verify(VerifyOption vo) {
1200   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1201     if (ShenandoahVerify) {
1202       verifier()->verify_generic(vo);
1203     } else {
1204       // TODO: Consider allocating verification bitmaps on demand,
1205       // and turn this on unconditionally.
1206     }
1207   }
1208 }
1209 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1210   return _free_set->capacity();
1211 }
1212 
1213 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1214 private:
1215   MarkBitMap* _bitmap;
1216   ShenandoahScanObjectStack* _oop_stack;
1217   ShenandoahHeap* const _heap;
1218   ShenandoahMarkingContext* const _marking_context;
1500       if (start >= max) break;
1501 
1502       for (size_t i = cur; i < end; i++) {
1503         ShenandoahHeapRegion* current = _heap->get_region(i);
1504         _blk->heap_region_do(current);
1505       }
1506     }
1507   }
1508 };
1509 
1510 void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1511   assert(blk->is_thread_safe(), "Only thread-safe closures here");
1512   if (num_regions() > ShenandoahParallelRegionStride) {
1513     ShenandoahParallelHeapRegionTask task(blk);
1514     workers()->run_task(&task);
1515   } else {
1516     heap_region_iterate(blk);
1517   }
1518 }
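// Note: the parallel path only pays off past ShenandoahParallelRegionStride regions;
// below that, the single-threaded heap_region_iterate avoids the worker hand-off overhead.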
1519 
1520 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1521 private:
1522   ShenandoahMarkingContext* const _ctx;
1523 public:
1524   ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
1525 
1526   void heap_region_do(ShenandoahHeapRegion* r) {
1527     assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
1528     if (r->is_active()) {
1529       // Check if the region needs its TAMS updated. We have already updated it during
1530       // concurrent reset, so it is very likely we don't need to do another write here.
1531       if (_ctx->top_at_mark_start(r) != r->top()) {
1532         _ctx->capture_top_at_mark_start(r);
1533       }
1534     } else {
1535       assert(_ctx->top_at_mark_start(r) == r->top(),
1536              "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
1537     }
1538   }
1539 
1540   bool is_thread_safe() { return true; }
1541 };
1542 
1543 class ShenandoahRendezvousClosure : public HandshakeClosure {
1544 public:
1545   inline ShenandoahRendezvousClosure() : HandshakeClosure("ShenandoahRendezvous") {}
1546   inline void do_thread(Thread* thread) {}
1547 };
1548 
1549 void ShenandoahHeap::rendezvous_threads() {
1550   ShenandoahRendezvousClosure cl;
1551   Handshake::execute(&cl);
1552 }
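// The no-op handshake above acts as a rendezvous: Handshake::execute returns only after
// every Java thread has passed a handshake poll, which guarantees that all threads have
// observed any state (e.g. gc-state flags) published before this call.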
1553 
1554 void ShenandoahHeap::recycle_trash() {
1555   free_set()->recycle_trash();
1556 }
1557 
1558 class ShenandoahResetUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1559 private:
1560   ShenandoahMarkingContext* const _ctx;
1561 public:
1562   ShenandoahResetUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
1563 
1564   void heap_region_do(ShenandoahHeapRegion* r) {
1565     if (r->is_active()) {
1566       // Reset live data and set TAMS optimistically. We would recheck these under the pause
1567       // anyway, to capture any updates that happened in the meantime.
1568       r->clear_live_data();
1569       _ctx->capture_top_at_mark_start(r);
1570     }
1571   }
1572 
1573   bool is_thread_safe() { return true; }
1574 };
1575 
1576 void ShenandoahHeap::prepare_gc() {
1577   reset_mark_bitmap();
1578 
1579   ShenandoahResetUpdateRegionStateClosure cl;
1580   parallel_heap_region_iterate(&cl);
1581 }
1582 
1583 class ShenandoahFinalMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1584 private:
1585   ShenandoahMarkingContext* const _ctx;
1586   ShenandoahHeapLock* const _lock;
1587 
1588 public:
1589   ShenandoahFinalMarkUpdateRegionStateClosure() :
1590     _ctx(ShenandoahHeap::heap()->complete_marking_context()), _lock(ShenandoahHeap::heap()->lock()) {}
1591 
1592   void heap_region_do(ShenandoahHeapRegion* r) {
1593     if (r->is_active()) {
1594       // All allocations past TAMS are implicitly live, adjust the region data.
1595       // Bitmaps/TAMS are swapped at this point, so we need to poll complete bitmap.
1596       HeapWord *tams = _ctx->top_at_mark_start(r);
1597       HeapWord *top = r->top();
1598       if (top > tams) {
1599         r->increase_live_data_alloc_words(pointer_delta(top, tams));
1600       }
1601 
1602       // We are about to select the collection set, so make sure it knows about
1603       // the current pinning status. This also allows trashing more regions whose
1604       // pinning status has since been dropped.
1605       if (r->is_pinned()) {
1606         if (r->pin_count() == 0) {
1607           ShenandoahHeapLocker locker(_lock);
1608           r->make_unpinned();
1609         }
1610       } else {
1611         if (r->pin_count() > 0) {
1612           ShenandoahHeapLocker locker(_lock);
1613           r->make_pinned();
1614         }
1615       }
1616 
1617       // Remember the limit for updating refs. It is guaranteed that no from-space
1618       // refs are written from here on.
1619       r->set_update_watermark_at_safepoint(r->top());
1620     } else {
1621       assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
1622       assert(_ctx->top_at_mark_start(r) == r->top(),
1623              "Region " SIZE_FORMAT " should have correct TAMS", r->index());
1624     }
1625   }
1626 
1627   bool is_thread_safe() { return true; }
1628 };
1629 
1630 void ShenandoahHeap::prepare_regions_and_collection_set(bool concurrent) {
1631   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
1632   {
1633     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_update_region_states :
1634                                          ShenandoahPhaseTimings::degen_gc_final_update_region_states);
1635     ShenandoahFinalMarkUpdateRegionStateClosure cl;
1636     parallel_heap_region_iterate(&cl);
1637 
1638     assert_pinned_region_status();
1639   }
1640 
1641   {
1642     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::choose_cset :
1643                                          ShenandoahPhaseTimings::degen_gc_choose_cset);
1644     ShenandoahHeapLocker locker(lock());
1645     _collection_set->clear();
1646     heuristics()->choose_collection_set(_collection_set);
1647   }
1648 
1649   {
1650     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset :
1651                                          ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
1652     ShenandoahHeapLocker locker(lock());
1653     _free_set->rebuild();
1654   }
1655 }
1656 
1657 void ShenandoahHeap::do_class_unloading() {
1658   _unloader.unload();
1659 }
1660 
1661 void ShenandoahHeap::stw_weak_refs(bool full_gc) {
1662   // Weak refs processing
1663   ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
1664                                                 : ShenandoahPhaseTimings::degen_gc_weakrefs;
1665   ShenandoahTimingsTracker t(phase);
1666   ShenandoahGCWorkerPhase worker_phase(phase);
1667   ref_processor()->process_references(phase, workers(), false /* concurrent */);
1668 }
1669 
1670 void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
1671   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
1672 
1673   // Evacuation is over, so no GCLABs are needed anymore. GCLABs are under the URWM, so
1674   // we need to make them parsable for the update code to work correctly. Plus, we can
1675   // compute new sizes for future GCLABs here.
1676   if (UseTLAB) {
1677     ShenandoahGCPhase phase(concurrent ?
1678                             ShenandoahPhaseTimings::init_update_refs_manage_gclabs :
1679                             ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
1680     gclabs_retire(ResizeTLAB);
1681   }
1682 
1683   _update_refs_iterator.reset();
1684 }
1685 
1686 void ShenandoahHeap::propagate_gc_state_to_java_threads() {
1687   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
1688   if (_gc_state_changed) {
1689     _gc_state_changed = false;
1690     char state = gc_state();
1691     for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1692       ShenandoahThreadLocalData::set_gc_state(t, state);
1693     }
1694   }
1695 }
1696 
1697 void ShenandoahHeap::set_gc_state(uint mask, bool value) {
1698   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
1699   _gc_state.set_cond(mask, value);
1700   _gc_state_changed = true;
1701 }
1702 
1703 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1704   assert(!has_forwarded_objects(), "Not expected before/after mark phase");
1705   set_gc_state(MARKING, in_progress);
1706   ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1707 }
1708 
1709 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1710   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
1711   set_gc_state(EVACUATION, in_progress);
1712 }
1713 
1714 void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
1715   if (in_progress) {
1716     _concurrent_strong_root_in_progress.set();
1717   } else {
1718     _concurrent_strong_root_in_progress.unset();
1719   }
1720 }
1721 
1722 void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
1723   set_gc_state(WEAK_ROOTS, cond);
1724 }
1725 
1726 GCTracer* ShenandoahHeap::tracer() {
1727   return shenandoah_policy()->tracer();
1728 }
1729 
1730 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
1731   return _free_set->used();
1732 }
1733 
1734 bool ShenandoahHeap::try_cancel_gc() {
1735   jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
1736   return prev == CANCELLABLE;
1737 }
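// Note on the cmpxchg above: exactly one thread observes the CANCELLABLE -> CANCELLED
// transition and gets true; concurrent callers see a non-CANCELLABLE value and get false,
// so the logging and eventing in cancel_gc() below happen at most once per cycle.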
1738 
1739 void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
1740   if (try_cancel_gc()) {
1741     FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
1742     log_info(gc)("%s", msg.buffer());
1743     Events::log(Thread::current(), "%s", msg.buffer());
1744   }
1745 }
1746 
1747 uint ShenandoahHeap::max_workers() {
1748   return _max_workers;
1749 }
1750 
1751 void ShenandoahHeap::stop() {
1752   // The shutdown sequence should be able to terminate when GC is running.
1753 
1754   // Step 0. Notify policy to disable event recording.
1755   _shenandoah_policy->record_shutdown();
1756 
1757   // Step 1. Notify control thread that we are in shutdown.
1758   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
1759   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
1760   control_thread()->prepare_for_graceful_shutdown();
1761 
1762   // Step 2. Notify GC workers that we are cancelling GC.
1763   cancel_gc(GCCause::_shenandoah_stop_vm);
1847 }
1848 
1849 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
1850   set_gc_state(HAS_FORWARDED, cond);
1851 }
1852 
1853 void ShenandoahHeap::set_unload_classes(bool uc) {
1854   _unload_classes.set_cond(uc);
1855 }
1856 
1857 bool ShenandoahHeap::unload_classes() const {
1858   return _unload_classes.is_set();
1859 }
1860 
1861 address ShenandoahHeap::in_cset_fast_test_addr() {
1862   ShenandoahHeap* heap = ShenandoahHeap::heap();
1863   assert(heap->collection_set() != nullptr, "Sanity");
1864   return (address) heap->collection_set()->biased_map_address();
1865 }
1866 
1867 size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
1868   return Atomic::load(&_bytes_allocated_since_gc_start);
1869 }
1870 
1871 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
1872   Atomic::store(&_bytes_allocated_since_gc_start, (size_t)0);
1873 }
1874 
1875 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
1876   _degenerated_gc_in_progress.set_cond(in_progress);
1877 }
1878 
1879 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
1880   _full_gc_in_progress.set_cond(in_progress);
1881 }
1882 
1883 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
1884   assert (is_full_gc_in_progress(), "should be");
1885   _full_gc_move_in_progress.set_cond(in_progress);
1886 }
1887 
1888 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
1889   set_gc_state(UPDATEREFS, in_progress);
1890 }
1891 
1892 void ShenandoahHeap::register_nmethod(nmethod* nm) {
1916     if (r->is_active()) {
1917       if (r->is_pinned()) {
1918         if (r->pin_count() == 0) {
1919           r->make_unpinned();
1920         }
1921       } else {
1922         if (r->pin_count() > 0) {
1923           r->make_pinned();
1924         }
1925       }
1926     }
1927   }
1928 
1929   assert_pinned_region_status();
1930 }
1931 
1932 #ifdef ASSERT
1933 void ShenandoahHeap::assert_pinned_region_status() {
1934   for (size_t i = 0; i < num_regions(); i++) {
1935     ShenandoahHeapRegion* r = get_region(i);
1936     assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
1937            "Region " SIZE_FORMAT " pinning status is inconsistent", i);
1938   }
1939 }
1940 #endif
1941 
1942 ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
1943   return _gc_timer;
1944 }
1945 
1946 void ShenandoahHeap::prepare_concurrent_roots() {
1947   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
1948   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
1949   set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
1950   set_concurrent_weak_root_in_progress(true);
1951   if (unload_classes()) {
1952     _unloader.prepare();
1953   }
1954 }
1955 
1956 void ShenandoahHeap::finish_concurrent_roots() {
1957   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
1958   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
1959   if (unload_classes()) {
1960     _unloader.finish();
1961   }
1962 }
1963 
1964 #ifdef ASSERT
1965 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
1966   assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
1967 
1968   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1969     if (UseDynamicNumberOfGCThreads) {
1970       assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");
1971     } else {
1972       // Use ParallelGCThreads inside safepoints
1973       assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints");
1974     }
1975   } else {
1976     if (UseDynamicNumberOfGCThreads) {
1977       assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
1978     } else {
1979       // Use ConcGCThreads outside safepoints
1980       assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
1981     }
1982   }
1983 }
1984 #endif
1985 
1986 ShenandoahVerifier* ShenandoahHeap::verifier() {
1987   guarantee(ShenandoahVerify, "Should be enabled");
1988   assert (_verifier != nullptr, "sanity");
1989   return _verifier;
1990 }
1991 
1992 template<bool CONCURRENT>
1993 class ShenandoahUpdateHeapRefsTask : public WorkerTask {
1994 private:
1995   ShenandoahHeap* _heap;
1996   ShenandoahRegionIterator* _regions;
1997 public:
1998   ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions) :
1999     WorkerTask("Shenandoah Update References"),
2000     _heap(ShenandoahHeap::heap()),
2001     _regions(regions) {
2002   }
2003 
2004   void work(uint worker_id) {
2005     if (CONCURRENT) {
2006       ShenandoahConcurrentWorkerSession worker_session(worker_id);
2007       ShenandoahSuspendibleThreadSetJoiner stsj;
2008       do_work<ShenandoahConcUpdateRefsClosure>();
2009     } else {
2010       ShenandoahParallelWorkerSession worker_session(worker_id);
2011       do_work<ShenandoahSTWUpdateRefsClosure>();
2012     }
2013   }
2014 
2015 private:
2016   template<class T>
2017   void do_work() {









2018     T cl;
2019     ShenandoahHeapRegion* r = _regions->next();
2020     ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
2021     while (r != nullptr) {
2022       HeapWord* update_watermark = r->get_update_watermark();
2023       assert (update_watermark >= r->bottom(), "sanity");
2024       if (r->is_active() && !r->is_cset()) {
2025         _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2026       }
2027       if (ShenandoahPacing) {
2028         _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
2029       }
2030       if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
2031         return;
2032       }
2033       r = _regions->next();
2034     }
2035   }
2036 };
2037 
2038 void ShenandoahHeap::update_heap_references(bool concurrent) {
2039   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2040 
2041   if (concurrent) {
2042     ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator);
2043     workers()->run_task(&task);
2044   } else {
2045     ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator);
2046     workers()->run_task(&task);
2047   }
2048 }
2049 

2050 
2051 class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
2052 private:
2053   ShenandoahHeapLock* const _lock;
2054 
2055 public:
2056   ShenandoahFinalUpdateRefsUpdateRegionStateClosure() : _lock(ShenandoahHeap::heap()->lock()) {}
2057 
2058   void heap_region_do(ShenandoahHeapRegion* r) {
2059     // Drop the unnecessary "pinned" state from regions that do not have CP marks
2060     // anymore, as this allows trashing them.
2061 
2062     if (r->is_active()) {
2063       if (r->is_pinned()) {
2064         if (r->pin_count() == 0) {
2065           ShenandoahHeapLocker locker(_lock);
2066           r->make_unpinned();
2067         }
2068       } else {
2069         if (r->pin_count() > 0) {
2070           ShenandoahHeapLocker locker(_lock);
2071           r->make_pinned();
2072         }
2073       }
2074     }
2075   }
2076 
2077   bool is_thread_safe() { return true; }
2078 };
2079 
2080 void ShenandoahHeap::update_heap_region_states(bool concurrent) {
2081   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2082   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2083 
2084   {
2085     ShenandoahGCPhase phase(concurrent ?
2086                             ShenandoahPhaseTimings::final_update_refs_update_region_states :
2087                             ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
2088     ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl;
2089     parallel_heap_region_iterate(&cl);
2090 
2091     assert_pinned_region_status();
2092   }
2093 
2094   {
2095     ShenandoahGCPhase phase(concurrent ?
2096                             ShenandoahPhaseTimings::final_update_refs_trash_cset :
2097                             ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
2098     trash_cset_regions();
2099   }
2100 }
2101 





2102 void ShenandoahHeap::rebuild_free_set(bool concurrent) {
2103   {
2104     ShenandoahGCPhase phase(concurrent ?
2105                             ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
2106                             ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
2107     ShenandoahHeapLocker locker(lock());
2108     _free_set->rebuild();




































2109   }
2110 }
2111 
2112 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2113   print_on(st);
2114   st->cr();
2115   print_heap_regions_on(st);
2116 }
2117 
2118 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2119   size_t slice = r->index() / _bitmap_regions_per_slice;
2120 
2121   size_t regions_from = _bitmap_regions_per_slice * slice;
2122   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2123   for (size_t g = regions_from; g < regions_to; g++) {
2124     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2125     if (skip_self && g == r->index()) continue;
2126     if (get_region(g)->is_committed()) {
2127       return true;
2128     }

2176   }
2177 
2178   // Uncommit the bitmap slice:
2179   size_t slice = r->index() / _bitmap_regions_per_slice;
2180   size_t off = _bitmap_bytes_per_slice * slice;
2181   size_t len = _bitmap_bytes_per_slice;
2182   if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
2183     return false;
2184   }
2185   return true;
2186 }
2187 
2188 void ShenandoahHeap::safepoint_synchronize_begin() {
2189   SuspendibleThreadSet::synchronize();
2190 }
2191 
2192 void ShenandoahHeap::safepoint_synchronize_end() {
2193   SuspendibleThreadSet::desynchronize();
2194 }
2195 
2196 void ShenandoahHeap::entry_uncommit(double shrink_before, size_t shrink_until) {
2197   static const char *msg = "Concurrent uncommit";
2198   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_uncommit, true /* log_heap_usage */);
2199   EventMark em("%s", msg);
2200 
2201   op_uncommit(shrink_before, shrink_until);
2202 }
2203 
2204 void ShenandoahHeap::try_inject_alloc_failure() {
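       // Note (illustrative arithmetic, not part of the original source): the check
       // below passes for random values 951..999 out of 1000, so an allocation
       // failure is injected on roughly 4.9% of the calls that reach it.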
2205   if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2206     _inject_alloc_failure.set();
2207     os::naked_short_sleep(1);
2208     if (cancelled_gc()) {
2209       log_info(gc)("Allocation failure was successfully injected");
2210     }
2211   }
2212 }
2213 
2214 bool ShenandoahHeap::should_inject_alloc_failure() {
2215   return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2216 }
2217 
2218 void ShenandoahHeap::initialize_serviceability() {
2219   _memory_pool = new ShenandoahMemoryPool(this);
2220   _cycle_memory_manager.add_pool(_memory_pool);
2221   _stw_memory_manager.add_pool(_memory_pool);
2222 }
2223 
2224 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2225   GrowableArray<GCMemoryManager*> memory_managers(2);
2226   memory_managers.append(&_cycle_memory_manager);
2227   memory_managers.append(&_stw_memory_manager);
2228   return memory_managers;
2229 }
2230 
2231 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2232   GrowableArray<MemoryPool*> memory_pools(1);
2233   memory_pools.append(_memory_pool);
2234   return memory_pools;
2235 }
2236 
2237 MemoryUsage ShenandoahHeap::memory_usage() {
2238   return _memory_pool->get_memory_usage();
2239 }
2240 
2241 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2242   _heap(ShenandoahHeap::heap()),
2243   _index(0) {}
2244 
2245 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2246   _heap(heap),
2247   _index(0) {}
2248 
2249 void ShenandoahRegionIterator::reset() {
2250   _index = 0;
2251 }
2252 
2253 bool ShenandoahRegionIterator::has_next() const {
2254   return _index < _heap->num_regions();
2255 }
2256 
2257 char ShenandoahHeap::gc_state() const {
2258   return _gc_state.raw_value();

2283   }
2284 }
2285 
2286 bool ShenandoahHeap::requires_barriers(stackChunkOop obj) const {
2287   if (is_idle()) return false;
2288 
2289   // Objects allocated after marking start are implicitly alive and don't need any barriers
2290   // during the marking phase.
2291   if (is_concurrent_mark_in_progress() &&
2292      !marking_context()->allocated_after_mark_start(obj)) {
2293     return true;
2294   }
2295 
2296   // Cannot guarantee obj is "deeply good": it may still hold references to forwarded objects.
2297   if (has_forwarded_objects()) {
2298     return true;
2299   }
2300 
2301   return false;
2302 }
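     // Illustrative consequence of the checks above (a reading aid, not new logic):
     // during concurrent marking with no forwarded objects around, a stack chunk
     // allocated after mark start needs no barriers, while a chunk allocated before
     // mark start must be processed with barriers, since it has not been scanned yet.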























   1 /*
   2  * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.
   4  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
   5  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   6  *
   7  * This code is free software; you can redistribute it and/or modify it
   8  * under the terms of the GNU General Public License version 2 only, as
   9  * published by the Free Software Foundation.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 #include "precompiled.hpp"
  28 #include "memory/allocation.hpp"
  29 #include "memory/universe.hpp"
  30 
  31 #include "gc/shared/classUnloadingContext.hpp"
  32 #include "gc/shared/gcArguments.hpp"
  33 #include "gc/shared/gcTimer.hpp"
  34 #include "gc/shared/gcTraceTime.inline.hpp"
  35 #include "gc/shared/locationPrinter.inline.hpp"
  36 #include "gc/shared/memAllocator.hpp"
  37 #include "gc/shared/plab.hpp"
  38 #include "gc/shared/tlab_globals.hpp"
  39 
  40 #include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
  41 #include "gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp"
  42 #include "gc/shenandoah/shenandoahAllocRequest.hpp"
  43 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  44 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  45 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  46 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  47 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
  48 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  49 #include "gc/shenandoah/shenandoahControlThread.hpp"
  50 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  51 #include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
  52 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
  53 #include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
  54 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  55 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  56 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  57 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  58 #include "gc/shenandoah/shenandoahInitLogger.hpp"
  59 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  60 #include "gc/shenandoah/shenandoahMemoryPool.hpp"

  61 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  62 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  63 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  64 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
  65 #include "gc/shenandoah/shenandoahPadding.hpp"
  66 #include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
  67 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  68 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  69 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
  70 #include "gc/shenandoah/shenandoahSTWMark.hpp"
  71 #include "gc/shenandoah/shenandoahUtils.hpp"
  72 #include "gc/shenandoah/shenandoahVerifier.hpp"
  73 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  74 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  75 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  76 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  77 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  78 #include "gc/shenandoah/mode/shenandoahGenerationalMode.hpp"
  79 #include "gc/shenandoah/mode/shenandoahIUMode.hpp"
  80 #include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
  81 #include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
  82 #include "utilities/globalDefinitions.hpp"
  83 
  84 #if INCLUDE_JFR
  85 #include "gc/shenandoah/shenandoahJfrSupport.hpp"
  86 #endif
  87 
  88 #include "classfile/systemDictionary.hpp"
  89 #include "code/codeCache.hpp"
  90 #include "memory/classLoaderMetaspace.hpp"
  91 #include "memory/metaspaceUtils.hpp"
  92 #include "oops/compressedOops.inline.hpp"
  93 #include "prims/jvmtiTagMap.hpp"
  94 #include "runtime/atomic.hpp"
  95 #include "runtime/globals.hpp"
  96 #include "runtime/interfaceSupport.inline.hpp"
  97 #include "runtime/java.hpp"
  98 #include "runtime/orderAccess.hpp"
  99 #include "runtime/safepointMechanism.hpp"
 100 #include "runtime/vmThread.hpp"
 101 #include "services/mallocTracker.hpp"
 102 #include "services/memTracker.hpp"
 103 #include "utilities/events.hpp"

 155 jint ShenandoahHeap::initialize() {
 156   //
 157   // Figure out heap sizing
 158   //
 159 
 160   size_t init_byte_size = InitialHeapSize;
 161   size_t min_byte_size  = MinHeapSize;
 162   size_t max_byte_size  = MaxHeapSize;
 163   size_t heap_alignment = HeapAlignment;
 164 
 165   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 166 
 167   Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
 168   Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
 169 
 170   _num_regions = ShenandoahHeapRegion::region_count();
 171   assert(_num_regions == (max_byte_size / reg_size_bytes),
 172          "Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT,
 173          _num_regions, max_byte_size, reg_size_bytes);
 174 



 175   size_t num_committed_regions = init_byte_size / reg_size_bytes;
 176   num_committed_regions = MIN2(num_committed_regions, _num_regions);
 177   assert(num_committed_regions <= _num_regions, "sanity");
 178   _initial_size = num_committed_regions * reg_size_bytes;
 179 
 180   size_t num_min_regions = min_byte_size / reg_size_bytes;
 181   num_min_regions = MIN2(num_min_regions, _num_regions);
 182   assert(num_min_regions <= _num_regions, "sanity");
 183   _minimum_size = num_min_regions * reg_size_bytes;
 184 
 185   // Default to max heap size.
 186   _soft_max_size = _num_regions * reg_size_bytes;
 187 
 188   _committed = _initial_size;
 189 
 190   size_t heap_page_size   = UseLargePages ? os::large_page_size() : os::vm_page_size();
 191   size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 192   size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 193 
 194   //
 195   // Reserve and commit memory for heap
 196   //
 197 
 198   ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
 199   initialize_reserved_region(heap_rs);
 200   _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
 201   _heap_region_special = heap_rs.special();
 202 
 203   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
 204          "Misaligned heap: " PTR_FORMAT, p2i(base()));
 205   os::trace_page_sizes_for_requested_size("Heap",
 206                                           max_byte_size, heap_rs.page_size(), heap_alignment,
 207                                           heap_rs.base(), heap_rs.size());
 208 
 209 #if SHENANDOAH_OPTIMIZED_MARKTASK
 210   // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
 211   // Fail if we ever attempt to address more than we can.
 212   if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
 213     FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
 214                           "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
 215                           "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
 216                 p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
 217     vm_exit_during_initialization("Fatal Error", buf);
 218   }
 219 #endif
 220 
 221   ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
 222   if (!_heap_region_special) {
 223     os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
 224                               "Cannot commit heap memory");
 225   }
 226 
 227   BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this, _heap_region));
 228 
 229   // Now we know the number of regions and heap sizes, initialize the heuristics.
 230   initialize_heuristics();
 231 
 232   assert(_heap_region.byte_size() == heap_rs.size(), "Need to know reserved size for card table");
 233 
 234   //
 235   // Worker threads must be initialized after the barrier is configured
 236   //
 237   _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
 238   if (_workers == nullptr) {
 239     vm_exit_during_initialization("Failed necessary allocation.");
 240   } else {
 241     _workers->initialize_workers();
 242   }
 243 
 244   if (ParallelGCThreads > 1) {
 245     _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread", ParallelGCThreads);
 246     _safepoint_workers->initialize_workers();
 247   }
 248 
 249   //
 250   // Reserve and commit memory for bitmap(s)
 251   //
 252 
 253   size_t bitmap_size_orig = ShenandoahMarkBitMap::compute_size(heap_rs.size());
 254   _bitmap_size = align_up(bitmap_size_orig, bitmap_page_size);
 255 
 256   size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();
 257 
 258   guarantee(bitmap_bytes_per_region != 0,
 259             "Bitmap bytes per region should not be zero");
 260   guarantee(is_power_of_2(bitmap_bytes_per_region),
 261             "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
 262 
 263   if (bitmap_page_size > bitmap_bytes_per_region) {
 264     _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
 265     _bitmap_bytes_per_slice = bitmap_page_size;
 266   } else {
 267     _bitmap_regions_per_slice = 1;
 268     _bitmap_bytes_per_slice = bitmap_bytes_per_region;
 269   }
 270 
 271   guarantee(_bitmap_regions_per_slice >= 1,
 272             "Should have at least one region per slice: " SIZE_FORMAT,
 273             _bitmap_regions_per_slice);
 274 
 275   guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
 276             "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
 277             _bitmap_bytes_per_slice, bitmap_page_size);
 278 
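     // Worked example for the slice sizing above (illustrative numbers, not from
     // this file): with 2M large pages backing the bitmap and 512K of bitmap per
     // region, bitmap_page_size > bitmap_bytes_per_region, so one slice covers
     // 2M / 512K = 4 regions and is committed/uncommitted in 2M units. With 4K
     // pages, each region gets its own 512K slice.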
 279   ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
 280   os::trace_page_sizes_for_requested_size("Mark Bitmap",
 281                                           bitmap_size_orig, bitmap.page_size(), bitmap_page_size,
 282                                           bitmap.base(),
 283                                           bitmap.size());
 284   MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
 285   _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
 286   _bitmap_region_special = bitmap.special();
 287 
 288   size_t bitmap_init_commit = _bitmap_bytes_per_slice *
 289     align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
 290   bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
 291   if (!_bitmap_region_special) {
 292     os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
 293                               "Cannot commit bitmap memory");
 294   }
 295 
 296   _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);
 297 
 298   if (ShenandoahVerify) {
 299     ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
 300     os::trace_page_sizes_for_requested_size("Verify Bitmap",
 301                                             bitmap_size_orig, verify_bitmap.page_size(), bitmap_page_size,
 302                                             verify_bitmap.base(),
 303                                             verify_bitmap.size());
 304     if (!verify_bitmap.special()) {
 305       os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
 306                                 "Cannot commit verification bitmap memory");
 307     }
 308     MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
 309     MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
 310     _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
 311     _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
 312   }
 313 
 314   // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
 315   size_t aux_bitmap_page_size = bitmap_page_size;
 316 #ifdef LINUX
 317   // In THP "advise" mode, we refrain from advising the system to use large pages
 318   // since we know these commits will be short lived, and there is no reason to trash
 319   // the THP area with this bitmap.
 320   if (UseTransparentHugePages) {
 321     aux_bitmap_page_size = os::vm_page_size();
 322   }
 323 #endif
 324   ReservedSpace aux_bitmap(_bitmap_size, aux_bitmap_page_size);
 325   os::trace_page_sizes_for_requested_size("Aux Bitmap",
 326                                           bitmap_size_orig, aux_bitmap.page_size(), aux_bitmap_page_size,
 327                                           aux_bitmap.base(), aux_bitmap.size());
 328   MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
 329   _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
 330   _aux_bitmap_region_special = aux_bitmap.special();
 331   _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
 332 
 333   //
 334   // Create regions and region sets
 335   //
 336   size_t region_align = align_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE);
 337   size_t region_storage_size_orig = region_align * _num_regions;
 338   size_t region_storage_size = align_up(region_storage_size_orig,
 339                                         MAX2(region_page_size, os::vm_allocation_granularity()));
 340 
 341   ReservedSpace region_storage(region_storage_size, region_page_size);
 342   os::trace_page_sizes_for_requested_size("Region Storage",
 343                                           region_storage_size_orig, region_storage.page_size(), region_page_size,
 344                                           region_storage.base(), region_storage.size());
 345   MemTracker::record_virtual_memory_type(region_storage.base(), mtGC);
 346   if (!region_storage.special()) {
 347     os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false,
 348                               "Cannot commit region memory");
 349   }
 350 
 351   // Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks.
 352   // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there.
 353   // If not successful, bite the bullet and allocate at whatever address the OS gives us.
 354   {
 355     const size_t cset_align = MAX2<size_t>(os::vm_page_size(), os::vm_allocation_granularity());
 356     const size_t cset_size = align_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align);
 357     const size_t cset_page_size = os::vm_page_size();
 358 
 359     uintptr_t min = round_up_power_of_2(cset_align);
 360     uintptr_t max = (1u << 30u);
 361     ReservedSpace cset_rs;
 362 
 363     for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
 364       char* req_addr = (char*)addr;
 365       assert(is_aligned(req_addr, cset_align), "Should be aligned");
 366       cset_rs = ReservedSpace(cset_size, cset_align, cset_page_size, req_addr);
 367       if (cset_rs.is_reserved()) {
 368         assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
 369         _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 370         break;
 371       }
 372     }
 373 
 374     if (_collection_set == nullptr) {
 375       cset_rs = ReservedSpace(cset_size, cset_align, os::vm_page_size());
 376       _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 377     }
 378     os::trace_page_sizes_for_requested_size("Collection Set",
 379                                             cset_size, cset_rs.page_size(), cset_page_size,
 380                                             cset_rs.base(),
 381                                             cset_rs.size());
 382   }
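     // Worked example for the reservation loop above (illustrative numbers): with
     // a 64K cset_align, the probed addresses are 64K, 128K, 256K, ... doubling up
     // to 1G, and the first successful reservation wins. Only if every probe fails
     // does the fallback accept an arbitrary address from the OS.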
 383 
 384   _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
 385   _affiliations = NEW_C_HEAP_ARRAY(uint8_t, _num_regions, mtGC);
 386   _free_set = new ShenandoahFreeSet(this, _num_regions);
 387 
 388   {
 389     ShenandoahHeapLocker locker(lock());
 390 
 391 
 392     for (size_t i = 0; i < _num_regions; i++) {
 393       HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
 394       bool is_committed = i < num_committed_regions;
 395       void* loc = region_storage.base() + i * region_align;
 396 
 397       ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
 398       assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");
 399 
 400       _marking_context->initialize_top_at_mark_start(r);
 401       _regions[i] = r;
 402       assert(!collection_set()->is_in(i), "New region should not be in collection set");
 403 
 404       _affiliations[i] = ShenandoahAffiliation::FREE;
 405     }
 406 
 407     // Initialize to complete
 408     _marking_context->mark_complete();
 409     size_t young_cset_regions, old_cset_regions;
 410 
 411     // We are initializing the free set, so we ignore the cset region tallies.
 412     size_t first_old, last_old, num_old;
 413     _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
 414     _free_set->rebuild(young_cset_regions, old_cset_regions);
 415   }
 416 
 417   if (AlwaysPreTouch) {
 418     // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
 419     // before initialize() below zeroes it with the initializing thread. For any given region,
 420     // we touch the region and the corresponding bitmaps from the same thread.
 421     ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
 422 
 423     _pretouch_heap_page_size = heap_page_size;
 424     _pretouch_bitmap_page_size = bitmap_page_size;
 425 
 426 #ifdef LINUX
 427     // UseTransparentHugePages would madvise that backing memory can be coalesced into huge
 428     // pages. But the kernel needs to know that every small page is used, in order to coalesce
 429     // them into huge ones. Therefore, we need to pretouch with smaller pages.
 430     if (UseTransparentHugePages) {
 431       _pretouch_heap_page_size = (size_t)os::vm_page_size();
 432       _pretouch_bitmap_page_size = (size_t)os::vm_page_size();
 433     }
 434 #endif

 451   for (uint worker = 0; worker < _max_workers; worker++) {
 452     _liveness_cache[worker] = NEW_C_HEAP_ARRAY(ShenandoahLiveData, _num_regions, mtGC);
 453     Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
 454   }
 455 
 456   // There should probably be Shenandoah-specific options for these,
 457   // just as there are G1-specific options.
 458   {
 459     ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
 460     satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
 461     satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
 462   }
 463 
 464   _monitoring_support = new ShenandoahMonitoringSupport(this);
 465   _phase_timings = new ShenandoahPhaseTimings(max_workers());
 466   ShenandoahCodeRoots::initialize();
 467 
 468   if (ShenandoahPacing) {
 469     _pacer = new ShenandoahPacer(this);
 470     _pacer->setup_for_idle();


 471   }
 472 
 473   initialize_controller();
 474 
 475   print_init_logger();
 476 
 477   return JNI_OK;
 478 }
 479 
 480 void ShenandoahHeap::initialize_controller() {
 481   _control_thread = new ShenandoahControlThread();
 482 }
 483 
 484 void ShenandoahHeap::print_init_logger() const {
 485   ShenandoahInitLogger::print();
 486 }
 487 
 488 void ShenandoahHeap::initialize_mode() {
 489   if (ShenandoahGCMode != nullptr) {
 490     if (strcmp(ShenandoahGCMode, "satb") == 0) {
 491       _gc_mode = new ShenandoahSATBMode();
 492     } else if (strcmp(ShenandoahGCMode, "iu") == 0) {
 493       _gc_mode = new ShenandoahIUMode();
 494     } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
 495       _gc_mode = new ShenandoahPassiveMode();
 496     } else if (strcmp(ShenandoahGCMode, "generational") == 0) {
 497       _gc_mode = new ShenandoahGenerationalMode();
 498     } else {
 499       vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
 500     }
 501   } else {
 502     vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
 503   }
 504   _gc_mode->initialize_flags();
 505   if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 506     vm_exit_during_initialization(
 507             err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 508                     _gc_mode->name()));
 509   }
 510   if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
 511     vm_exit_during_initialization(
 512             err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 513                     _gc_mode->name()));
 514   }
 515 }
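     // Usage note (an illustration, not from the original source): given the checks
     // above, a mode that reports itself as diagnostic (historically the passive
     // mode) can only be selected together with the matching unlock flag, e.g.:
     //
     //   java -XX:+UseShenandoahGC -XX:+UnlockDiagnosticVMOptions \
     //        -XX:ShenandoahGCMode=passive ...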
 516 
 517 void ShenandoahHeap::initialize_heuristics() {
 518   _global_generation = new ShenandoahGlobalGeneration(mode()->is_generational(), max_workers(), max_capacity(), max_capacity());
 519   _global_generation->initialize_heuristics(mode());
 520   _evac_tracker = new ShenandoahEvacuationTracker(mode()->is_generational());










 521 }
 522 
 523 #ifdef _MSC_VER
 524 #pragma warning( push )
 525 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 526 #endif
 527 
 528 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 529   CollectedHeap(),
 530   _gc_generation(nullptr),
 531   _active_generation(nullptr),
 532   _initial_size(0),

 533   _committed(0),
 534   _max_workers(MAX3(ConcGCThreads, ParallelGCThreads, 1U)),

 535   _workers(nullptr),
 536   _safepoint_workers(nullptr),
 537   _heap_region_special(false),
 538   _num_regions(0),
 539   _regions(nullptr),
 540   _affiliations(nullptr),
 541   _gc_state_changed(false),
 542   _gc_no_progress_count(0),
 543   _cancel_requested_time(0),
 544   _update_refs_iterator(this),
 545   _global_generation(nullptr),
 546   _control_thread(nullptr),
 547   _young_generation(nullptr),
 548   _old_generation(nullptr),
 549   _shenandoah_policy(policy),
 550   _gc_mode(nullptr),

 551   _free_set(nullptr),
 552   _pacer(nullptr),
 553   _verifier(nullptr),
 554   _phase_timings(nullptr),
 555   _evac_tracker(nullptr),
 556   _mmu_tracker(),
 557   _monitoring_support(nullptr),
 558   _memory_pool(nullptr),
 559   _stw_memory_manager("Shenandoah Pauses"),
 560   _cycle_memory_manager("Shenandoah Cycles"),
 561   _gc_timer(new ConcurrentGCTimer()),
 562   _soft_ref_policy(),
 563   _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),

 564   _marking_context(nullptr),
 565   _bitmap_size(0),
 566   _bitmap_regions_per_slice(0),
 567   _bitmap_bytes_per_slice(0),
 568   _bitmap_region_special(false),
 569   _aux_bitmap_region_special(false),
 570   _liveness_cache(nullptr),
 571   _collection_set(nullptr)
 572 {
 573   // Initialize GC mode early, many subsequent initialization procedures depend on it
 574   initialize_mode();















 575 }
 576 
 577 #ifdef _MSC_VER
 578 #pragma warning( pop )
 579 #endif
 580 





























 581 void ShenandoahHeap::print_on(outputStream* st) const {
 582   st->print_cr("Shenandoah Heap");
 583   st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
 584                byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
 585                byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
 586                byte_size_in_proper_unit(committed()),    proper_unit_for_byte_size(committed()),
 587                byte_size_in_proper_unit(used()),         proper_unit_for_byte_size(used()));
 588   st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions",
 589                num_regions(),
 590                byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
 591                proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));
 592 
 593   st->print("Status: ");
 594   if (has_forwarded_objects())                 st->print("has forwarded objects, ");
 595   if (is_concurrent_old_mark_in_progress())    st->print("old marking, ");
 596   if (is_concurrent_young_mark_in_progress())  st->print("young marking, ");
 597   if (is_evacuation_in_progress())             st->print("evacuating, ");
 598   if (is_update_refs_in_progress())            st->print("updating refs, ");
 599   if (is_degenerated_gc_in_progress())         st->print("degenerated gc, ");
 600   if (is_full_gc_in_progress())                st->print("full gc, ");
 601   if (is_full_gc_move_in_progress())           st->print("full gc move, ");
 602   if (is_concurrent_weak_root_in_progress())   st->print("concurrent weak roots, ");
 603   if (is_concurrent_strong_root_in_progress() &&
 604       !is_concurrent_weak_root_in_progress())  st->print("concurrent strong roots, ");
 605 
 606   if (cancelled_gc()) {
 607     st->print("cancelled");
 608   } else {
 609     st->print("not cancelled");
 610   }
 611   st->cr();
 612 
 613   st->print_cr("Reserved region:");
 614   st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
 615                p2i(reserved_region().start()),
 616                p2i(reserved_region().end()));

 627   st->cr();
 628   MetaspaceUtils::print_on(st);
 629 
 630   if (Verbose) {
 631     st->cr();
 632     print_heap_regions_on(st);
 633   }
 634 }
 635 
 636 class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
 637 public:
 638   void do_thread(Thread* thread) {
 639     assert(thread != nullptr, "Sanity");
 640     assert(thread->is_Worker_thread(), "Only worker thread expected");
 641     ShenandoahThreadLocalData::initialize_gclab(thread);
 642   }
 643 };
 644 
 645 void ShenandoahHeap::post_initialize() {
 646   CollectedHeap::post_initialize();
 647   _mmu_tracker.initialize();
 648 
 649   MutexLocker ml(Threads_lock);
 650 
 651   ShenandoahInitWorkerGCLABClosure init_gclabs;
 652   _workers->threads_do(&init_gclabs);
 653 
 654   // GCLABs cannot be initialized early during VM startup, because their max_size cannot be determined yet.
 655   // Instead, we let WorkerThreads initialize the GCLAB when a new worker is created.
 656   _workers->set_initialize_gclab();
 657   if (_safepoint_workers != nullptr) {
 658     _safepoint_workers->threads_do(&init_gclabs);
 659     _safepoint_workers->set_initialize_gclab();
 660   }
 661 


 662   JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers());
 663 }
 664 
 665 ShenandoahHeuristics* ShenandoahHeap::heuristics() {
 666   return _global_generation->heuristics();
 667 }
 668 
 669 size_t ShenandoahHeap::used() const {
 670   return global_generation()->used();
 671 }
 672 
 673 size_t ShenandoahHeap::committed() const {
 674   return Atomic::load(&_committed);
 675 }
 676 
 677 void ShenandoahHeap::increase_committed(size_t bytes) {
 678   shenandoah_assert_heaplocked_or_safepoint();
 679   _committed += bytes;
 680 }
 681 
 682 void ShenandoahHeap::decrease_committed(size_t bytes) {
 683   shenandoah_assert_heaplocked_or_safepoint();
 684   _committed -= bytes;
 685 }
 686 
 687 // For tracking usage based on allocations, it should be the case that:
 688 // * The sum of regions::used == heap::used
 689 // * The sum of a generation's regions::used == generation::used
 690 // * The sum of a generation's humongous regions::free == generation::humongous_waste
 691 // These invariants are checked by the verifier on GC safepoints.
 692 //
 693 // Additional notes:
 694 // * When a mutator's allocation request causes a region to be retired, the
 695 //   free memory left in that region is considered waste. It does not contribute
 696 //   to the usage, but it _does_ contribute to allocation rate.
 697 // * The bottom of a PLAB must be aligned on card size. In some cases this will
 698 //   require padding in front of the PLAB (a filler object). Because this padding
 699 //   is included in the region's used memory we include the padding in the usage
 700 //   accounting as waste.
 701 // * Mutator allocations are used to compute an allocation rate. They are also
 702 //   sent to the Pacer for the same purpose.
 703 // * There are three sources of waste:
 704 //  1. The padding used to align a PLAB on card size
 705 //  2. A region whose free memory is less than the minimum TLAB size is retired, and its remaining free memory becomes waste
 706 //  3. The unused portion of memory in the last region of a humongous object
 707 void ShenandoahHeap::increase_used(const ShenandoahAllocRequest& req) {
 708   size_t actual_bytes = req.actual_size() * HeapWordSize;
 709   size_t wasted_bytes = req.waste() * HeapWordSize;
 710   ShenandoahGeneration* generation = generation_for(req.affiliation());
 711 
 712   if (req.is_gc_alloc()) {
 713     assert(wasted_bytes == 0 || req.type() == ShenandoahAllocRequest::_alloc_plab, "Only PLABs have waste");
 714     increase_used(generation, actual_bytes + wasted_bytes);
 715   } else {
 716     assert(req.is_mutator_alloc(), "Expected mutator alloc here");
 717     // padding and actual size both count towards allocation counter
 718     generation->increase_allocated(actual_bytes + wasted_bytes);
 719 
 720     // only actual size counts toward usage for mutator allocations
 721     increase_used(generation, actual_bytes);
 722 
 723     // notify pacer of both actual size and waste
 724     notify_mutator_alloc_words(req.actual_size(), req.waste());
 725 
 726     if (wasted_bytes > 0 && req.actual_size() > ShenandoahHeapRegion::humongous_threshold_words()) {
 727       increase_humongous_waste(generation, wasted_bytes);
 728     }
 729   }
 730 }
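     // A minimal sketch (illustrative, not part of the original source) of how the
     // first invariant documented above could be spot-checked at a GC safepoint;
     // the real checking is done by the verifier, as noted in the comment:
     #ifdef ASSERT
     static void assert_regions_used_matches_heap_used() {
       ShenandoahHeap* heap = ShenandoahHeap::heap();
       size_t sum = 0;
       for (size_t i = 0; i < heap->num_regions(); i++) {
         sum += heap->get_region(i)->used();  // per-region accounting
       }
       assert(sum == heap->used(), "Sum of regions::used should equal heap::used");
     }
     #endif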
 731 
 732 void ShenandoahHeap::increase_humongous_waste(ShenandoahGeneration* generation, size_t bytes) {
 733   generation->increase_humongous_waste(bytes);
 734   if (!generation->is_global()) {
 735     global_generation()->increase_humongous_waste(bytes);
 736   }
 737 }
 738 
 739 void ShenandoahHeap::decrease_humongous_waste(ShenandoahGeneration* generation, size_t bytes) {
 740   generation->decrease_humongous_waste(bytes);
 741   if (!generation->is_global()) {
 742     global_generation()->decrease_humongous_waste(bytes);
 743   }
 744 }
 745 
 746 void ShenandoahHeap::increase_used(ShenandoahGeneration* generation, size_t bytes) {
 747   generation->increase_used(bytes);
 748   if (!generation->is_global()) {
 749     global_generation()->increase_used(bytes);
 750   }
 751 }
 752 
 753 void ShenandoahHeap::decrease_used(ShenandoahGeneration* generation, size_t bytes) {
 754   generation->decrease_used(bytes);
 755   if (!generation->is_global()) {
 756     global_generation()->decrease_used(bytes);
 757   }
 758 }
 759 
 760 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, size_t waste) {
 761   if (ShenandoahPacing) {
 762     control_thread()->pacing_notify_alloc(words);
 763     if (waste > 0) {
 764       pacer()->claim_for_alloc(waste, true);
 765     }
 766   }
 767 }
 768 
 769 size_t ShenandoahHeap::capacity() const {
 770   return committed();
 771 }
 772 
 773 size_t ShenandoahHeap::max_capacity() const {
 774   return _num_regions * ShenandoahHeapRegion::region_size_bytes();
 775 }
 776 
 777 size_t ShenandoahHeap::soft_max_capacity() const {
 778   size_t v = Atomic::load(&_soft_max_size);
 779   assert(min_capacity() <= v && v <= max_capacity(),
 780          "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
 781          min_capacity(), v, max_capacity());
 782   return v;
 783 }
 784 
 785 void ShenandoahHeap::set_soft_max_capacity(size_t v) {
 786   assert(min_capacity() <= v && v <= max_capacity(),
 787          "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
 788          min_capacity(), v, max_capacity());
 789   Atomic::store(&_soft_max_size, v);
 790 }
 791 
 792 size_t ShenandoahHeap::min_capacity() const {
 793   return _minimum_size;
 794 }
 795 
 796 size_t ShenandoahHeap::initial_capacity() const {
 797   return _initial_size;
 798 }
 799 
 800 void ShenandoahHeap::maybe_uncommit(double shrink_before, size_t shrink_until) {
 801   assert (ShenandoahUncommit, "should be enabled");
 802 
 803   // Determine if there is work to do. This avoids taking the heap lock if there is
 804   // no work available, avoids spamming logs with superfluous messages,
 805   // and minimizes the amount of work done while locks are taken.
 806 
 807   if (committed() <= shrink_until) return;
 808 
 809   bool has_work = false;
 810   for (size_t i = 0; i < num_regions(); i++) {
 811     ShenandoahHeapRegion* r = get_region(i);
 812     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
 813       has_work = true;
 814       break;
 815     }
 816   }
 817 
 818   if (has_work) {
 819     static const char* msg = "Concurrent uncommit";
 820     ShenandoahConcurrentPhase gcPhase(msg, ShenandoahPhaseTimings::conc_uncommit, true /* log_heap_usage */);
 821     EventMark em("%s", msg);
 822 
 823     op_uncommit(shrink_before, shrink_until);
 824   }
 825 }
 826 
 827 void ShenandoahHeap::op_uncommit(double shrink_before, size_t shrink_until) {
 828   assert (ShenandoahUncommit, "should be enabled");
 829 
 830   // The application allocates from the beginning of the heap, while GC allocates at
 831   // the end of it. It is more efficient to uncommit from the end, so that the application
 832   // can keep enjoying the nearby committed regions. GC allocations are much less frequent,
 833   // and can therefore accept the committing costs.
 834 
 835   size_t count = 0;
 836   for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow
 837     ShenandoahHeapRegion* r = get_region(i - 1);
 838     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
 839       ShenandoahHeapLocker locker(lock());
 840       if (r->is_empty_committed()) {
 841         if (committed() < shrink_until + ShenandoahHeapRegion::region_size_bytes()) {
 842           break;
 843         }
 844 
 845         r->make_uncommitted();
 846         count++;
 847       }
 848     }
 849     SpinPause(); // allow allocators to take the lock
 850   }
 851 
 852   if (count > 0) {
 853     notify_heap_changed();
 854   }
 855 }
 856 
 857 bool ShenandoahHeap::check_soft_max_changed() {
 858   size_t new_soft_max = Atomic::load(&SoftMaxHeapSize);
 859   size_t old_soft_max = soft_max_capacity();
 860   if (new_soft_max != old_soft_max) {
 861     new_soft_max = MAX2(min_capacity(), new_soft_max);
 862     new_soft_max = MIN2(max_capacity(), new_soft_max);
 863     if (new_soft_max != old_soft_max) {
 864       log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
 865                    byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
 866                    byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
 867       );
 868       set_soft_max_capacity(new_soft_max);
 869       return true;
 870     }
 871   }
 872   return false;
 873 }
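     // Worked example for the clamping above (illustrative sizes): with
     // min_capacity() = 512M and max_capacity() = 8G, an updated SoftMaxHeapSize
     // of 16G is clamped down to 8G, and 128M is raised to 512M, before the new
     // value is logged and stored.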
 874 
 875 void ShenandoahHeap::notify_heap_changed() {
 876   // Update monitoring counters when we take a new region. This amortizes the
 877   // update costs on the slow path.
 878   monitoring_support()->notify_heap_changed();
 879   _heap_changed.set();
 880 }
 881 
 882 void ShenandoahHeap::set_forced_counters_update(bool value) {
 883   monitoring_support()->set_forced_counters_update(value);
 884 }
 885 
 886 void ShenandoahHeap::handle_force_counters_update() {
 887   monitoring_support()->handle_force_counters_update();
 888 }
 889 
 890 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
 891   // New object should fit the GCLAB size
 892   size_t min_size = MAX2(size, PLAB::min_size());
 893 
 894   // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
 895   size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
 896 
 897   new_size = MIN2(new_size, PLAB::max_size());
 898   new_size = MAX2(new_size, PLAB::min_size());
 899 
 900   // Record the new heuristic value even if we take a shortcut. This captures
 901   // the case when moderately-sized objects always take the shortcut. At some point,
 902   // the heuristics should catch up with them.
 903   log_debug(gc, free)("Set new GCLAB size: " SIZE_FORMAT, new_size);
 904   ShenandoahThreadLocalData::set_gclab_size(thread, new_size);
 905 
 906   if (new_size < size) {
 907     // New size still does not fit the object. Fall back to shared allocation.
 908     // This avoids retiring perfectly good GCLABs when we encounter a large object.
 909     log_debug(gc, free)("New gclab size (" SIZE_FORMAT ") is too small for " SIZE_FORMAT, new_size, size);
 910     return nullptr;
 911   }
 912 
 913   // Retire current GCLAB, and allocate a new one.
 914   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
 915   gclab->retire();
 916 
 917   size_t actual_size = 0;
 918   HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
 919   if (gclab_buf == nullptr) {
 920     return nullptr;
 921   }
 922 
 923   assert (size <= actual_size, "allocation should fit");
 924 
 925   if (ZeroTLAB) {
 926     // ..and clear it.
 927     Copy::zero_to_words(gclab_buf, actual_size);
 928   } else {
 929     // ...and zap just allocated object.
 930 #ifdef ASSERT
 931     // Skip mangling the space corresponding to the object header to
 932     // ensure that the returned space is not considered parsable by
 933     // any concurrent GC thread.
 934     size_t hdr_size = oopDesc::header_size();
 935     Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
 936 #endif // ASSERT
 937   }
 938   gclab->set_buf(gclab_buf, actual_size);
 939   return gclab->allocate(size);
 940 }
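     // Worked example for the sizing above (illustrative numbers): a thread whose
     // recorded GCLAB size is 64K words and fails a 512-word allocation gets
     // new_size = 128K words (subject to the PLAB min/max clamps) and a fresh
     // GCLAB. If the request were 256K words instead, new_size < size would hold,
     // and the allocation would fall back to a shared allocation without retiring
     // the current GCLAB.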
 941 
 942 // Called from stubs in JIT code or interpreter
 943 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
 944                                             size_t requested_size,
 945                                             size_t* actual_size) {
 946   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
 947   HeapWord* res = allocate_memory(req);
 948   if (res != nullptr) {
 949     *actual_size = req.actual_size();
 950   } else {
 951     *actual_size = 0;
 952   }
 953   return res;
 954 }
 955 
 956 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
 957                                              size_t word_size,
 958                                              size_t* actual_size) {
 959   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
 960   HeapWord* res = allocate_memory(req);
 961   if (res != nullptr) {
 962     *actual_size = req.actual_size();

 964     *actual_size = 0;
 965   }
 966   return res;
 967 }
 968 
 969 HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
 970   intptr_t pacer_epoch = 0;
 971   bool in_new_region = false;
 972   HeapWord* result = nullptr;
 973 
 974   if (req.is_mutator_alloc()) {
 975     if (ShenandoahPacing) {
 976       pacer()->pace_for_alloc(req.size());
 977       pacer_epoch = pacer()->epoch();
 978     }
 979 
 980     if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
 981       result = allocate_memory_under_lock(req, in_new_region);
 982     }
 983 
 984     // Check that the GC overhead limit is not exceeded.
 985     //
 986     // Shenandoah will grind along for quite a while allocating one
 987     // object at a time using shared (non-TLAB) allocations. This check
 988     // tests that the GC overhead limit has not been exceeded.
 989     // This notifies the collector to start a cycle, but raises
 990     // an OOME to the mutator if the last Full GCs have not made progress.
 991     if (result == nullptr && !req.is_lab_alloc() && get_gc_no_progress_count() > ShenandoahNoProgressThreshold) {
 992       control_thread()->handle_alloc_failure(req, false);
 993       return nullptr;
 994     }
 995 
 996     // Block until the control thread has reacted, then retry the allocation.
 997     //
 998     // It might happen that one of the threads requesting allocation unblocks
 999     // well after the GC has happened, only to fail its second allocation, because
1000     // other threads have already depleted the free storage. In this case, a better
1001     // strategy is to try again, as long as GC makes progress (or until at least
1002     // one full GC has completed).
1003     size_t original_count = shenandoah_policy()->full_gc_count();
1004     while (result == nullptr
1005         && (get_gc_no_progress_count() == 0 || original_count == shenandoah_policy()->full_gc_count())) {
1006       control_thread()->handle_alloc_failure(req, true);
1007       result = allocate_memory_under_lock(req, in_new_region);
1008     }
1009 
1010     if (log_is_enabled(Debug, gc, alloc)) {
1011       ResourceMark rm;
1012       log_debug(gc, alloc)("Thread: %s, Result: " PTR_FORMAT ", Request: %s, Size: " SIZE_FORMAT ", Original: " SIZE_FORMAT ", Latest: " SIZE_FORMAT,
1013                            Thread::current()->name(), p2i(result), req.type_string(), req.size(), original_count, get_gc_no_progress_count());
1014     }
1015   } else {
1016     assert(req.is_gc_alloc(), "Can only accept GC allocs here");
1017     result = allocate_memory_under_lock(req, in_new_region);
1018     // Do not call handle_alloc_failure() here, because we cannot block.
1019     // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
1020   }
1021 
1022   if (in_new_region) {
1023     notify_heap_changed();
1024   }
1025 
1026   if (result == nullptr) {
1027     req.set_actual_size(0);
1028   }
1029 
1030   // This is called regardless of the outcome of the allocation to account
1031   // for any waste created by retiring regions with this request.
1032   increase_used(req);
1033 
1034   if (result != nullptr) {
1035     size_t requested = req.size();
1036     size_t actual = req.actual_size();
1037 
1038     assert (req.is_lab_alloc() || (requested == actual),
1039             "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
1040             ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);
1041 
1042     if (req.is_mutator_alloc()) {


1043       // If we requested more than we were granted, give the rest back to pacer.
1044       // This only matters if we are in the same pacing epoch: do not try to unpace
1045       // over the budget for the other phase.
1046       if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
1047         pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
1048       }


1049     }
1050   }
1051 
1052   return result;
1053 }
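     // Illustrative walk-through of the mutator retry loop above: a thread that
     // fails its first allocation calls handle_alloc_failure() and blocks until
     // the control thread reacts. While cycles keep making progress
     // (get_gc_no_progress_count() == 0), it retries indefinitely; once cycles
     // stop making progress, it retries only until full_gc_count() moves past
     // original_count. After that the loop exits, the caller sees nullptr, and
     // the failure surfaces as an OOME.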
1054 
1055 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
1056   // If we are dealing with a mutator allocation, then we may need to block for a safepoint.
1057   // We cannot block for a safepoint for GC allocations, because there is a high chance
1058   // we are already running at a safepoint or from the stack watermark machinery, and we cannot
1059   // block again.
1060   ShenandoahHeapLocker locker(lock(), req.is_mutator_alloc());
1061 
1062   // Make sure the old generation has room for either evacuations or promotions before trying to allocate.
1063   if (req.is_old() && !old_generation()->can_allocate(req)) {
1064     return nullptr;
1065   }
1066 
1067   // If the TLAB request size is greater than the available memory, allocate() will attempt
1068   // to downsize the request to fit within the available memory.
1069   HeapWord* result = _free_set->allocate(req, in_new_region);
1070 
1071   // Record the plab configuration for this result and register the object.
1072   if (result != nullptr && req.is_old()) {
1073     old_generation()->configure_plab_for_current_thread(req);
1074     if (req.type() == ShenandoahAllocRequest::_alloc_shared_gc) {
1075       // Register the newly allocated object while we're holding the global lock, since there's no synchronization
1076       // built into the implementation of register_object().  There are potential races when multiple independent
1077       // threads are allocating objects, some of which might span the same card region.  For example, consider
1078       // a card table's memory region within which three objects are being allocated by three different threads:
1079       //
1080       // objects being "concurrently" allocated:
1081       //    [-----a------][-----b-----][--------------c------------------]
1082       //            [---- card table memory range --------------]
1083       //
1084       // Before any objects are allocated, this card's memory range holds no objects.  Note that allocation of object a
1085       // wants to set the starts-object, first-start, and last-start attributes of the preceding card region.
1086       // Allocation of object b wants to set the starts-object, first-start, and last-start attributes of this card region.
1087       // Allocation of object c also wants to set the starts-object, first-start, and last-start attributes of this
1088       // card region.
1089       //
1090       // The thread allocating b and the thread allocating c can "race" in various ways, resulting in confusion, such as
1091       // last-start representing object b while first-start represents object c.  This is why we need to require all
1092       // register_object() invocations to be "mutually exclusive" with respect to each card's memory range.
1093       old_generation()->card_scan()->register_object(result);
1094     }
1095   }
1096 
1097   return result;
1098 }
1099 
1100 HeapWord* ShenandoahHeap::mem_allocate(size_t size,
1101                                         bool*  gc_overhead_limit_was_exceeded) {
1102   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
1103   return allocate_memory(req);
1104 }
1105 
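// Metadata allocation failed. Record the metaspace OOM for the heuristics (so a later cycle may unload
// classes), then escalate: expand metaspace, run a full GC that clears soft refs, retry the allocation,
// and finally try expanding once more.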
1106 MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
1107                                                              size_t size,
1108                                                              Metaspace::MetadataType mdtype) {
1109   MetaWord* result;
1110 
1111   // Inform metaspace OOM to GC heuristics if class unloading is possible.
1112   ShenandoahHeuristics* h = global_generation()->heuristics();
1113   if (h->can_unload_classes()) {
1114     h->record_metaspace_oom();
1115   }
1116 
1117   // Expand and retry allocation
1118   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1119   if (result != nullptr) {
1120     return result;
1121   }
1122 
1123   // Start full GC
1124   collect(GCCause::_metadata_GC_clear_soft_refs);
1125 
1126   // Retry allocation
1127   result = loader_data->metaspace_non_null()->allocate(size, mdtype);
1128   if (result != nullptr) {
1129     return result;
1130   }
1131 
1132   // Expand and retry allocation
1133   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);

1186 private:
1187   void do_work() {
1188     ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
1189     ShenandoahHeapRegion* r;
1190     while ((r = _cs->claim_next()) != nullptr) {
1191       assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
1192       _sh->marked_object_iterate(r, &cl);
1193 
1194       if (ShenandoahPacing) {
1195         _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
1196       }
1197 
1198       if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
1199         break;
1200       }
1201     }
1202   }
1203 };
1204 
1205 void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
1206   if (mode()->is_generational()) {
1207     ShenandoahRegionIterator regions;
1208     ShenandoahGenerationalEvacuationTask task(ShenandoahGenerationalHeap::heap(), &regions, concurrent);
1209     workers()->run_task(&task);
1210   } else {
1211     ShenandoahEvacuationTask task(this, _collection_set, concurrent);
1212     workers()->run_task(&task);
1213   }
1214 }
1215 
1216 oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
1217   assert(thread == Thread::current(), "Expected thread parameter to be current thread.");
1218   if (ShenandoahThreadLocalData::is_oom_during_evac(thread)) {
1219     // This thread went through the OOM during evac protocol. It is safe to return
1220     // the forward pointer. It must not attempt to evacuate any other objects.
1221     return ShenandoahBarrierSet::resolve_forwarded(p);
1222   }
1223 
1224   assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
1225 
1226   ShenandoahHeapRegion* r = heap_region_containing(p);
1227   assert(!r->is_humongous(), "never evacuate humongous objects");
1228 
1229   ShenandoahAffiliation target_gen = r->affiliation();
1230   return try_evacuate_object(p, thread, r, target_gen);
1231 }
1232 
1233 oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapRegion* from_region,
1234                                                ShenandoahAffiliation target_gen) {
1235   assert(target_gen == YOUNG_GENERATION, "Only expect evacuations to young in this mode");
1236   assert(from_region->is_young(), "Only expect evacuations from young in this mode");
1237   bool alloc_from_lab = true;
1238   HeapWord* copy = nullptr;
1239   size_t size = p->size();
1240 
1241 #ifdef ASSERT
1242   if (ShenandoahOOMDuringEvacALot &&
1243       (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
1244     copy = nullptr;
1245   } else {
1246 #endif
1247     if (UseTLAB) {
1248       copy = allocate_from_gclab(thread, size);
1249       if ((copy == nullptr) && (size < ShenandoahThreadLocalData::gclab_size(thread))) {
1250         // GCLAB allocation failed because we are bumping up against the limit on the young evacuation reserve.
1251         // Reset the desired GCLAB size and retry GCLAB allocation to avoid a cascade of shared memory allocations.
1252         // TODO: is this right? using PLAB::min_size() here for gc lab size?
1253         ShenandoahThreadLocalData::set_gclab_size(thread, PLAB::min_size());
1254         copy = allocate_from_gclab(thread, size);
1255         // If we still get nullptr, we'll try a shared allocation below.
1256       }
1257     }
1258 
1259     if (copy == nullptr) {
1260       // If we failed to allocate in LAB, we'll try a shared allocation.
1261       ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, target_gen);
1262       copy = allocate_memory(req);
1263       alloc_from_lab = false;
1264     }
1265 #ifdef ASSERT
1266   }
1267 #endif
1268 
1269   if (copy == nullptr) {
1270     control_thread()->handle_alloc_failure_evac(size);
1271 
1272     _oom_evac_handler.handle_out_of_memory_during_evacuation();
1273 
1274     return ShenandoahBarrierSet::resolve_forwarded(p);
1275   }
1276 
1277   // Copy the object:
1278   _evac_tracker->begin_evacuation(thread, size * HeapWordSize);
1279   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
1280 
1281   oop copy_val = cast_to_oop(copy);
1282 
1283   // Try to install the new forwarding pointer.
1284   ContinuationGCSupport::relativize_stack_chunk(copy_val);
1285 
1286   oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
1287   if (result == copy_val) {
1288     // Successfully evacuated. Our copy is now the public one!
1289     _evac_tracker->end_evacuation(thread, size * HeapWordSize);
1290     shenandoah_assert_correct(nullptr, copy_val);
1291     return copy_val;
1292   } else {
1293     // Failed to evacuate. We need to deal with the object that is left behind. Since this
1294     // new allocation is certainly after TAMS, it will be considered live in the next cycle.
1295     // But if it happens to contain references to evacuated regions, those references would
1296     // not get updated for this stale copy during this cycle, and we will crash while scanning
1297     // it the next cycle.
1298     if (alloc_from_lab) {
1299       // For LAB allocations, it is enough to rollback the allocation ptr. Either the next
1300       // object will overwrite this stale copy, or the filler object on LAB retirement will
1301       // do this.
1302       ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
1303     } else {
1304       // For non-LAB allocations, we have no way to retract the allocation, and
1305       // have to explicitly overwrite the copy with the filler object. With that overwrite,
1306       // we have to keep the fwdptr initialized and pointing to our (stale) copy.
1307       assert(size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
1308       fill_with_object(copy, size);
1309       shenandoah_assert_correct(nullptr, copy_val);
1310       // For non-LAB allocations, the object has already been registered
1311     }
1312     shenandoah_assert_correct(nullptr, result);
1313     return result;
1314   }
1315 }
1316 
1317 void ShenandoahHeap::trash_cset_regions() {
1318   ShenandoahHeapLocker locker(lock());
1319 
1320   ShenandoahCollectionSet* set = collection_set();
1321   ShenandoahHeapRegion* r;
1322   set->clear_current_index();
1323   while ((r = set->next()) != nullptr) {
1324     r->make_trash();
1325   }
1326   collection_set()->clear();
1327 }
1328 
1329 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
1330   st->print_cr("Heap Regions:");
1331   st->print_cr("Region state: EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HP=pinned humongous start");
1332   st->print_cr("              HC=humongous continuation, CS=collection set, TR=trash, P=pinned, CSP=pinned collection set");
1333   st->print_cr("BTE=bottom/top/end, TAMS=top-at-mark-start");
1334   st->print_cr("UWM=update watermark, U=used");
1335   st->print_cr("T=TLAB allocs, G=GCLAB allocs");
1336   st->print_cr("S=shared allocs, L=live data");
1337   st->print_cr("CP=critical pins");
1338 
1339   for (size_t i = 0; i < num_regions(); i++) {
1340     get_region(i)->print_on(st);
1341   }
1342 }
1343 
1344 size_t ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
1345   assert(start->is_humongous_start(), "reclaim regions starting with the first one");
1346 
1347   oop humongous_obj = cast_to_oop(start->bottom());
1348   size_t size = humongous_obj->size();
1349   size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
1350   size_t index = start->index() + required_regions - 1;
1351 
1352   assert(!start->has_live(), "liveness must be zero");
1353 
1354   for (size_t i = 0; i < required_regions; i++) {
1355     // Reclaim from the tail. Otherwise, an assertion fails when printing the region to the trace log,
1356     // as printing expects every humongous continuation to follow a humongous start region.
1357     ShenandoahHeapRegion* region = get_region(index--);
1358 
1359     assert(region->is_humongous(), "expect correct humongous start or continuation");
1360     assert(!region->is_cset(), "Humongous region should not be in collection set");
1361 
1362     region->make_trash_immediate();
1363   }
1364   return required_regions;
1365 }
1366 
1367 class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
1368 public:
1369   ShenandoahCheckCleanGCLABClosure() {}
1370   void do_thread(Thread* thread) {
1371     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1372     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1373     assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
1374 
1375     if (ShenandoahHeap::heap()->mode()->is_generational()) {
1376       PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1377       assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
1378       assert(plab->words_remaining() == 0, "PLAB should not need retirement");
1379     }
1380   }
1381 };
1382 
1383 class ShenandoahRetireGCLABClosure : public ThreadClosure {
1384 private:
1385   bool const _resize;
1386 public:
1387   ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
1388   void do_thread(Thread* thread) {
1389     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1390     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1391     gclab->retire();
1392     if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1393       ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1394     }
1395 
1396     if (ShenandoahHeap::heap()->mode()->is_generational()) {
1397       PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1398       assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
1399 
1400       // There are two reasons to retire all plabs between old-gen evacuation passes.
1401       //  1. We need to make the plab memory parsable by remembered-set scanning.
1402       //  2. We need to establish a trustworthy UpdateWaterMark value within each old-gen heap region.
1403       ShenandoahGenerationalHeap::heap()->retire_plab(plab, thread);
1404       if (_resize && ShenandoahThreadLocalData::plab_size(thread) > 0) {
1405         ShenandoahThreadLocalData::set_plab_size(thread, 0);
1406       }
1407     }
1408   }
1409 };
1410 
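// Retire all LABs without resizing: make mutator TLABs parsable and retire the GCLABs (and, in generational
// mode, PLABs) of Java and GC worker threads, so the heap can be walked linearly.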
1411 void ShenandoahHeap::labs_make_parsable() {
1412   assert(UseTLAB, "Only call with UseTLAB");
1413 
1414   ShenandoahRetireGCLABClosure cl(false);
1415 
1416   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1417     ThreadLocalAllocBuffer& tlab = t->tlab();
1418     tlab.make_parsable();
1419     cl.do_thread(t);
1420   }
1421 
1422   workers()->threads_do(&cl);
1423 }
1424 
1425 void ShenandoahHeap::tlabs_retire(bool resize) {
1426   assert(UseTLAB, "Only call with UseTLAB");
1427   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");

1489   }
1490   return nullptr;
1491 }
1492 
1493 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1494   ShenandoahHeapRegion* r = heap_region_containing(addr);
1495   return r->block_is_obj(addr);
1496 }
1497 
1498 bool ShenandoahHeap::print_location(outputStream* st, void* addr) const {
1499   return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr);
1500 }
1501 
1502 void ShenandoahHeap::prepare_for_verify() {
1503   if (SafepointSynchronize::is_at_safepoint() && UseTLAB) {
1504     labs_make_parsable();
1505   }
1506 }
1507 
1508 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1509   if (_shenandoah_policy->is_at_shutdown()) {
1510     return;
1511   }
1512 
1513   if (_control_thread != nullptr) {
1514     tcl->do_thread(_control_thread);
1515   }
1516 
1517   workers()->threads_do(tcl);
1518   if (_safepoint_workers != nullptr) {
1519     _safepoint_workers->threads_do(tcl);
1520   }
1521 }
1522 
1523 void ShenandoahHeap::print_tracing_info() const {
1524   LogTarget(Info, gc, stats) lt;
1525   if (lt.is_enabled()) {
1526     ResourceMark rm;
1527     LogStream ls(lt);
1528 
1529     phase_timings()->print_global_on(&ls);
1530 
1531     ls.cr();
1532     ls.cr();
1533 
1534     shenandoah_policy()->print_gc_stats(&ls);
1535 
1536     ls.cr();
1537 
1538     evac_tracker()->print_global_on(&ls);
1539 
1540     ls.cr();
1541     ls.cr();
1542   }
1543 }
1544 
1545 void ShenandoahHeap::set_gc_generation(ShenandoahGeneration* generation) {
1546   shenandoah_assert_control_or_vm_thread_at_safepoint();
1547   _gc_generation = generation;
1548 }
1549 
1550 // Active generation may only be set by the VM thread at a safepoint.
1551 void ShenandoahHeap::set_active_generation() {
1552   assert(Thread::current()->is_VM_thread(), "Only the VM Thread");
1553   assert(SafepointSynchronize::is_at_safepoint(), "Only at a safepoint!");
1554   assert(_gc_generation != nullptr, "Will set _active_generation to nullptr");
1555   _active_generation = _gc_generation;
1556 }
1557 
1558 void ShenandoahHeap::on_cycle_start(GCCause::Cause cause, ShenandoahGeneration* generation) {
1559   shenandoah_policy()->record_collection_cause(cause);
1560 
1561   assert(gc_cause() == GCCause::_no_gc, "Over-writing cause");
1562   assert(_gc_generation == nullptr, "Over-writing _gc_generation");
1563 
1564   set_gc_cause(cause);
1565   set_gc_generation(generation);
1566 
1567   generation->heuristics()->record_cycle_start();
1568 }
1569 
1570 void ShenandoahHeap::on_cycle_end(ShenandoahGeneration* generation) {
1571   assert(gc_cause() != GCCause::_no_gc, "cause wasn't set");
1572   assert(_gc_generation != nullptr, "_gc_generation wasn't set");
1573 
1574   generation->heuristics()->record_cycle_end();
1575   if (mode()->is_generational() && generation->is_global()) {
1576     // If we just completed a GLOBAL GC, claim credit for completion of young-gen and old-gen GC as well
1577     young_generation()->heuristics()->record_cycle_end();
1578     old_generation()->heuristics()->record_cycle_end();
1579   }
1580 
1581   set_gc_generation(nullptr);
1582   set_gc_cause(GCCause::_no_gc);
1583 }
1584 
1585 void ShenandoahHeap::verify(VerifyOption vo) {
1586   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1587     if (ShenandoahVerify) {
1588       verifier()->verify_generic(vo);
1589     } else {
1590       // TODO: Consider allocating verification bitmaps on demand,
1591       // and turn this on unconditionally.
1592     }
1593   }
1594 }
1595 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1596   return _free_set->capacity();
1597 }
1598 
1599 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1600 private:
1601   MarkBitMap* _bitmap;
1602   ShenandoahScanObjectStack* _oop_stack;
1603   ShenandoahHeap* const _heap;
1604   ShenandoahMarkingContext* const _marking_context;

1886       if (start >= max) break;
1887 
1888       for (size_t i = cur; i < end; i++) {
1889         ShenandoahHeapRegion* current = _heap->get_region(i);
1890         _blk->heap_region_do(current);
1891       }
1892     }
1893   }
1894 };
1895 
1896 void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1897   assert(blk->is_thread_safe(), "Only thread-safe closures here");
1898   if (num_regions() > ShenandoahParallelRegionStride) {
1899     ShenandoahParallelHeapRegionTask task(blk);
1900     workers()->run_task(&task);
1901   } else {
1902     heap_region_iterate(blk);
1903   }
1904 }
1905 
1906 class ShenandoahRendezvousClosure : public HandshakeClosure {
1907 public:
1908   inline ShenandoahRendezvousClosure() : HandshakeClosure("ShenandoahRendezvous") {}
1909   inline void do_thread(Thread* thread) {}
1910 };
1911 
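// Execute an empty handshake with every Java thread. This does no per-thread work, but guarantees that all
// threads have passed a synchronization point and therefore observed any state published before the rendezvous.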
1912 void ShenandoahHeap::rendezvous_threads() {
1913   ShenandoahRendezvousClosure cl;
1914   Handshake::execute(&cl);
1915 }
1916 
1917 void ShenandoahHeap::recycle_trash() {
1918   free_set()->recycle_trash();
1919 }
1920 
1921 void ShenandoahHeap::do_class_unloading() {
1922   _unloader.unload();
1923   if (mode()->is_generational()) {
1924     old_generation()->set_parseable(false);
1925   }
1926 }
1927 
1928 void ShenandoahHeap::stw_weak_refs(bool full_gc) {
1929   // Weak refs processing
1930   ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
1931                                                 : ShenandoahPhaseTimings::degen_gc_weakrefs;
1932   ShenandoahTimingsTracker t(phase);
1933   ShenandoahGCWorkerPhase worker_phase(phase);
1934   shenandoah_assert_generations_reconciled();
1935   gc_generation()->ref_processor()->process_references(phase, workers(), false /* concurrent */);
1936 }
1937 
1938 void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
1939   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
1940 
1941   // Evacuation is over; no GCLABs are needed anymore. GCLABs are under URWM, so we need to
1942   // make them parsable for update code to work correctly. Plus, we can compute new sizes
1943   // for future GCLABs here.
1944   if (UseTLAB) {
1945     ShenandoahGCPhase phase(concurrent ?
1946                             ShenandoahPhaseTimings::init_update_refs_manage_gclabs :
1947                             ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
1948     gclabs_retire(ResizeTLAB);
1949   }
1950 
1951   _update_refs_iterator.reset();
1952 }
1953 
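// Mirror the canonical gc state into each Java thread's thread-local copy, but only if it has actually
// changed since the last propagation.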
1954 void ShenandoahHeap::propagate_gc_state_to_java_threads() {
1955   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
1956   if (_gc_state_changed) {
1957     _gc_state_changed = false;
1958     char state = gc_state();
1959     for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1960       ShenandoahThreadLocalData::set_gc_state(t, state);
1961     }
1962   }
1963 }
1964 
1965 void ShenandoahHeap::set_gc_state(uint mask, bool value) {
1966   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
1967   _gc_state.set_cond(mask, value);
1968   _gc_state_changed = true;
1969   // Check that if concurrent weak root is set then active_gen isn't null
1970   assert(!is_concurrent_weak_root_in_progress() || active_generation() != nullptr, "Error");
1971   shenandoah_assert_generations_reconciled();
1972 }
1973 
1974 void ShenandoahHeap::set_concurrent_young_mark_in_progress(bool in_progress) {
1975   uint mask;
1976   assert(!has_forwarded_objects(), "Young marking is not concurrent with evacuation");
1977   if (!in_progress && is_concurrent_old_mark_in_progress()) {
1978     assert(mode()->is_generational(), "Only generational GC has old marking");
1979     assert(_gc_state.is_set(MARKING), "concurrent_old_marking_in_progress implies MARKING");
1980     // If old-marking is in progress when we turn off YOUNG_MARKING, leave MARKING (and OLD_MARKING) on
1981     mask = YOUNG_MARKING;
1982   } else {
1983     mask = MARKING | YOUNG_MARKING;
1984   }
1985   set_gc_state(mask, in_progress);
1986   manage_satb_barrier(in_progress);
1987 }
1988 
1989 void ShenandoahHeap::set_concurrent_old_mark_in_progress(bool in_progress) {
1990 #ifdef ASSERT
1991   // has_forwarded_objects() iff UPDATEREFS or EVACUATION
1992   bool has_forwarded = has_forwarded_objects();
1993   bool updating_or_evacuating = _gc_state.is_set(UPDATEREFS | EVACUATION);
1994   bool evacuating = _gc_state.is_set(EVACUATION);
1995   assert ((has_forwarded == updating_or_evacuating) || (evacuating && !has_forwarded && collection_set()->is_empty()),
1996           "Updating or evacuating iff has forwarded objects, or if evacuation phase is promoting in place without forwarding");
1997 #endif
1998   if (!in_progress && is_concurrent_young_mark_in_progress()) {
1999     // If young-marking is in progress when we turn off OLD_MARKING, leave MARKING (and YOUNG_MARKING) on
2000     assert(_gc_state.is_set(MARKING), "concurrent_young_marking_in_progress implies MARKING");
2001     set_gc_state(OLD_MARKING, in_progress);
2002   } else {
2003     set_gc_state(MARKING | OLD_MARKING, in_progress);
2004   }
2005   manage_satb_barrier(in_progress);
2006 }
2007 
2008 bool ShenandoahHeap::is_prepare_for_old_mark_in_progress() const {
2009   return old_generation()->is_preparing_for_mark();
2010 }
2011 
2012 void ShenandoahHeap::manage_satb_barrier(bool active) {
2013   if (is_concurrent_mark_in_progress()) {
2014     // Ignore request to deactivate barrier while concurrent mark is in progress.
2015     // Do not attempt to re-activate the barrier if it is already active.
2016     if (active && !ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
2017       ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
2018     }
2019   } else {
2020     // No concurrent marking is in progress so honor request to deactivate,
2021     // but only if the barrier is already active.
2022     if (!active && ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
2023       ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
2024     }
2025   }
2026 }
2027 
2028 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
2029   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
2030   set_gc_state(EVACUATION, in_progress);
2031 }
2032 
2033 void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
2034   if (in_progress) {
2035     _concurrent_strong_root_in_progress.set();
2036   } else {
2037     _concurrent_strong_root_in_progress.unset();
2038   }
2039 }
2040 
2041 void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
2042   set_gc_state(WEAK_ROOTS, cond);
2043 }
2044 
2045 GCTracer* ShenandoahHeap::tracer() {
2046   return shenandoah_policy()->tracer();
2047 }
2048 
2049 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
2050   return _free_set->used();
2051 }
2052 
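// Atomically transition from CANCELLABLE to CANCELLED. Returns true only for the single thread that wins
// the race and thereby owns the cancellation.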
2053 bool ShenandoahHeap::try_cancel_gc() {
2054   jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
2055   return prev == CANCELLABLE;
2056 }
2057 
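// Cancel marking in all generations and abandon any partially filled SATB buffers.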
2058 void ShenandoahHeap::cancel_concurrent_mark() {
2059   if (mode()->is_generational()) {
2060     young_generation()->cancel_marking();
2061     old_generation()->cancel_marking();
2062   }
2063 
2064   global_generation()->cancel_marking();
2065 
2066   ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
2067 }
2068 
2069 void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
2070   if (try_cancel_gc()) {
2071     FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
2072     log_info(gc)("%s", msg.buffer());
2073     Events::log(Thread::current(), "%s", msg.buffer());
2074     _cancel_requested_time = os::elapsedTime();
2075   }
2076 }
2077 
2078 uint ShenandoahHeap::max_workers() {
2079   return _max_workers;
2080 }
2081 
2082 void ShenandoahHeap::stop() {
2083   // The shutdown sequence should be able to terminate when GC is running.
2084 
2085   // Step 0. Notify policy to disable event recording.
2086   _shenandoah_policy->record_shutdown();
2087 
2088   // Step 1. Notify control thread that we are in shutdown.
2089   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
2090   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
2091   control_thread()->prepare_for_graceful_shutdown();
2092 
2093   // Step 2. Notify GC workers that we are cancelling GC.
2094   cancel_gc(GCCause::_shenandoah_stop_vm);

2178 }
2179 
2180 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
2181   set_gc_state(HAS_FORWARDED, cond);
2182 }
2183 
2184 void ShenandoahHeap::set_unload_classes(bool uc) {
2185   _unload_classes.set_cond(uc);
2186 }
2187 
2188 bool ShenandoahHeap::unload_classes() const {
2189   return _unload_classes.is_set();
2190 }
2191 
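// Address of the biased collection-set membership map; presumably handed to the barrier fast path so that
// an in-cset check compiles down to a single byte load off this base.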
2192 address ShenandoahHeap::in_cset_fast_test_addr() {
2193   ShenandoahHeap* heap = ShenandoahHeap::heap();
2194   assert(heap->collection_set() != nullptr, "Sanity");
2195   return (address) heap->collection_set()->biased_map_address();
2196 }
2197 
2198 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
2199   if (mode()->is_generational()) {
2200     young_generation()->reset_bytes_allocated_since_gc_start();
2201     old_generation()->reset_bytes_allocated_since_gc_start();
2202   }
2203 
2204   global_generation()->reset_bytes_allocated_since_gc_start();
2205 }
2206 
2207 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
2208   _degenerated_gc_in_progress.set_cond(in_progress);
2209 }
2210 
2211 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2212   _full_gc_in_progress.set_cond(in_progress);
2213 }
2214 
2215 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2216   assert (is_full_gc_in_progress(), "should be");
2217   _full_gc_move_in_progress.set_cond(in_progress);
2218 }
2219 
2220 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2221   set_gc_state(UPDATEREFS, in_progress);
2222 }
2223 
2224 void ShenandoahHeap::register_nmethod(nmethod* nm) {

2248     if (r->is_active()) {
2249       if (r->is_pinned()) {
2250         if (r->pin_count() == 0) {
2251           r->make_unpinned();
2252         }
2253       } else {
2254         if (r->pin_count() > 0) {
2255           r->make_pinned();
2256         }
2257       }
2258     }
2259   }
2260 
2261   assert_pinned_region_status();
2262 }
2263 
2264 #ifdef ASSERT
2265 void ShenandoahHeap::assert_pinned_region_status() {
2266   for (size_t i = 0; i < num_regions(); i++) {
2267     ShenandoahHeapRegion* r = get_region(i);
2268     shenandoah_assert_generations_reconciled();
2269     if (gc_generation()->contains(r)) {
2270       assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
2271              "Region " SIZE_FORMAT " pinning status is inconsistent", i);
2272     }
2273   }
2274 }
2275 #endif
2276 
2277 ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
2278   return _gc_timer;
2279 }
2280 
2281 void ShenandoahHeap::prepare_concurrent_roots() {
2282   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2283   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2284   set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
2285   set_concurrent_weak_root_in_progress(true);
2286   if (unload_classes()) {
2287     _unloader.prepare();
2288   }
2289 }
2290 
2291 void ShenandoahHeap::finish_concurrent_roots() {
2292   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2293   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2294   if (unload_classes()) {
2295     _unloader.finish();
2296   }
2297 }
2298 
2299 #ifdef ASSERT
2300 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
2301   assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
2302 
2303   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
2304     // Use ParallelGCThreads inside safepoints
2305     assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads (%u) within safepoint, not %u",
2306            ParallelGCThreads, nworkers);
2307   } else {
2308     // Use ConcGCThreads outside safepoints
2309     assert(nworkers == ConcGCThreads, "Use ConcGCThreads (%u) outside safepoints, not %u",
2310            ConcGCThreads, nworkers);
2311   }
2312 }
2313 #endif
2314 
2315 ShenandoahVerifier* ShenandoahHeap::verifier() {
2316   guarantee(ShenandoahVerify, "Should be enabled");
2317   assert (_verifier != nullptr, "sanity");
2318   return _verifier;
2319 }
2320 
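// Updates references in all live objects up to each region's update watermark. The CONCURRENT template
// parameter selects the closure flavor and worker session, and decides whether workers join the
// suspendible thread set.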
2321 template<bool CONCURRENT>
2322 class ShenandoahUpdateHeapRefsTask : public WorkerTask {
2323 private:
2324   ShenandoahHeap* _heap;
2325   ShenandoahRegionIterator* _regions;
2326 public:
2327   explicit ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions) :
2328     WorkerTask("Shenandoah Update References"),
2329     _heap(ShenandoahHeap::heap()),
2330     _regions(regions) {
2331   }
2332 
2333   void work(uint worker_id) {
2334     if (CONCURRENT) {
2335       ShenandoahConcurrentWorkerSession worker_session(worker_id);
2336       ShenandoahSuspendibleThreadSetJoiner stsj;
2337       do_work<ShenandoahConcUpdateRefsClosure>(worker_id);
2338     } else {
2339       ShenandoahParallelWorkerSession worker_session(worker_id);
2340       do_work<ShenandoahSTWUpdateRefsClosure>(worker_id);
2341     }
2342   }
2343 
2344 private:
2345   template<class T>
2346   void do_work(uint worker_id) {
2347     if (CONCURRENT && (worker_id == 0)) {
2348       // We ask the first worker to replenish the Mutator free set by moving regions previously reserved to hold the
2349       // results of evacuation.  These reserves are no longer necessary because evacuation has completed.
2350       size_t cset_regions = _heap->collection_set()->count();
2351       // We cannot transfer any more regions than will be reclaimed when the existing collection set is recycled, because
2352       // we need the reclaimed collection set regions to replenish the collector reserves
2353       _heap->free_set()->move_collector_sets_to_mutator(cset_regions);
2354     }
2355     // If !CONCURRENT, there's no value in expanding Mutator free set
2356     T cl;
2357     ShenandoahHeapRegion* r = _regions->next();

2358     while (r != nullptr) {
2359       HeapWord* update_watermark = r->get_update_watermark();
2360       assert (update_watermark >= r->bottom(), "sanity");
2361       if (r->is_active() && !r->is_cset()) {
2362         _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2363         if (ShenandoahPacing) {
2364           _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
2365         }
2366       }
2367       if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
2368         return;
2369       }
2370       r = _regions->next();
2371     }
2372   }
2373 };
2374 
2375 void ShenandoahHeap::update_heap_references(bool concurrent) {
2376   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2377 
2378   if (concurrent) {
2379     ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator);
2380     workers()->run_task(&task);
2381   } else {
2382     ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator);
2383     workers()->run_task(&task);
2384   }
2385 }
2386 
2387 ShenandoahSynchronizePinnedRegionStates::ShenandoahSynchronizePinnedRegionStates() : _lock(ShenandoahHeap::heap()->lock()) { }
2388 
2389 void ShenandoahSynchronizePinnedRegionStates::heap_region_do(ShenandoahHeapRegion* r) {
2390   // Drop "pinned" state from regions that no longer have a pinned count. Put
2391   // regions with a pinned count into the "pinned" state.
2392   if (r->is_active()) {
2393     if (r->is_pinned()) {
2394       if (r->pin_count() == 0) {
2395         ShenandoahHeapLocker locker(_lock);
2396         r->make_unpinned();
2397       }
2398     } else {
2399       if (r->pin_count() > 0) {
2400         ShenandoahHeapLocker locker(_lock);
2401         r->make_pinned();
2402       }
2403     }
2404   }
2405 }
2406 
2407 void ShenandoahHeap::update_heap_region_states(bool concurrent) {
2408   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2409   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2410 
2411   {
2412     ShenandoahGCPhase phase(concurrent ?
2413                             ShenandoahPhaseTimings::final_update_refs_update_region_states :
2414                             ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
2415 
2416     final_update_refs_update_region_states();
2417 
2418     assert_pinned_region_status();
2419   }
2420 
2421   {
2422     ShenandoahGCPhase phase(concurrent ?
2423                             ShenandoahPhaseTimings::final_update_refs_trash_cset :
2424                             ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
2425     trash_cset_regions();
2426   }
2427 }
2428 
2429 void ShenandoahHeap::final_update_refs_update_region_states() {
2430   ShenandoahSynchronizePinnedRegionStates cl;
2431   parallel_heap_region_iterate(&cl);
2432 }
2433 
2434 void ShenandoahHeap::rebuild_free_set(bool concurrent) {
2435   ShenandoahGCPhase phase(concurrent ?
2436                           ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
2437                           ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
2438   ShenandoahHeapLocker locker(lock());
2439   size_t young_cset_regions, old_cset_regions;
2440   size_t first_old_region, last_old_region, old_region_count;
2441   _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count);
2442   // If there are no old regions, first_old_region will be greater than last_old_region
2443   assert((first_old_region > last_old_region) ||
2444          ((last_old_region + 1 - first_old_region >= old_region_count) &&
2445           get_region(first_old_region)->is_old() && get_region(last_old_region)->is_old()),
2446          "sanity: old_region_count: " SIZE_FORMAT ", first_old_region: " SIZE_FORMAT ", last_old_region: " SIZE_FORMAT,
2447          old_region_count, first_old_region, last_old_region);
2448 
2449   if (mode()->is_generational()) {
2450 #ifdef ASSERT
2451     if (ShenandoahVerify) {
2452       verifier()->verify_before_rebuilding_free_set();
2453     }
2454 #endif
2455 
2456     // The computation of bytes_of_allocation_runway_before_gc_trigger is quite conservative so consider all of this
2457     // available for transfer to old. Note that transfer of humongous regions does not impact available.
2458     ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
2459     size_t allocation_runway = gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions);
2460     gen_heap->compute_old_generation_balance(allocation_runway, old_cset_regions);
2461 
2462     // Total old_available may have been expanded to hold anticipated promotions.  We trigger if the fragmented available
2463     // memory represents more than 16 regions worth of data.  Note that fragmentation may increase when we promote regular
2464     // regions in place when many of these regular regions have an abundant amount of available memory within them.  Fragmentation
2465     // will decrease as promote-by-copy consumes the available memory within these partially consumed regions.
2466     //
2467     // We consider old-gen to have excessive fragmentation if more than 12.5% of old-gen is free memory that resides
2468     // within partially consumed regions of memory.
2469   }
2470   // Rebuild free set based on adjusted generation sizes.
2471   _free_set->rebuild(young_cset_regions, old_cset_regions);
2472 
2473   if (mode()->is_generational()) {
2474     ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
2475     ShenandoahOldGeneration* old_gen = gen_heap->old_generation();
2476     old_gen->heuristics()->trigger_maybe(first_old_region, last_old_region, old_region_count, num_regions());
2477   }
2478 }
2479 
2480 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2481   print_on(st);
2482   st->cr();
2483   print_heap_regions_on(st);
2484 }
2485 
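// A bitmap slice backs _bitmap_regions_per_slice regions. The slice must stay committed while any region
// it covers is committed (optionally ignoring r itself, when skip_self is set).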
2486 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2487   size_t slice = r->index() / _bitmap_regions_per_slice;
2488 
2489   size_t regions_from = _bitmap_regions_per_slice * slice;
2490   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2491   for (size_t g = regions_from; g < regions_to; g++) {
2492     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2493     if (skip_self && g == r->index()) continue;
2494     if (get_region(g)->is_committed()) {
2495       return true;
2496     }

2544   }
2545 
2546   // Uncommit the bitmap slice:
2547   size_t slice = r->index() / _bitmap_regions_per_slice;
2548   size_t off = _bitmap_bytes_per_slice * slice;
2549   size_t len = _bitmap_bytes_per_slice;
2550   if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
2551     return false;
2552   }
2553   return true;
2554 }
2555 
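// Safepointing hooks: stop and resume the suspendible concurrent GC threads around a safepoint.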
2556 void ShenandoahHeap::safepoint_synchronize_begin() {
2557   SuspendibleThreadSet::synchronize();
2558 }
2559 
2560 void ShenandoahHeap::safepoint_synchronize_end() {
2561   SuspendibleThreadSet::desynchronize();
2562 }
2563 
2564 void ShenandoahHeap::try_inject_alloc_failure() {
2565   if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2566     _inject_alloc_failure.set();
2567     os::naked_short_sleep(1);
2568     if (cancelled_gc()) {
2569       log_info(gc)("Allocation failure was successfully injected");
2570     }
2571   }
2572 }
2573 
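// Test-and-clear: each injected failure is consumed by at most one allocation attempt.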
2574 bool ShenandoahHeap::should_inject_alloc_failure() {
2575   return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2576 }
2577 
2578 void ShenandoahHeap::initialize_serviceability() {
2579   _memory_pool = new ShenandoahMemoryPool(this);
2580   _cycle_memory_manager.add_pool(_memory_pool);
2581   _stw_memory_manager.add_pool(_memory_pool);
2582 }
2583 
2584 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2585   GrowableArray<GCMemoryManager*> memory_managers(2);
2586   memory_managers.append(&_cycle_memory_manager);
2587   memory_managers.append(&_stw_memory_manager);
2588   return memory_managers;
2589 }
2590 
2591 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2592   GrowableArray<MemoryPool*> memory_pools(1);
2593   memory_pools.append(_memory_pool);
2594   return memory_pools;
2595 }
2596 
2597 MemoryUsage ShenandoahHeap::memory_usage() {
2598   return MemoryUsage(_initial_size, used(), committed(), max_capacity());
2599 }
2600 
2601 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2602   _heap(ShenandoahHeap::heap()),
2603   _index(0) {}
2604 
2605 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2606   _heap(heap),
2607   _index(0) {}
2608 
2609 void ShenandoahRegionIterator::reset() {
2610   _index = 0;
2611 }
2612 
2613 bool ShenandoahRegionIterator::has_next() const {
2614   return _index < _heap->num_regions();
2615 }
2616 
2617 char ShenandoahHeap::gc_state() const {
2618   return _gc_state.raw_value();

2643   }
2644 }
2645 
2646 bool ShenandoahHeap::requires_barriers(stackChunkOop obj) const {
2647   if (is_idle()) return false;
2648 
2649   // Objects allocated after marking start are implicitly alive and don't need any barriers during
2650   // the marking phase.
2651   if (is_concurrent_mark_in_progress() &&
2652      !marking_context()->allocated_after_mark_start(obj)) {
2653     return true;
2654   }
2655 
2656   // Cannot guarantee obj is deeply good: it may still reference forwarded objects.
2657   if (has_forwarded_objects()) {
2658     return true;
2659   }
2660 
2661   return false;
2662 }
2663 
2664 ShenandoahGeneration* ShenandoahHeap::generation_for(ShenandoahAffiliation affiliation) const {
2665   if (!mode()->is_generational()) {
2666     return global_generation();
2667   } else if (affiliation == YOUNG_GENERATION) {
2668     return young_generation();
2669   } else if (affiliation == OLD_GENERATION) {
2670     return old_generation();
2671   }
2672 
2673   ShouldNotReachHere();
2674   return nullptr;
2675 }
2676 
2677 void ShenandoahHeap::log_heap_status(const char* msg) const {
2678   if (mode()->is_generational()) {
2679     young_generation()->log_status(msg);
2680     old_generation()->log_status(msg);
2681   } else {
2682     global_generation()->log_status(msg);
2683   }
2684 }