
src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp


   1 /*
   2  * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.

   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "memory/allocation.hpp"
  28 #include "memory/universe.hpp"
  29 
  30 #include "gc/shared/classUnloadingContext.hpp"
  31 #include "gc/shared/fullGCForwarding.hpp"
  32 #include "gc/shared/gcArguments.hpp"
  33 #include "gc/shared/gcTimer.hpp"
  34 #include "gc/shared/gcTraceTime.inline.hpp"
  35 #include "gc/shared/locationPrinter.inline.hpp"
  36 #include "gc/shared/memAllocator.hpp"
  37 #include "gc/shared/plab.hpp"
  38 #include "gc/shared/tlab_globals.hpp"
  39 



  40 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  41 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  42 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  43 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  44 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
  45 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  46 #include "gc/shenandoah/shenandoahControlThread.hpp"
  47 #include "gc/shenandoah/shenandoahFreeSet.hpp"



  48 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  49 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  50 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  51 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  52 #include "gc/shenandoah/shenandoahInitLogger.hpp"
  53 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  54 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
  55 #include "gc/shenandoah/shenandoahMetrics.hpp"
  56 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"

  57 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
  58 #include "gc/shenandoah/shenandoahPadding.hpp"
  59 #include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
  60 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  61 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"

  62 #include "gc/shenandoah/shenandoahSTWMark.hpp"
  63 #include "gc/shenandoah/shenandoahUtils.hpp"
  64 #include "gc/shenandoah/shenandoahVerifier.hpp"
  65 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  66 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  67 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  68 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"


  69 #include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
  70 #include "gc/shenandoah/mode/shenandoahSATBMode.hpp"


  71 #if INCLUDE_JFR
  72 #include "gc/shenandoah/shenandoahJfrSupport.hpp"
  73 #endif
  74 
  75 #include "cds/archiveHeapWriter.hpp"
  76 #include "classfile/systemDictionary.hpp"
  77 #include "code/codeCache.hpp"
  78 #include "memory/classLoaderMetaspace.hpp"
  79 #include "memory/metaspaceUtils.hpp"
  80 #include "nmt/mallocTracker.hpp"
  81 #include "nmt/memTracker.hpp"
  82 #include "oops/compressedOops.inline.hpp"
  83 #include "prims/jvmtiTagMap.hpp"
  84 #include "runtime/atomic.hpp"
  85 #include "runtime/globals.hpp"
  86 #include "runtime/interfaceSupport.inline.hpp"
  87 #include "runtime/java.hpp"
  88 #include "runtime/orderAccess.hpp"
  89 #include "runtime/safepointMechanism.hpp"
  90 #include "runtime/stackWatermarkSet.hpp"

 144 jint ShenandoahHeap::initialize() {
 145   //
 146   // Figure out heap sizing
 147   //
 148 
 149   size_t init_byte_size = InitialHeapSize;
 150   size_t min_byte_size  = MinHeapSize;
 151   size_t max_byte_size  = MaxHeapSize;
 152   size_t heap_alignment = HeapAlignment;
 153 
 154   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 155 
 156   Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
 157   Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
 158 
 159   _num_regions = ShenandoahHeapRegion::region_count();
 160   assert(_num_regions == (max_byte_size / reg_size_bytes),
 161          "Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT,
 162          _num_regions, max_byte_size, reg_size_bytes);
 163 
 164   // Now that we know the number of regions, initialize the heuristics.
 165   initialize_heuristics();
 166 
 167   size_t num_committed_regions = init_byte_size / reg_size_bytes;
 168   num_committed_regions = MIN2(num_committed_regions, _num_regions);
 169   assert(num_committed_regions <= _num_regions, "sanity");
 170   _initial_size = num_committed_regions * reg_size_bytes;
 171 
 172   size_t num_min_regions = min_byte_size / reg_size_bytes;
 173   num_min_regions = MIN2(num_min_regions, _num_regions);
 174   assert(num_min_regions <= _num_regions, "sanity");
 175   _minimum_size = num_min_regions * reg_size_bytes;
 176 
 177   // Default to max heap size.
 178   _soft_max_size = _num_regions * reg_size_bytes;
 179 
 180   _committed = _initial_size;
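  // A worked sizing example may help here; the numbers are illustrative and not
  // taken from this change: with -Xmx4g and a 2 MiB region size, _num_regions is
  // 4096 MiB / 2 MiB = 2048 and _soft_max_size defaults to the full 4 GiB. With
  // -Xms512m, num_committed_regions = 512 MiB / 2 MiB = 256, so _initial_size and
  // _committed start at 512 MiB; the remaining regions stay reserved but
  // uncommitted until the heap grows into them.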
 181 
 182   size_t heap_page_size   = UseLargePages ? os::large_page_size() : os::vm_page_size();
 183   size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 184   size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 185 
 186   //

 200                                           heap_rs.size(), heap_rs.page_size());
 201 
 202 #if SHENANDOAH_OPTIMIZED_MARKTASK
 203   // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
 204   // Fail if we ever attempt to address more than we can.
 205   if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
 206     FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
 207                           "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
 208                           "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
 209                 p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
 210     vm_exit_during_initialization("Fatal Error", buf);
 211   }
 212 #endif
 213 
 214   ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
 215   if (!_heap_region_special) {
 216     os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
 217                               "Cannot commit heap memory");
 218   }
 219 






















 220   //
 221   // Reserve and commit memory for bitmap(s)
 222   //
 223 
 224   size_t bitmap_size_orig = ShenandoahMarkBitMap::compute_size(heap_rs.size());
 225   _bitmap_size = align_up(bitmap_size_orig, bitmap_page_size);
 226 
 227   size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();
 228 
 229   guarantee(bitmap_bytes_per_region != 0,
 230             "Bitmap bytes per region should not be zero");
 231   guarantee(is_power_of_2(bitmap_bytes_per_region),
 232             "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
 233 
 234   if (bitmap_page_size > bitmap_bytes_per_region) {
 235     _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
 236     _bitmap_bytes_per_slice = bitmap_page_size;
 237   } else {
 238     _bitmap_regions_per_slice = 1;
 239     _bitmap_bytes_per_slice = bitmap_bytes_per_region;
 240   }
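  // Worked example (illustrative, assuming 2 MiB regions and a heap_map_factor of
  // 64, i.e. one bitmap byte covers 64 heap bytes): bitmap_bytes_per_region =
  // 2 MiB / 64 = 32 KiB. With 4 KiB base pages, the page is smaller than that, so
  // each slice covers exactly one region at 32 KiB per slice. With 2 MiB large
  // pages, one page spans 2 MiB / 32 KiB = 64 regions, so bitmap commits and
  // uncommits happen in 2 MiB slices covering 64 regions at a time.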
 241 
 242   guarantee(_bitmap_regions_per_slice >= 1,
 243             "Should have at least one region per slice: " SIZE_FORMAT,
 244             _bitmap_regions_per_slice);
 245 
 246   guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
 247             "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
 248             _bitmap_bytes_per_slice, bitmap_page_size);
 249 
 250   ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
 251   os::trace_page_sizes_for_requested_size("Mark Bitmap",
 252                                           bitmap_size_orig, bitmap_page_size,
 253                                           bitmap.base(),
 254                                           bitmap.size(), bitmap.page_size());
 255   MemTracker::record_virtual_memory_tag(bitmap.base(), mtGC);
 256   _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
 257   _bitmap_region_special = bitmap.special();
 258 
 259   size_t bitmap_init_commit = _bitmap_bytes_per_slice *
 260                               align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
 261   bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
 262   if (!_bitmap_region_special) {
 263     os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
 264                               "Cannot commit bitmap memory");
 265   }
 266 
 267   _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions, _max_workers);
 268 
 269   if (ShenandoahVerify) {
 270     ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
 271     os::trace_page_sizes_for_requested_size("Verify Bitmap",
 272                                             bitmap_size_orig, bitmap_page_size,
 273                                             verify_bitmap.base(),
 274                                             verify_bitmap.size(), verify_bitmap.page_size());
 275     if (!verify_bitmap.special()) {
 276       os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
 277                                 "Cannot commit verification bitmap memory");
 278     }
 279     MemTracker::record_virtual_memory_tag(verify_bitmap.base(), mtGC);
 280     MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
 281     _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
 282     _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
 283   }
 284 
 285   // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
 286   size_t aux_bitmap_page_size = bitmap_page_size;
 287 

 331       assert(is_aligned(req_addr, cset_align), "Should be aligned");
 332       cset_rs = ReservedSpace(cset_size, cset_align, cset_page_size, req_addr);
 333       if (cset_rs.is_reserved()) {
 334         assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
 335         _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 336         break;
 337       }
 338     }
 339 
 340     if (_collection_set == nullptr) {
 341       cset_rs = ReservedSpace(cset_size, cset_align, os::vm_page_size());
 342       _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 343     }
 344     os::trace_page_sizes_for_requested_size("Collection Set",
 345                                             cset_size, cset_page_size,
 346                                             cset_rs.base(),
 347                                             cset_rs.size(), cset_rs.page_size());
 348   }
 349 
 350   _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);

 351   _free_set = new ShenandoahFreeSet(this, _num_regions);
 352 
 353   {
 354     ShenandoahHeapLocker locker(lock());
 355 
 356     for (size_t i = 0; i < _num_regions; i++) {
 357       HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
 358       bool is_committed = i < num_committed_regions;
 359       void* loc = region_storage.base() + i * region_align;
 360 
 361       ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
 362       assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");
 363 
 364       _marking_context->initialize_top_at_mark_start(r);
 365       _regions[i] = r;
 366       assert(!collection_set()->is_in(i), "New region should not be in collection set");


 367     }
 368 
 369     // Initialize to complete
 370     _marking_context->mark_complete();

 371 
 372     _free_set->rebuild();



 373   }
 374 
 375   if (AlwaysPreTouch) {
 376     // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
 377     // before initialize() below zeroes it with the initializing thread. For any given region,
 378     // we touch the region and the corresponding bitmaps from the same thread.
 379     ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
 380 
 381     _pretouch_heap_page_size = heap_page_size;
 382     _pretouch_bitmap_page_size = bitmap_page_size;
 383 
 384     // OS memory managers may want to coalesce back-to-back pages. Make their jobs
 385     // simpler by pre-touching contiguous spaces (heap and bitmap) separately.
 386 
 387     ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, _pretouch_bitmap_page_size);
 388     _workers->run_task(&bcl);
 389 
 390     ShenandoahPretouchHeapTask hcl(_pretouch_heap_page_size);
 391     _workers->run_task(&hcl);
 392   }

 401     Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
 402   }
 403 
 404   // There should probably be Shenandoah-specific options for these,
 405   // just as there are G1-specific options.
 406   {
 407     ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
 408     satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
 409     satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
 410   }
 411 
 412   _monitoring_support = new ShenandoahMonitoringSupport(this);
 413   _phase_timings = new ShenandoahPhaseTimings(max_workers());
 414   ShenandoahCodeRoots::initialize();
 415 
 416   if (ShenandoahPacing) {
 417     _pacer = new ShenandoahPacer(this);
 418     _pacer->setup_for_idle();
 419   }
 420 
 421   _control_thread = new ShenandoahControlThread();
 422 
 423   ShenandoahInitLogger::print();
 424 
 425   FullGCForwarding::initialize(_heap_region);
 426 
 427   return JNI_OK;
 428 }
 429 








 430 void ShenandoahHeap::initialize_mode() {
 431   if (ShenandoahGCMode != nullptr) {
 432     if (strcmp(ShenandoahGCMode, "satb") == 0) {
 433       _gc_mode = new ShenandoahSATBMode();
 434     } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
 435       _gc_mode = new ShenandoahPassiveMode();


 436     } else {
 437       vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
 438     }
 439   } else {
 440     vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
 441   }
 442   _gc_mode->initialize_flags();
 443   if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 444     vm_exit_during_initialization(
 445             err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 446                     _gc_mode->name()));
 447   }
 448   if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
 449     vm_exit_during_initialization(
 450             err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 451                     _gc_mode->name()));
 452   }
 453 }
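// The mode is selected via -XX:ShenandoahGCMode. Illustrative invocations (not
// part of this change):
//
//   java -XX:+UseShenandoahGC -XX:ShenandoahGCMode=satb ...
//   java -XX:+UnlockDiagnosticVMOptions -XX:+UseShenandoahGC \
//        -XX:ShenandoahGCMode=passive ...
//
// The second line assumes passive reports itself as a diagnostic mode, hence the
// explicit -XX:+UnlockDiagnosticVMOptions; an unrecognized mode name makes the
// checks above exit the VM during initialization.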
 454 
 455 void ShenandoahHeap::initialize_heuristics() {
 456   assert(_gc_mode != nullptr, "Must be initialized");
 457   _heuristics = _gc_mode->initialize_heuristics();
 458 
 459   if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 460     vm_exit_during_initialization(
 461             err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 462                     _heuristics->name()));
 463   }
 464   if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
 465     vm_exit_during_initialization(
 466             err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 467                     _heuristics->name()));
 468   }
 469 }
 470 
 471 #ifdef _MSC_VER
 472 #pragma warning( push )
 473 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 474 #endif
 475 
 476 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 477   CollectedHeap(),


 478   _initial_size(0),
 479   _used(0),
 480   _committed(0),
 481   _bytes_allocated_since_gc_start(0),
 482   _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
 483   _workers(nullptr),
 484   _safepoint_workers(nullptr),
 485   _heap_region_special(false),
 486   _num_regions(0),
 487   _regions(nullptr),
 488   _update_refs_iterator(this),
 489   _gc_state_changed(false),
 490   _gc_no_progress_count(0),



 491   _control_thread(nullptr),


 492   _shenandoah_policy(policy),
 493   _gc_mode(nullptr),
 494   _heuristics(nullptr),
 495   _free_set(nullptr),
 496   _pacer(nullptr),
 497   _verifier(nullptr),
 498   _phase_timings(nullptr),

 499   _monitoring_support(nullptr),
 500   _memory_pool(nullptr),
 501   _stw_memory_manager("Shenandoah Pauses"),
 502   _cycle_memory_manager("Shenandoah Cycles"),
 503   _gc_timer(new ConcurrentGCTimer()),
 504   _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
 505   _ref_processor(new ShenandoahReferenceProcessor(MAX2(_max_workers, 1U))),
 506   _marking_context(nullptr),
 507   _bitmap_size(0),
 508   _bitmap_regions_per_slice(0),
 509   _bitmap_bytes_per_slice(0),
 510   _bitmap_region_special(false),
 511   _aux_bitmap_region_special(false),
 512   _liveness_cache(nullptr),
 513   _collection_set(nullptr)
 514 {
 515   // Initialize GC mode early, so we can adjust barrier support
 516   initialize_mode();
 517   BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));
 518 
 519   _max_workers = MAX2(_max_workers, 1U);
 520   _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
 521   if (_workers == nullptr) {
 522     vm_exit_during_initialization("Failed necessary allocation.");
 523   } else {
 524     _workers->initialize_workers();
 525   }
 526 
 527   if (ParallelGCThreads > 1) {
 528     _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread",
 529                                                 ParallelGCThreads);
 530     _safepoint_workers->initialize_workers();
 531   }
 532 }
 533 
 534 #ifdef _MSC_VER
 535 #pragma warning( pop )
 536 #endif
 537 
 538 class ShenandoahResetBitmapTask : public WorkerTask {
 539 private:
 540   ShenandoahRegionIterator _regions;
 541 
 542 public:
 543   ShenandoahResetBitmapTask() :
 544     WorkerTask("Shenandoah Reset Bitmap") {}
 545 
 546   void work(uint worker_id) {
 547     ShenandoahHeapRegion* region = _regions.next();
 548     ShenandoahHeap* heap = ShenandoahHeap::heap();
 549     ShenandoahMarkingContext* const ctx = heap->marking_context();
 550     while (region != nullptr) {
 551       if (heap->is_bitmap_slice_committed(region)) {
 552         ctx->clear_bitmap(region);
 553       }
 554       region = _regions.next();
 555     }
 556   }
 557 };
 558 
 559 void ShenandoahHeap::reset_mark_bitmap() {
 560   assert_gc_workers(_workers->active_workers());
 561   mark_incomplete_marking_context();
 562 
 563   ShenandoahResetBitmapTask task;
 564   _workers->run_task(&task);
 565 }
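// The reset task above uses the common Shenandoah worker shape: a shared region
// iterator hands out regions so that each region is processed by exactly one
// worker. A minimal standalone sketch of that claiming pattern (hypothetical
// names, std::atomic in place of the VM primitives):
//
//   #include <atomic>
//   #include <cstddef>
//
//   struct ClaimingIterator {
//     std::atomic<size_t> _next{0};
//     const size_t _count;
//     explicit ClaimingIterator(size_t count) : _count(count) {}
//     // Each call hands out a fresh index; values >= _count mean "nothing left".
//     size_t next() { return _next.fetch_add(1, std::memory_order_relaxed); }
//   };
//
//   void worker_loop(ClaimingIterator& it, void (*process)(size_t)) {
//     for (size_t idx = it.next(); idx < it._count; idx = it.next()) {
//       process(idx);   // e.g. clear the mark bitmap slice for region idx
//     }
//   }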
 566 
 567 void ShenandoahHeap::print_on(outputStream* st) const {
 568   st->print_cr("Shenandoah Heap");
 569   st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
 570                byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
 571                byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
 572                byte_size_in_proper_unit(committed()),    proper_unit_for_byte_size(committed()),
 573                byte_size_in_proper_unit(used()),         proper_unit_for_byte_size(used()));
 574   st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions",
 575                num_regions(),
 576                byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
 577                proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));
 578 
 579   st->print("Status: ");
 580   if (has_forwarded_objects())                 st->print("has forwarded objects, ");
 581   if (is_concurrent_mark_in_progress())        st->print("marking, ");





 582   if (is_evacuation_in_progress())             st->print("evacuating, ");
 583   if (is_update_refs_in_progress())            st->print("updating refs, ");
 584   if (is_degenerated_gc_in_progress())         st->print("degenerated gc, ");
 585   if (is_full_gc_in_progress())                st->print("full gc, ");
 586   if (is_full_gc_move_in_progress())           st->print("full gc move, ");
 587   if (is_concurrent_weak_root_in_progress())   st->print("concurrent weak roots, ");
 588   if (is_concurrent_strong_root_in_progress() &&
 589       !is_concurrent_weak_root_in_progress())  st->print("concurrent strong roots, ");
 590 
 591   if (cancelled_gc()) {
 592     st->print("cancelled");
 593   } else {
 594     st->print("not cancelled");
 595   }
 596   st->cr();
 597 
 598   st->print_cr("Reserved region:");
 599   st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
 600                p2i(reserved_region().start()),
 601                p2i(reserved_region().end()));

 612   st->cr();
 613   MetaspaceUtils::print_on(st);
 614 
 615   if (Verbose) {
 616     st->cr();
 617     print_heap_regions_on(st);
 618   }
 619 }
 620 
 621 class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
 622 public:
 623   void do_thread(Thread* thread) {
 624     assert(thread != nullptr, "Sanity");
 625     assert(thread->is_Worker_thread(), "Only worker thread expected");
 626     ShenandoahThreadLocalData::initialize_gclab(thread);
 627   }
 628 };
 629 
 630 void ShenandoahHeap::post_initialize() {
 631   CollectedHeap::post_initialize();


 632   MutexLocker ml(Threads_lock);
 633 
 634   ShenandoahInitWorkerGCLABClosure init_gclabs;
 635   _workers->threads_do(&init_gclabs);
 636 
 637   // The gclab cannot be initialized early during VM startup, as it cannot determine its max_size.
 638   // Instead, we let WorkerThreads initialize the gclab when a new worker is created.
 639   _workers->set_initialize_gclab();
 640   if (_safepoint_workers != nullptr) {
 641     _safepoint_workers->threads_do(&init_gclabs);
 642     _safepoint_workers->set_initialize_gclab();
 643   }
 644 
 645   _heuristics->initialize();
 646 
 647   JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers();)
 648 }
 649 




 650 size_t ShenandoahHeap::used() const {
 651   return Atomic::load(&_used);
 652 }
 653 
 654 size_t ShenandoahHeap::committed() const {
 655   return Atomic::load(&_committed);
 656 }
 657 
 658 size_t ShenandoahHeap::available() const {
 659   return free_set()->available();
 660 }
 661 
 662 void ShenandoahHeap::increase_committed(size_t bytes) {
 663   shenandoah_assert_heaplocked_or_safepoint();
 664   _committed += bytes;
 665 }
 666 
 667 void ShenandoahHeap::decrease_committed(size_t bytes) {
 668   shenandoah_assert_heaplocked_or_safepoint();
 669   _committed -= bytes;
 670 }
 671 
 672 void ShenandoahHeap::increase_used(size_t bytes) {
 673   Atomic::add(&_used, bytes, memory_order_relaxed);









































 674 }
 675 
 676 void ShenandoahHeap::set_used(size_t bytes) {
 677   Atomic::store(&_used, bytes);



 678 }
 679 
 680 void ShenandoahHeap::decrease_used(size_t bytes) {
 681   assert(used() >= bytes, "never decrease heap size by more than we've left");
 682   Atomic::sub(&_used, bytes, memory_order_relaxed);


 683 }
 684 
 685 void ShenandoahHeap::increase_allocated(size_t bytes) {
 686   Atomic::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed);



 687 }
 688 
 689 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
 690   size_t bytes = words * HeapWordSize;
 691   if (!waste) {
 692     increase_used(bytes);
 693   }
 694   increase_allocated(bytes);


 695   if (ShenandoahPacing) {
 696     control_thread()->pacing_notify_alloc(words);
 697     if (waste) {
 698       pacer()->claim_for_alloc<true>(words);
 699     }
 700   }
 701 }
 702 
 703 size_t ShenandoahHeap::capacity() const {
 704   return committed();
 705 }
 706 
 707 size_t ShenandoahHeap::max_capacity() const {
 708   return _num_regions * ShenandoahHeapRegion::region_size_bytes();
 709 }
 710 
 711 size_t ShenandoahHeap::soft_max_capacity() const {
 712   size_t v = Atomic::load(&_soft_max_size);
 713   assert(min_capacity() <= v && v <= max_capacity(),
 714          "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
 715          min_capacity(), v, max_capacity());
 716   return v;
 717 }
 718 

 808   size_t old_soft_max = soft_max_capacity();
 809   if (new_soft_max != old_soft_max) {
 810     new_soft_max = MAX2(min_capacity(), new_soft_max);
 811     new_soft_max = MIN2(max_capacity(), new_soft_max);
 812     if (new_soft_max != old_soft_max) {
 813       log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
 814                    byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
 815                    byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
 816       );
 817       set_soft_max_capacity(new_soft_max);
 818       return true;
 819     }
 820   }
 821   return false;
 822 }
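// If this path is driven by the manageable SoftMaxHeapSize flag, as it is in
// mainline Shenandoah, it can be exercised at runtime; an illustrative command
// (pid and size are placeholders):
//
//   jcmd <pid> VM.set_flag SoftMaxHeapSize 2g
//
// The requested value is clamped into [min_capacity(), max_capacity()] above and
// is only applied and logged when it actually differs from the current soft max.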
 823 
 824 void ShenandoahHeap::notify_heap_changed() {
 825   // Update monitoring counters when we take a new region. This amortizes the
 826   // update costs on the slow path.
 827   monitoring_support()->notify_heap_changed();
 828 
 829   // This is called from allocation path, and thus should be fast.
 830   _heap_changed.try_set();
 831 }
 832 
 833 void ShenandoahHeap::set_forced_counters_update(bool value) {
 834   monitoring_support()->set_forced_counters_update(value);
 835 }
 836 
 837 void ShenandoahHeap::handle_force_counters_update() {
 838   monitoring_support()->handle_force_counters_update();
 839 }
 840 
 841 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
 842   // New object should fit the GCLAB size
 843   size_t min_size = MAX2(size, PLAB::min_size());
 844 
 845   // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
 846   size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;

 847   new_size = MIN2(new_size, PLAB::max_size());
 848   new_size = MAX2(new_size, PLAB::min_size());
 849 
 850   // Record the new heuristic value even if we take a shortcut. This captures
 851   // the case when moderately-sized objects always take a shortcut. At some point,
 852   // the heuristics should catch up with them.

 853   ShenandoahThreadLocalData::set_gclab_size(thread, new_size);
 854 
 855   if (new_size < size) {
 856     // New size still does not fit the object. Fall back to shared allocation.
 857     // This avoids retiring perfectly good GCLABs, when we encounter a large object.

 858     return nullptr;
 859   }
 860 
 861   // Retire current GCLAB, and allocate a new one.
 862   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
 863   gclab->retire();
 864 
 865   size_t actual_size = 0;
 866   HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
 867   if (gclab_buf == nullptr) {
 868     return nullptr;
 869   }
 870 
 871   assert (size <= actual_size, "allocation should fit");
 872 
 873   // ...and clear or zap just allocated TLAB, if needed.
 874   if (ZeroTLAB) {
 875     Copy::zero_to_words(gclab_buf, actual_size);
 876   } else if (ZapTLAB) {
 877     // Skip mangling the space corresponding to the object header to
 878     // ensure that the returned space is not considered parsable by
 879     // any concurrent GC thread.
 880     size_t hdr_size = oopDesc::header_size();
 881     Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
 882   }
 883   gclab->set_buf(gclab_buf, actual_size);
 884   return gclab->allocate(size);
 885 }
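// Sizing sketch with illustrative numbers: if the thread's recorded GCLAB size is
// currently 1024 words, the next request starts at 2048 words and is clamped into
// [PLAB::min_size(), PLAB::max_size()]. If even the clamped size cannot fit the
// object, the code above returns nullptr without retiring the current GCLAB, and
// the caller falls back to a shared GC allocation for that object.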
 886 

 887 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
 888                                             size_t requested_size,
 889                                             size_t* actual_size) {
 890   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
 891   HeapWord* res = allocate_memory(req);
 892   if (res != nullptr) {
 893     *actual_size = req.actual_size();
 894   } else {
 895     *actual_size = 0;
 896   }
 897   return res;
 898 }
 899 
 900 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
 901                                              size_t word_size,
 902                                              size_t* actual_size) {
 903   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
 904   HeapWord* res = allocate_memory(req);
 905   if (res != nullptr) {
 906     *actual_size = req.actual_size();

 915   bool in_new_region = false;
 916   HeapWord* result = nullptr;
 917 
 918   if (req.is_mutator_alloc()) {
 919     if (ShenandoahPacing) {
 920       pacer()->pace_for_alloc(req.size());
 921       pacer_epoch = pacer()->epoch();
 922     }
 923 
 924     if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
 925       result = allocate_memory_under_lock(req, in_new_region);
 926     }
 927 
 928     // Check that the GC overhead limit is not exceeded.
 929     //
 930     // Shenandoah will grind along for quite a while allocating one
 931     // object at a time using shared (non-TLAB) allocations. When that
 932     // happens and the limit is exceeded, this check notifies the
 933     // collector to start a cycle, but raises an OOME to the mutator
 934     // if the last Full GCs have not made progress.

 935     if (result == nullptr && !req.is_lab_alloc() && get_gc_no_progress_count() > ShenandoahNoProgressThreshold) {
 936       control_thread()->handle_alloc_failure(req, false);

 937       return nullptr;
 938     }
 939 
 940     if (result == nullptr) {
 941       // Block until control thread reacted, then retry allocation.
 942       //
 943       // It might happen that one of the threads requesting allocation would unblock
 944       // way later after GC happened, only to fail the second allocation, because
 945       // other threads have already depleted the free storage. In this case, a better
 946       // strategy is to try again, until at least one full GC has completed.
 947       //
 948       // Stop retrying and return nullptr (raising OutOfMemoryError) if our allocation failed even after:
 949       //   a) We experienced a GC that had good progress, or
 950       //   b) We experienced at least one Full GC (whether or not it had good progress)
 951       //
 952       // TODO: Consider GLOBAL GC rather than Full GC to remediate OOM condition: https://bugs.openjdk.org/browse/JDK-8335910
 953 
 954       size_t original_count = shenandoah_policy()->full_gc_count();
 955       while ((result == nullptr) && (original_count == shenandoah_policy()->full_gc_count())) {
 956         control_thread()->handle_alloc_failure(req, true);
 957         result = allocate_memory_under_lock(req, in_new_region);
 958       }
 959       if (result != nullptr) {
 960         // If our allocation request has been satisfied after it initially failed, we count this as good GC progress.
 961         notify_gc_progress();
 962       }
 963       if (log_is_enabled(Debug, gc, alloc)) {
 964         ResourceMark rm;
 965         log_debug(gc, alloc)("Thread: %s, Result: " PTR_FORMAT ", Request: %s, Size: " SIZE_FORMAT
 966                              ", Original: " SIZE_FORMAT ", Latest: " SIZE_FORMAT,
 967                              Thread::current()->name(), p2i(result), req.type_string(), req.size(),
 968                              original_count, get_gc_no_progress_count());
 969       }
 970     }
 971   } else {
 972     assert(req.is_gc_alloc(), "Can only accept GC allocs here");
 973     result = allocate_memory_under_lock(req, in_new_region);
 974     // Do not call handle_alloc_failure() here, because we cannot block.
 975     // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
 976   }
 977 
 978   if (in_new_region) {
 979     notify_heap_changed();
 980   }
 981 








 982   if (result != nullptr) {
 983     size_t requested = req.size();
 984     size_t actual = req.actual_size();
 985 
 986     assert (req.is_lab_alloc() || (requested == actual),
 987             "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
 988             ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);
 989 
 990     if (req.is_mutator_alloc()) {
 991       notify_mutator_alloc_words(actual, false);
 992 
 993       // If we requested more than we were granted, give the rest back to pacer.
 994       // This only matters if we are in the same pacing epoch: do not try to unpace
 995       // over the budget for the other phase.
 996       if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
 997         pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
 998       }
 999     } else {
1000       increase_used(actual*HeapWordSize);
1001     }
1002   }
1003 
1004   return result;
1005 }
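// The retry policy above can be read as: block and retry until either the
// allocation succeeds or at least one Full GC has completed since the first
// failure. A minimal standalone sketch of that epoch-based retry shape
// (hypothetical names, not HotSpot code):
//
//   // Returns non-null on success; returns null once a full collection has been
//   // observed without the allocation succeeding, which maps to OOME upstream.
//   void* retry_until_full_gc(size_t size,
//                             void* (*try_alloc)(size_t),
//                             void (*request_gc_and_wait)(),
//                             size_t (*full_gc_count)()) {
//     void*  p        = try_alloc(size);
//     size_t original = full_gc_count();
//     while (p == nullptr && full_gc_count() == original) {
//       request_gc_and_wait();   // blocks until the control thread has reacted
//       p = try_alloc(size);
//     }
//     return p;
//   }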
1006 
1007 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
1008   // If we are dealing with mutator allocation, then we may need to block for safepoint.
1009   // We cannot block for safepoint for GC allocations, because there is a high chance
1010   // we are already running at safepoint or from stack watermark machinery, and we cannot
1011   // block again.
1012   ShenandoahHeapLocker locker(lock(), req.is_mutator_alloc());
1013   return _free_set->allocate(req, in_new_region);




































1014 }
1015 
1016 HeapWord* ShenandoahHeap::mem_allocate(size_t size,
1017                                         bool*  gc_overhead_limit_was_exceeded) {
1018   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
1019   return allocate_memory(req);
1020 }
1021 
1022 MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
1023                                                              size_t size,
1024                                                              Metaspace::MetadataType mdtype) {
1025   MetaWord* result;
1026 
1027   // Inform metaspace OOM to GC heuristics if class unloading is possible.
1028   if (heuristics()->can_unload_classes()) {
1029     ShenandoahHeuristics* h = heuristics();
1030     h->record_metaspace_oom();
1031   }
1032 
1033   // Expand and retry allocation
1034   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1035   if (result != nullptr) {
1036     return result;
1037   }
1038 
1039   // Start full GC
1040   collect(GCCause::_metadata_GC_clear_soft_refs);
1041 
1042   // Retry allocation
1043   result = loader_data->metaspace_non_null()->allocate(size, mdtype);
1044   if (result != nullptr) {
1045     return result;
1046   }
1047 
1048   // Expand and retry allocation
1049   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);

1107       assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
1108       _sh->marked_object_iterate(r, &cl);
1109 
1110       if (ShenandoahPacing) {
1111         _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
1112       }
1113 
1114       if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
1115         break;
1116       }
1117     }
1118   }
1119 };
1120 
1121 void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
1122   ShenandoahEvacuationTask task(this, _collection_set, concurrent);
1123   workers()->run_task(&task);
1124 }
1125 
1126 oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
1127   if (ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {
1128     // This thread went through the OOM-during-evacuation protocol, so it is safe to return
1129     // the forwarding pointer. It must not attempt to evacuate any more.

1130     return ShenandoahBarrierSet::resolve_forwarded(p);
1131   }
1132 
1133   assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
1134 
1135   size_t size = ShenandoahForwarding::size(p);

1136 
1137   assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");


1138 
1139   bool alloc_from_gclab = true;




1140   HeapWord* copy = nullptr;

1141 
1142 #ifdef ASSERT
1143   if (ShenandoahOOMDuringEvacALot &&
1144       (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
1145     copy = nullptr;
1146   } else {
1147 #endif
1148     if (UseTLAB) {
1149       copy = allocate_from_gclab(thread, size);
1150     }
1151     if (copy == nullptr) {
1152       ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size);

1153       copy = allocate_memory(req);
1154       alloc_from_gclab = false;
1155     }
1156 #ifdef ASSERT
1157   }
1158 #endif
1159 
1160   if (copy == nullptr) {
1161     control_thread()->handle_alloc_failure_evac(size);
1162 
1163     _oom_evac_handler.handle_out_of_memory_during_evacuation();
1164 
1165     return ShenandoahBarrierSet::resolve_forwarded(p);
1166   }
1167 
1168   // Copy the object:
1169   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
1170 
1171   // Try to install the new forwarding pointer.
1172   oop copy_val = cast_to_oop(copy);
1173   oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
1174   if (result == copy_val) {
1175     // Successfully evacuated. Our copy is now the public one!
1176     ContinuationGCSupport::relativize_stack_chunk(copy_val);
1177     shenandoah_assert_correct(nullptr, copy_val);
1178     return copy_val;
1179   }  else {
1180     // Failed to evacuate. We need to deal with the object that is left behind. Since this
1181     // new allocation is certainly after TAMS, it will be considered live in the next cycle.
1182     // But if it happens to contain references to evacuated regions, those references would
1183     // not get updated for this stale copy during this cycle, and we will crash while scanning
1184     // it the next cycle.
1185     //
1186     // For GCLAB allocations, it is enough to rollback the allocation ptr. Either the next
1187     // object will overwrite this stale copy, or the filler object on LAB retirement will
1188     // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
1189     // have to explicitly overwrite the copy with the filler object. With that overwrite,
1190     // we have to keep the fwdptr initialized and pointing to our (stale) copy.
1191     if (alloc_from_gclab) {
1192       ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
1193     } else {




1194       fill_with_object(copy, size);
1195       shenandoah_assert_correct(nullptr, copy_val);

1196     }
1197     shenandoah_assert_correct(nullptr, result);
1198     return result;
1199   }
1200 }
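// The install step above is a single compare-and-swap on the forwarding slot:
// exactly one racing evacuator wins, and every loser adopts the winner's copy and
// disposes of its own (rollback or filler, as explained above). A minimal
// standalone sketch of that protocol, with hypothetical types and std::atomic in
// place of the mark-word machinery:
//
//   #include <atomic>
//
//   struct Obj { std::atomic<Obj*> fwd{nullptr}; /* payload... */ };
//
//   Obj* install_forwardee(Obj* from, Obj* my_copy) {
//     Obj* expected = nullptr;                    // nullptr: not yet forwarded
//     if (from->fwd.compare_exchange_strong(expected, my_copy)) {
//       return my_copy;                           // we won; our copy is public
//     }
//     // Another thread installed its copy first; 'expected' now holds it.
//     return expected;
//   }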
1201 
1202 void ShenandoahHeap::trash_cset_regions() {
1203   ShenandoahHeapLocker locker(lock());
1204 
1205   ShenandoahCollectionSet* set = collection_set();
1206   ShenandoahHeapRegion* r;
1207   set->clear_current_index();
1208   while ((r = set->next()) != nullptr) {
1209     r->make_trash();
1210   }
1211   collection_set()->clear();
1212 }
1213 
1214 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
1215   st->print_cr("Heap Regions:");
1216   st->print_cr("Region state: EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HP=pinned humongous start");
1217   st->print_cr("              HC=humongous continuation, CS=collection set, TR=trash, P=pinned, CSP=pinned collection set");
1218   st->print_cr("BTE=bottom/top/end, TAMS=top-at-mark-start");
1219   st->print_cr("UWM=update watermark, U=used");
1220   st->print_cr("T=TLAB allocs, G=GCLAB allocs");
1221   st->print_cr("S=shared allocs, L=live data");
1222   st->print_cr("CP=critical pins");
1223 
1224   for (size_t i = 0; i < num_regions(); i++) {
1225     get_region(i)->print_on(st);
1226   }
1227 }
1228 
1229 void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
1230   assert(start->is_humongous_start(), "reclaim regions starting with the first one");
1231 
1232   oop humongous_obj = cast_to_oop(start->bottom());
1233   size_t size = humongous_obj->size();
1234   size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
1235   size_t index = start->index() + required_regions - 1;
1236 
1237   assert(!start->has_live(), "liveness must be zero");
1238 
1239   for(size_t i = 0; i < required_regions; i++) {
1240     // Reclaim from the tail. Otherwise, an assertion fails when printing a region to the trace log,
1241     // because it expects every humongous continuation to belong to a range that still starts with a humongous start region.
1242     ShenandoahHeapRegion* region = get_region(index --);
1243 
1244     assert(region->is_humongous(), "expect correct humongous start or continuation");
1245     assert(!region->is_cset(), "Humongous region should not be in collection set");
1246 
1247     region->make_trash_immediate();
1248   }

1249 }
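// Worked example (illustrative, assuming 2 MiB regions): a 5 MiB humongous object
// needs required_regions = ceil(5 MiB / 2 MiB) = 3. If the start region has index
// 10, the loop above trashes regions 12, 11 and finally 10, so no humongous
// continuation ever outlives its humongous start region.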
1250 
1251 class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
1252 public:
1253   ShenandoahCheckCleanGCLABClosure() {}
1254   void do_thread(Thread* thread) {
1255     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1256     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1257     assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");






1258   }
1259 };
1260 
1261 class ShenandoahRetireGCLABClosure : public ThreadClosure {
1262 private:
1263   bool const _resize;
1264 public:
1265   ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
1266   void do_thread(Thread* thread) {
1267     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1268     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1269     gclab->retire();
1270     if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1271       ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1272     }













1273   }
1274 };
1275 
1276 void ShenandoahHeap::labs_make_parsable() {
1277   assert(UseTLAB, "Only call with UseTLAB");
1278 
1279   ShenandoahRetireGCLABClosure cl(false);
1280 
1281   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1282     ThreadLocalAllocBuffer& tlab = t->tlab();
1283     tlab.make_parsable();
1284     cl.do_thread(t);
1285   }
1286 
1287   workers()->threads_do(&cl);
1288 }
1289 
1290 void ShenandoahHeap::tlabs_retire(bool resize) {
1291   assert(UseTLAB, "Only call with UseTLAB");
1292   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");

1386 }
1387 
1388 void ShenandoahHeap::print_tracing_info() const {
1389   LogTarget(Info, gc, stats) lt;
1390   if (lt.is_enabled()) {
1391     ResourceMark rm;
1392     LogStream ls(lt);
1393 
1394     phase_timings()->print_global_on(&ls);
1395 
1396     ls.cr();
1397     ls.cr();
1398 
1399     shenandoah_policy()->print_gc_stats(&ls);
1400 
1401     ls.cr();
1402     ls.cr();
1403   }
1404 }
1405 








































1406 void ShenandoahHeap::verify(VerifyOption vo) {
1407   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1408     if (ShenandoahVerify) {
1409       verifier()->verify_generic(vo);
1410     } else {
1411       // TODO: Consider allocating verification bitmaps on demand,
1412       // and turn this on unconditionally.
1413     }
1414   }
1415 }
1416 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1417   return _free_set->capacity();
1418 }
1419 
1420 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1421 private:
1422   MarkBitMap* _bitmap;
1423   ShenandoahScanObjectStack* _oop_stack;
1424   ShenandoahHeap* const _heap;
1425   ShenandoahMarkingContext* const _marking_context;

1735   } else {
1736     heap_region_iterate(blk);
1737   }
1738 }
1739 
1740 class ShenandoahRendezvousClosure : public HandshakeClosure {
1741 public:
1742   inline ShenandoahRendezvousClosure(const char* name) : HandshakeClosure(name) {}
1743   inline void do_thread(Thread* thread) {}
1744 };
1745 
1746 void ShenandoahHeap::rendezvous_threads(const char* name) {
1747   ShenandoahRendezvousClosure cl(name);
1748   Handshake::execute(&cl);
1749 }
1750 
1751 void ShenandoahHeap::recycle_trash() {
1752   free_set()->recycle_trash();
1753 }
1754 
1755 class ShenandoahResetUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1756 private:
1757   ShenandoahMarkingContext* const _ctx;
1758 public:
1759   ShenandoahResetUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
1760 
1761   void heap_region_do(ShenandoahHeapRegion* r) {
1762     if (r->is_active()) {
1763       // Reset live data and set TAMS optimistically. We would recheck these under the pause
1764       // anyway to capture any updates that happen in the meantime.
1765       r->clear_live_data();
1766       _ctx->capture_top_at_mark_start(r);
1767     }
1768   }
1769 
1770   bool is_thread_safe() { return true; }
1771 };
1772 
1773 void ShenandoahHeap::prepare_gc() {
1774   reset_mark_bitmap();
1775 
1776   ShenandoahResetUpdateRegionStateClosure cl;
1777   parallel_heap_region_iterate(&cl);
1778 }
1779 
1780 class ShenandoahFinalMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1781 private:
1782   ShenandoahMarkingContext* const _ctx;
1783   ShenandoahHeapLock* const _lock;
1784 
1785 public:
1786   ShenandoahFinalMarkUpdateRegionStateClosure() :
1787     _ctx(ShenandoahHeap::heap()->complete_marking_context()), _lock(ShenandoahHeap::heap()->lock()) {}
1788 
1789   void heap_region_do(ShenandoahHeapRegion* r) {
1790     if (r->is_active()) {
1791       // All allocations past TAMS are implicitly live, adjust the region data.
1792       // Bitmaps/TAMS are swapped at this point, so we need to poll complete bitmap.
1793       HeapWord *tams = _ctx->top_at_mark_start(r);
1794       HeapWord *top = r->top();
1795       if (top > tams) {
1796         r->increase_live_data_alloc_words(pointer_delta(top, tams));
1797       }
1798 
1799       // We are about to select the collection set, make sure it knows about
1800       // current pinning status. Also, this allows trashing more regions that
1801       // now have their pinning status dropped.
1802       if (r->is_pinned()) {
1803         if (r->pin_count() == 0) {
1804           ShenandoahHeapLocker locker(_lock);
1805           r->make_unpinned();
1806         }
1807       } else {
1808         if (r->pin_count() > 0) {
1809           ShenandoahHeapLocker locker(_lock);
1810           r->make_pinned();
1811         }
1812       }
1813 
1814       // Remember the limit for updating refs. It is guaranteed that no
1815       // from-space refs are written from here on.
1816       r->set_update_watermark_at_safepoint(r->top());
1817     } else {
1818       assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
1819       assert(_ctx->top_at_mark_start(r) == r->top(),
1820              "Region " SIZE_FORMAT " should have correct TAMS", r->index());
1821     }
1822   }
1823 
1824   bool is_thread_safe() { return true; }
1825 };
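// Worked example for the TAMS adjustment above (illustrative numbers): if a
// region captured TAMS at word offset 1000 when marking started and its top is at
// offset 1400 by this safepoint, the 400 words allocated during marking carry no
// mark bits but are implicitly live, so the closure credits the region with 400
// extra live words before the collection set is chosen.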
1826 
1827 void ShenandoahHeap::prepare_regions_and_collection_set(bool concurrent) {
1828   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
1829   {
1830     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_update_region_states :
1831                                          ShenandoahPhaseTimings::degen_gc_final_update_region_states);
1832     ShenandoahFinalMarkUpdateRegionStateClosure cl;
1833     parallel_heap_region_iterate(&cl);
1834 
1835     assert_pinned_region_status();
1836   }
1837 
1838   {
1839     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::choose_cset :
1840                                          ShenandoahPhaseTimings::degen_gc_choose_cset);
1841     ShenandoahHeapLocker locker(lock());
1842     _collection_set->clear();
1843     heuristics()->choose_collection_set(_collection_set);
1844   }
1845 
1846   {
1847     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset :
1848                                          ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
1849     ShenandoahHeapLocker locker(lock());
1850     _free_set->rebuild();
1851   }
1852 }
1853 
1854 void ShenandoahHeap::do_class_unloading() {
1855   _unloader.unload();



1856 }
1857 
1858 void ShenandoahHeap::stw_weak_refs(bool full_gc) {
1859   // Weak refs processing
1860   ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
1861                                                 : ShenandoahPhaseTimings::degen_gc_weakrefs;
1862   ShenandoahTimingsTracker t(phase);
1863   ShenandoahGCWorkerPhase worker_phase(phase);
1864   ref_processor()->process_references(phase, workers(), false /* concurrent */);

1865 }
1866 
1867 void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
1868   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
1869 
1870   // Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to
1871   // make them parsable for update code to work correctly. Plus, we can compute new sizes
1872   // for future GCLABs here.
1873   if (UseTLAB) {
1874     ShenandoahGCPhase phase(concurrent ?
1875                             ShenandoahPhaseTimings::init_update_refs_manage_gclabs :
1876                             ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
1877     gclabs_retire(ResizeTLAB);
1878   }
1879 
1880   _update_refs_iterator.reset();
1881 }
1882 
1883 void ShenandoahHeap::propagate_gc_state_to_java_threads() {
1884   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
1885   if (_gc_state_changed) {
1886     _gc_state_changed = false;
1887     char state = gc_state();
1888     for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1889       ShenandoahThreadLocalData::set_gc_state(t, state);
1890     }
1891   }
1892 }
1893 
1894 void ShenandoahHeap::set_gc_state(uint mask, bool value) {
1895   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
1896   _gc_state.set_cond(mask, value);
1897   _gc_state_changed = true;


















1898 }
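// Splitting the state between the global _gc_state and a per-thread copy keeps
// barrier fast paths cheap: mutators test a thread-local byte rather than shared
// state, and the copy is only refreshed at a safepoint once the global has
// changed. A minimal sketch of such a test, assuming HAS_FORWARDED is the
// relevant state bit (hypothetical helper, not the real barrier code):
//
//   static inline bool needs_lrb_slow_path(char thread_local_gc_state) {
//     return (thread_local_gc_state & HAS_FORWARDED) != 0;
//   }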
1899 
1900 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1901   assert(!has_forwarded_objects(), "Not expected before/after mark phase");
1902   set_gc_state(MARKING, in_progress);
1903   ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);

































1904 }
1905 
1906 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1907   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
1908   set_gc_state(EVACUATION, in_progress);
1909 }
1910 
1911 void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
1912   if (in_progress) {
1913     _concurrent_strong_root_in_progress.set();
1914   } else {
1915     _concurrent_strong_root_in_progress.unset();
1916   }
1917 }
1918 
1919 void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
1920   set_gc_state(WEAK_ROOTS, cond);
1921 }
1922 
1923 GCTracer* ShenandoahHeap::tracer() {
1924   return shenandoah_policy()->tracer();
1925 }
1926 
1927 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
1928   return _free_set->used();
1929 }
1930 
1931 bool ShenandoahHeap::try_cancel_gc() {
1932   jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
1933   return prev == CANCELLABLE;
1934 }
1935 











1936 void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
1937   if (try_cancel_gc()) {
1938     FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
1939     log_info(gc)("%s", msg.buffer());
1940     Events::log(Thread::current(), "%s", msg.buffer());

1941   }
1942 }
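// Only one caller can win the CANCELLABLE -> CANCELLED transition above, so when
// many threads hit allocation failures at once the cancellation is logged and
// recorded exactly once. A minimal standalone sketch of that idempotent-cancel
// shape (hypothetical names, std::atomic in place of the VM primitive):
//
//   #include <atomic>
//
//   enum CancelState : int { CANCELLABLE = 0, CANCELLED = 1 };
//
//   bool try_cancel(std::atomic<int>& state) {
//     int expected = CANCELLABLE;
//     // True for exactly one caller; later callers observe CANCELLED and back off.
//     return state.compare_exchange_strong(expected, CANCELLED);
//   }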
1943 
1944 uint ShenandoahHeap::max_workers() {
1945   return _max_workers;
1946 }
1947 
1948 void ShenandoahHeap::stop() {
1949   // The shutdown sequence should be able to terminate when GC is running.
1950 
1951   // Step 0. Notify policy to disable event recording and prevent visiting gc threads during shutdown
1952   _shenandoah_policy->record_shutdown();
1953 
1954   // Step 1. Notify control thread that we are in shutdown.
1955   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
1956   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
1957   control_thread()->prepare_for_graceful_shutdown();
1958 
1959   // Step 2. Notify GC workers that we are cancelling GC.
1960   cancel_gc(GCCause::_shenandoah_stop_vm);

2044 }
2045 
2046 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
2047   set_gc_state(HAS_FORWARDED, cond);
2048 }
2049 
2050 void ShenandoahHeap::set_unload_classes(bool uc) {
2051   _unload_classes.set_cond(uc);
2052 }
2053 
2054 bool ShenandoahHeap::unload_classes() const {
2055   return _unload_classes.is_set();
2056 }
2057 
2058 address ShenandoahHeap::in_cset_fast_test_addr() {
2059   ShenandoahHeap* heap = ShenandoahHeap::heap();
2060   assert(heap->collection_set() != nullptr, "Sanity");
2061   return (address) heap->collection_set()->biased_map_address();
2062 }
2063 
2064 size_t ShenandoahHeap::bytes_allocated_since_gc_start() const {
2065   return Atomic::load(&_bytes_allocated_since_gc_start);
2066 }
2067 
2068 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
2069   Atomic::store(&_bytes_allocated_since_gc_start, (size_t)0);





2070 }
2071 
2072 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
2073   _degenerated_gc_in_progress.set_cond(in_progress);
2074 }
2075 
2076 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2077   _full_gc_in_progress.set_cond(in_progress);
2078 }
2079 
2080 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2081   assert (is_full_gc_in_progress(), "should be");
2082   _full_gc_move_in_progress.set_cond(in_progress);
2083 }
2084 
2085 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2086   set_gc_state(UPDATEREFS, in_progress);
2087 }
2088 
2089 void ShenandoahHeap::register_nmethod(nmethod* nm) {

2113     if (r->is_active()) {
2114       if (r->is_pinned()) {
2115         if (r->pin_count() == 0) {
2116           r->make_unpinned();
2117         }
2118       } else {
2119         if (r->pin_count() > 0) {
2120           r->make_pinned();
2121         }
2122       }
2123     }
2124   }
2125 
2126   assert_pinned_region_status();
2127 }
2128 
2129 #ifdef ASSERT
2130 void ShenandoahHeap::assert_pinned_region_status() {
2131   for (size_t i = 0; i < num_regions(); i++) {
2132     ShenandoahHeapRegion* r = get_region(i);
2133     assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
2134            "Region " SIZE_FORMAT " pinning status is inconsistent", i);



2135   }
2136 }
2137 #endif
2138 
2139 ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
2140   return _gc_timer;
2141 }
2142 
2143 void ShenandoahHeap::prepare_concurrent_roots() {
2144   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2145   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2146   set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
2147   set_concurrent_weak_root_in_progress(true);
2148   if (unload_classes()) {
2149     _unloader.prepare();
2150   }
2151 }
2152 
2153 void ShenandoahHeap::finish_concurrent_roots() {
2154   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

2169   } else {
2170     // Use ConcGCThreads outside safepoints
2171     assert(nworkers == ConcGCThreads, "Use ConcGCThreads (%u) outside safepoints, %u",
2172            ConcGCThreads, nworkers);
2173   }
2174 }
2175 #endif
2176 
2177 ShenandoahVerifier* ShenandoahHeap::verifier() {
2178   guarantee(ShenandoahVerify, "Should be enabled");
2179   assert (_verifier != nullptr, "sanity");
2180   return _verifier;
2181 }
2182 
2183 template<bool CONCURRENT>
2184 class ShenandoahUpdateHeapRefsTask : public WorkerTask {
2185 private:
2186   ShenandoahHeap* _heap;
2187   ShenandoahRegionIterator* _regions;
2188 public:
2189   ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions) :
2190     WorkerTask("Shenandoah Update References"),
2191     _heap(ShenandoahHeap::heap()),
2192     _regions(regions) {
2193   }
2194 
2195   void work(uint worker_id) {
2196     if (CONCURRENT) {
2197       ShenandoahConcurrentWorkerSession worker_session(worker_id);
2198       ShenandoahSuspendibleThreadSetJoiner stsj;
2199       do_work<ShenandoahConcUpdateRefsClosure>(worker_id);
2200     } else {
2201       ShenandoahParallelWorkerSession worker_session(worker_id);
2202       do_work<ShenandoahNonConcUpdateRefsClosure>(worker_id);
2203     }
2204   }
2205 
2206 private:
2207   template<class T>
2208   void do_work(uint worker_id) {
2209     T cl;
2210     if (CONCURRENT && (worker_id == 0)) {
2211       // We ask the first worker to replenish the Mutator free set by moving regions previously reserved to hold the
2212       // results of evacuation.  These reserves are no longer necessary because evacuation has completed.
2213       size_t cset_regions = _heap->collection_set()->count();
2214       // We cannot transfer any more regions than will be reclaimed when the existing collection set is recycled because
2215       // we need the reclaimed collection set regions to replenish the collector reserves



2216       _heap->free_set()->move_regions_from_collector_to_mutator(cset_regions);
2217     }
2218     // If !CONCURRENT, there's no value in expanding Mutator free set
2219 
2220     ShenandoahHeapRegion* r = _regions->next();
2221     ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
2222     while (r != nullptr) {
2223       HeapWord* update_watermark = r->get_update_watermark();
2224       assert (update_watermark >= r->bottom(), "sanity");
2225       if (r->is_active() && !r->is_cset()) {
2226         _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2227       }
2228       if (ShenandoahPacing) {
2229         _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
2230       }
2231       if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
2232         return;
2233       }
2234       r = _regions->next();
2235     }
2236   }
2237 };
2238 
2239 void ShenandoahHeap::update_heap_references(bool concurrent) {
2240   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2241 
2242   if (concurrent) {
2243     ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator);
2244     workers()->run_task(&task);
2245   } else {
2246     ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator);
2247     workers()->run_task(&task);
2248   }
2249 }
2250 

2251 
2252 class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
2253 private:
2254   ShenandoahHeapLock* const _lock;
2255 
2256 public:
2257   ShenandoahFinalUpdateRefsUpdateRegionStateClosure() : _lock(ShenandoahHeap::heap()->lock()) {}
2258 
2259   void heap_region_do(ShenandoahHeapRegion* r) {
2260     // Drop unnecessary "pinned" state from regions that do not have CP marks
2261     // anymore, as this allows them to be trashed.
2262 
2263     if (r->is_active()) {
2264       if (r->is_pinned()) {
2265         if (r->pin_count() == 0) {
2266           ShenandoahHeapLocker locker(_lock);
2267           r->make_unpinned();
2268         }
2269       } else {
2270         if (r->pin_count() > 0) {
2271           ShenandoahHeapLocker locker(_lock);
2272           r->make_pinned();
2273         }
2274       }
2275     }
2276   }
2277 
2278   bool is_thread_safe() { return true; }
2279 };
2280 
2281 void ShenandoahHeap::update_heap_region_states(bool concurrent) {
2282   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2283   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2284 
2285   {
2286     ShenandoahGCPhase phase(concurrent ?
2287                             ShenandoahPhaseTimings::final_update_refs_update_region_states :
2288                             ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
2289     ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl;
2290     parallel_heap_region_iterate(&cl);
2291 
2292     assert_pinned_region_status();
2293   }
2294 
2295   {
2296     ShenandoahGCPhase phase(concurrent ?
2297                             ShenandoahPhaseTimings::final_update_refs_trash_cset :
2298                             ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
2299     trash_cset_regions();
2300   }
2301 }
2302 





2303 void ShenandoahHeap::rebuild_free_set(bool concurrent) {
2304   {
2305     ShenandoahGCPhase phase(concurrent ?
2306                             ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
2307                             ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
2308     ShenandoahHeapLocker locker(lock());
2309     _free_set->rebuild();




































2310   }
2311 }
2312 
2313 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2314   print_on(st);
2315   st->cr();
2316   print_heap_regions_on(st);
2317 }
2318 
2319 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2320   size_t slice = r->index() / _bitmap_regions_per_slice;
2321 
2322   size_t regions_from = _bitmap_regions_per_slice * slice;
2323   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2324   for (size_t g = regions_from; g < regions_to; g++) {
2325     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2326     if (skip_self && g == r->index()) continue;
2327     if (get_region(g)->is_committed()) {
2328       return true;
2329     }

2412 void ShenandoahHeap::initialize_serviceability() {
2413   _memory_pool = new ShenandoahMemoryPool(this);
2414   _cycle_memory_manager.add_pool(_memory_pool);
2415   _stw_memory_manager.add_pool(_memory_pool);
2416 }
2417 
2418 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2419   GrowableArray<GCMemoryManager*> memory_managers(2);
2420   memory_managers.append(&_cycle_memory_manager);
2421   memory_managers.append(&_stw_memory_manager);
2422   return memory_managers;
2423 }
2424 
2425 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2426   GrowableArray<MemoryPool*> memory_pools(1);
2427   memory_pools.append(_memory_pool);
2428   return memory_pools;
2429 }
2430 
2431 MemoryUsage ShenandoahHeap::memory_usage() {
2432   return _memory_pool->get_memory_usage();
2433 }
2434 
2435 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2436   _heap(ShenandoahHeap::heap()),
2437   _index(0) {}
2438 
2439 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2440   _heap(heap),
2441   _index(0) {}
2442 
2443 void ShenandoahRegionIterator::reset() {
2444   _index = 0;
2445 }
2446 
2447 bool ShenandoahRegionIterator::has_next() const {
2448   return _index < _heap->num_regions();
2449 }
2450 
2451 char ShenandoahHeap::gc_state() const {
2452   return _gc_state.raw_value();

2554   }
2555 
2556   // No unclaimed tail at the end of archive space.
2557   assert(cur == end,
2558          "Archive space should be fully used: " PTR_FORMAT " " PTR_FORMAT,
2559          p2i(cur), p2i(end));
2560 
2561   // Region bounds are good.
2562   ShenandoahHeapRegion* begin_reg = heap_region_containing(start);
2563   ShenandoahHeapRegion* end_reg = heap_region_containing(end);
2564   assert(begin_reg->is_regular(), "Must be");
2565   assert(end_reg->is_regular(), "Must be");
2566   assert(begin_reg->bottom() == start,
2567          "Must agree: archive-space-start: " PTR_FORMAT ", begin-region-bottom: " PTR_FORMAT,
2568          p2i(start), p2i(begin_reg->bottom()));
2569   assert(end_reg->top() == end,
2570          "Must agree: archive-space-end: " PTR_FORMAT ", end-region-top: " PTR_FORMAT,
2571          p2i(end), p2i(end_reg->top()));
2572 #endif
2573 }
























   1 /*
   2  * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.
   4  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
   5  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   6  *
   7  * This code is free software; you can redistribute it and/or modify it
   8  * under the terms of the GNU General Public License version 2 only, as
   9  * published by the Free Software Foundation.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 #include "precompiled.hpp"
  28 #include "memory/allocation.hpp"
  29 #include "memory/universe.hpp"
  30 
  31 #include "gc/shared/classUnloadingContext.hpp"
  32 #include "gc/shared/fullGCForwarding.hpp"
  33 #include "gc/shared/gcArguments.hpp"
  34 #include "gc/shared/gcTimer.hpp"
  35 #include "gc/shared/gcTraceTime.inline.hpp"
  36 #include "gc/shared/locationPrinter.inline.hpp"
  37 #include "gc/shared/memAllocator.hpp"
  38 #include "gc/shared/plab.hpp"
  39 #include "gc/shared/tlab_globals.hpp"
  40 
  41 #include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
  42 #include "gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp"
  43 #include "gc/shenandoah/shenandoahAllocRequest.hpp"
  44 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  45 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  46 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  47 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  48 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
  49 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  50 #include "gc/shenandoah/shenandoahControlThread.hpp"
  51 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  52 #include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
  53 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
  54 #include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
  55 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  56 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  57 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  58 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  59 #include "gc/shenandoah/shenandoahInitLogger.hpp"
  60 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  61 #include "gc/shenandoah/shenandoahMemoryPool.hpp"

  62 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  63 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  64 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
  65 #include "gc/shenandoah/shenandoahPadding.hpp"
  66 #include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
  67 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  68 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  69 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
  70 #include "gc/shenandoah/shenandoahSTWMark.hpp"
  71 #include "gc/shenandoah/shenandoahUtils.hpp"
  72 #include "gc/shenandoah/shenandoahVerifier.hpp"
  73 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  74 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  75 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  76 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  77 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  78 #include "gc/shenandoah/mode/shenandoahGenerationalMode.hpp"
  79 #include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
  80 #include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
  81 #include "utilities/globalDefinitions.hpp"
  82 
  83 #if INCLUDE_JFR
  84 #include "gc/shenandoah/shenandoahJfrSupport.hpp"
  85 #endif
  86 
  87 #include "cds/archiveHeapWriter.hpp"
  88 #include "classfile/systemDictionary.hpp"
  89 #include "code/codeCache.hpp"
  90 #include "memory/classLoaderMetaspace.hpp"
  91 #include "memory/metaspaceUtils.hpp"
  92 #include "nmt/mallocTracker.hpp"
  93 #include "nmt/memTracker.hpp"
  94 #include "oops/compressedOops.inline.hpp"
  95 #include "prims/jvmtiTagMap.hpp"
  96 #include "runtime/atomic.hpp"
  97 #include "runtime/globals.hpp"
  98 #include "runtime/interfaceSupport.inline.hpp"
  99 #include "runtime/java.hpp"
 100 #include "runtime/orderAccess.hpp"
 101 #include "runtime/safepointMechanism.hpp"
 102 #include "runtime/stackWatermarkSet.hpp"

 156 jint ShenandoahHeap::initialize() {
 157   //
 158   // Figure out heap sizing
 159   //
 160 
 161   size_t init_byte_size = InitialHeapSize;
 162   size_t min_byte_size  = MinHeapSize;
 163   size_t max_byte_size  = MaxHeapSize;
 164   size_t heap_alignment = HeapAlignment;
 165 
 166   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 167 
 168   Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
 169   Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
 170 
 171   _num_regions = ShenandoahHeapRegion::region_count();
 172   assert(_num_regions == (max_byte_size / reg_size_bytes),
 173          "Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT,
 174          _num_regions, max_byte_size, reg_size_bytes);
 175 



 176   size_t num_committed_regions = init_byte_size / reg_size_bytes;
 177   num_committed_regions = MIN2(num_committed_regions, _num_regions);
 178   assert(num_committed_regions <= _num_regions, "sanity");
 179   _initial_size = num_committed_regions * reg_size_bytes;
 180 
 181   size_t num_min_regions = min_byte_size / reg_size_bytes;
 182   num_min_regions = MIN2(num_min_regions, _num_regions);
 183   assert(num_min_regions <= _num_regions, "sanity");
 184   _minimum_size = num_min_regions * reg_size_bytes;
 185 
 186   // Default to max heap size.
 187   _soft_max_size = _num_regions * reg_size_bytes;
 188 
 189   _committed = _initial_size;
 190 
 191   size_t heap_page_size   = UseLargePages ? os::large_page_size() : os::vm_page_size();
 192   size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 193   size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 194 
 195   //

 209                                           heap_rs.size(), heap_rs.page_size());
 210 
 211 #if SHENANDOAH_OPTIMIZED_MARKTASK
 212   // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
 213   // Fail if we ever attempt to address more than we can.
 214   if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
 215     FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
 216                           "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
 217                           "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
 218                 p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
 219     vm_exit_during_initialization("Fatal Error", buf);
 220   }
 221 #endif
 222 
 223   ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
 224   if (!_heap_region_special) {
 225     os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
 226                               "Cannot commit heap memory");
 227   }
 228 
 229   BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this, _heap_region));
 230 
 231   // Now that we know the number of regions and the heap sizes, initialize the heuristics.
 232   initialize_heuristics();
 233 
 234   assert(_heap_region.byte_size() == heap_rs.size(), "Need to know reserved size for card table");
 235 
 236   //
 237   // Worker threads must be initialized after the barrier is configured
 238   //
 239   _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
 240   if (_workers == nullptr) {
 241     vm_exit_during_initialization("Failed necessary allocation.");
 242   } else {
 243     _workers->initialize_workers();
 244   }
 245 
 246   if (ParallelGCThreads > 1) {
 247     _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread", ParallelGCThreads);
 248     _safepoint_workers->initialize_workers();
 249   }
 250 
 251   //
 252   // Reserve and commit memory for bitmap(s)
 253   //
 254 
 255   size_t bitmap_size_orig = ShenandoahMarkBitMap::compute_size(heap_rs.size());
 256   _bitmap_size = align_up(bitmap_size_orig, bitmap_page_size);
 257 
 258   size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();
 259 
 260   guarantee(bitmap_bytes_per_region != 0,
 261             "Bitmap bytes per region should not be zero");
 262   guarantee(is_power_of_2(bitmap_bytes_per_region),
 263             "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
 264 
 265   if (bitmap_page_size > bitmap_bytes_per_region) {
 266     _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
 267     _bitmap_bytes_per_slice = bitmap_page_size;
 268   } else {
 269     _bitmap_regions_per_slice = 1;
 270     _bitmap_bytes_per_slice = bitmap_bytes_per_region;
 271   }
 272 
 273   guarantee(_bitmap_regions_per_slice >= 1,
 274             "Should have at least one region per slice: " SIZE_FORMAT,
 275             _bitmap_regions_per_slice);
 276 
 277   guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
 278             "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
 279             _bitmap_bytes_per_slice, bitmap_page_size);
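// For illustration only, a worked sizing example with assumed values (2 MB regions,
// heap_map_factor() == 64, i.e. one mark-bitmap byte covers 64 heap bytes):
//   bitmap_bytes_per_region = 2 MB / 64 = 32 KB
//   - with 4 KB pages:       4 KB <= 32 KB -> 1 region per slice,   slice = 32 KB
//   - with 2 MB large pages: 2 MB > 32 KB  -> 64 regions per slice, slice = 2 MB
// Either way each slice is a whole number of bitmap pages, which is exactly what the
// guarantees above check.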
 280 
 281   ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
 282   os::trace_page_sizes_for_requested_size("Mark Bitmap",
 283                                           bitmap_size_orig, bitmap_page_size,
 284                                           bitmap.base(),
 285                                           bitmap.size(), bitmap.page_size());
 286   MemTracker::record_virtual_memory_tag(bitmap.base(), mtGC);
 287   _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
 288   _bitmap_region_special = bitmap.special();
 289 
 290   size_t bitmap_init_commit = _bitmap_bytes_per_slice *
 291     align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
 292   bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
 293   if (!_bitmap_region_special) {
 294     os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
 295                               "Cannot commit bitmap memory");
 296   }
 297 
 298   _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);
 299 
 300   if (ShenandoahVerify) {
 301     ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
 302     os::trace_page_sizes_for_requested_size("Verify Bitmap",
 303                                             bitmap_size_orig, bitmap_page_size,
 304                                             verify_bitmap.base(),
 305                                             verify_bitmap.size(), verify_bitmap.page_size());
 306     if (!verify_bitmap.special()) {
 307       os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
 308                                 "Cannot commit verification bitmap memory");
 309     }
 310     MemTracker::record_virtual_memory_tag(verify_bitmap.base(), mtGC);
 311     MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
 312     _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
 313     _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
 314   }
 315 
 316   // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
 317   size_t aux_bitmap_page_size = bitmap_page_size;
 318 

 362       assert(is_aligned(req_addr, cset_align), "Should be aligned");
 363       cset_rs = ReservedSpace(cset_size, cset_align, cset_page_size, req_addr);
 364       if (cset_rs.is_reserved()) {
 365         assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
 366         _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 367         break;
 368       }
 369     }
 370 
 371     if (_collection_set == nullptr) {
 372       cset_rs = ReservedSpace(cset_size, cset_align, os::vm_page_size());
 373       _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
 374     }
 375     os::trace_page_sizes_for_requested_size("Collection Set",
 376                                             cset_size, cset_page_size,
 377                                             cset_rs.base(),
 378                                             cset_rs.size(), cset_rs.page_size());
 379   }
 380 
 381   _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
 382   _affiliations = NEW_C_HEAP_ARRAY(uint8_t, _num_regions, mtGC);
 383   _free_set = new ShenandoahFreeSet(this, _num_regions);
 384 
 385   {
 386     ShenandoahHeapLocker locker(lock());
 387 
 388     for (size_t i = 0; i < _num_regions; i++) {
 389       HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
 390       bool is_committed = i < num_committed_regions;
 391       void* loc = region_storage.base() + i * region_align;
 392 
 393       ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
 394       assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");
 395 
 396       _marking_context->initialize_top_at_mark_start(r);
 397       _regions[i] = r;
 398       assert(!collection_set()->is_in(i), "New region should not be in collection set");
 399 
 400       _affiliations[i] = ShenandoahAffiliation::FREE;
 401     }
 402 
 403     // Initialize to complete
 404     _marking_context->mark_complete();
 405     size_t young_cset_regions, old_cset_regions;
 406 
 407     // We are initializing the free set. We ignore cset region tallies.
 408     size_t first_old, last_old, num_old;
 409     _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
 410     _free_set->finish_rebuild(young_cset_regions, old_cset_regions, num_old);
 411   }
 412 
 413   if (AlwaysPreTouch) {
 414     // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
 415     // before initialize() below zeroes it with the initializing thread. For any given region,
 416     // we touch the region and the corresponding bitmaps from the same thread.
 417     ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
 418 
 419     _pretouch_heap_page_size = heap_page_size;
 420     _pretouch_bitmap_page_size = bitmap_page_size;
 421 
 422     // OS memory managers may want to coalesce back-to-back pages. Make their jobs
 423     // simpler by pre-touching contiguous spaces (heap and bitmap) separately.
 424 
 425     ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, _pretouch_bitmap_page_size);
 426     _workers->run_task(&bcl);
 427 
 428     ShenandoahPretouchHeapTask hcl(_pretouch_heap_page_size);
 429     _workers->run_task(&hcl);
 430   }

 439     Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
 440   }
 441 
 442   // There should probably be Shenandoah-specific options for these,
 443   // just as there are G1-specific options.
 444   {
 445     ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
 446     satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
 447     satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
 448   }
 449 
 450   _monitoring_support = new ShenandoahMonitoringSupport(this);
 451   _phase_timings = new ShenandoahPhaseTimings(max_workers());
 452   ShenandoahCodeRoots::initialize();
 453 
 454   if (ShenandoahPacing) {
 455     _pacer = new ShenandoahPacer(this);
 456     _pacer->setup_for_idle();
 457   }
 458 
 459   initialize_controller();
 460 
 461   print_init_logger();
 462 
 463   FullGCForwarding::initialize(_heap_region);
 464 
 465   return JNI_OK;
 466 }
 467 
 468 void ShenandoahHeap::initialize_controller() {
 469   _control_thread = new ShenandoahControlThread();
 470 }
 471 
 472 void ShenandoahHeap::print_init_logger() const {
 473   ShenandoahInitLogger::print();
 474 }
 475 
 476 void ShenandoahHeap::initialize_mode() {
 477   if (ShenandoahGCMode != nullptr) {
 478     if (strcmp(ShenandoahGCMode, "satb") == 0) {
 479       _gc_mode = new ShenandoahSATBMode();
 480     } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
 481       _gc_mode = new ShenandoahPassiveMode();
 482     } else if (strcmp(ShenandoahGCMode, "generational") == 0) {
 483       _gc_mode = new ShenandoahGenerationalMode();
 484     } else {
 485       vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
 486     }
 487   } else {
 488     vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
 489   }
 490   _gc_mode->initialize_flags();
 491   if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 492     vm_exit_during_initialization(
 493             err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 494                     _gc_mode->name()));
 495   }
 496   if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
 497     vm_exit_during_initialization(
 498             err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 499                     _gc_mode->name()));
 500   }
 501 }
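// For illustration only, the command lines that exercise the mode selection above
// (whether a mode needs unlocking depends on how it is classified in a given release):
//
//   java -XX:+UseShenandoahGC -XX:ShenandoahGCMode=satb ...
//   java -XX:+UseShenandoahGC -XX:+UnlockDiagnosticVMOptions -XX:ShenandoahGCMode=passive ...
//   java -XX:+UseShenandoahGC -XX:+UnlockExperimentalVMOptions -XX:ShenandoahGCMode=generational ...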
 502 
 503 void ShenandoahHeap::initialize_heuristics() {
 504   _global_generation = new ShenandoahGlobalGeneration(mode()->is_generational(), max_workers(), max_capacity(), max_capacity());
 505   _global_generation->initialize_heuristics(mode());











 506 }
 507 
 508 #ifdef _MSC_VER
 509 #pragma warning( push )
 510 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 511 #endif
 512 
 513 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 514   CollectedHeap(),
 515   _gc_generation(nullptr),
 516   _active_generation(nullptr),
 517   _initial_size(0),

 518   _committed(0),
 519   _max_workers(MAX3(ConcGCThreads, ParallelGCThreads, 1U)),

 520   _workers(nullptr),
 521   _safepoint_workers(nullptr),
 522   _heap_region_special(false),
 523   _num_regions(0),
 524   _regions(nullptr),
 525   _affiliations(nullptr),
 526   _gc_state_changed(false),
 527   _gc_no_progress_count(0),
 528   _cancel_requested_time(0),
 529   _update_refs_iterator(this),
 530   _global_generation(nullptr),
 531   _control_thread(nullptr),
 532   _young_generation(nullptr),
 533   _old_generation(nullptr),
 534   _shenandoah_policy(policy),
 535   _gc_mode(nullptr),

 536   _free_set(nullptr),
 537   _pacer(nullptr),
 538   _verifier(nullptr),
 539   _phase_timings(nullptr),
 540   _mmu_tracker(),
 541   _monitoring_support(nullptr),
 542   _memory_pool(nullptr),
 543   _stw_memory_manager("Shenandoah Pauses"),
 544   _cycle_memory_manager("Shenandoah Cycles"),
 545   _gc_timer(new ConcurrentGCTimer()),
 546   _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),

 547   _marking_context(nullptr),
 548   _bitmap_size(0),
 549   _bitmap_regions_per_slice(0),
 550   _bitmap_bytes_per_slice(0),
 551   _bitmap_region_special(false),
 552   _aux_bitmap_region_special(false),
 553   _liveness_cache(nullptr),
 554   _collection_set(nullptr)
 555 {
 556   // Initialize GC mode early, as many subsequent initialization procedures depend on it
 557   initialize_mode();















 558 }
 559 
 560 #ifdef _MSC_VER
 561 #pragma warning( pop )
 562 #endif
 563 





























 564 void ShenandoahHeap::print_on(outputStream* st) const {
 565   st->print_cr("Shenandoah Heap");
 566   st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
 567                byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
 568                byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
 569                byte_size_in_proper_unit(committed()),    proper_unit_for_byte_size(committed()),
 570                byte_size_in_proper_unit(used()),         proper_unit_for_byte_size(used()));
 571   st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions",
 572                num_regions(),
 573                byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
 574                proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));
 575 
 576   st->print("Status: ");
 577   if (has_forwarded_objects())                 st->print("has forwarded objects, ");
 578   if (!mode()->is_generational()) {
 579     if (is_concurrent_mark_in_progress())      st->print("marking,");
 580   } else {
 581     if (is_concurrent_old_mark_in_progress())    st->print("old marking, ");
 582     if (is_concurrent_young_mark_in_progress())  st->print("young marking, ");
 583   }
 584   if (is_evacuation_in_progress())             st->print("evacuating, ");
 585   if (is_update_refs_in_progress())            st->print("updating refs, ");
 586   if (is_degenerated_gc_in_progress())         st->print("degenerated gc, ");
 587   if (is_full_gc_in_progress())                st->print("full gc, ");
 588   if (is_full_gc_move_in_progress())           st->print("full gc move, ");
 589   if (is_concurrent_weak_root_in_progress())   st->print("concurrent weak roots, ");
 590   if (is_concurrent_strong_root_in_progress() &&
 591       !is_concurrent_weak_root_in_progress())  st->print("concurrent strong roots, ");
 592 
 593   if (cancelled_gc()) {
 594     st->print("cancelled");
 595   } else {
 596     st->print("not cancelled");
 597   }
 598   st->cr();
 599 
 600   st->print_cr("Reserved region:");
 601   st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
 602                p2i(reserved_region().start()),
 603                p2i(reserved_region().end()));

 614   st->cr();
 615   MetaspaceUtils::print_on(st);
 616 
 617   if (Verbose) {
 618     st->cr();
 619     print_heap_regions_on(st);
 620   }
 621 }
 622 
 623 class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
 624 public:
 625   void do_thread(Thread* thread) {
 626     assert(thread != nullptr, "Sanity");
 627     assert(thread->is_Worker_thread(), "Only worker thread expected");
 628     ShenandoahThreadLocalData::initialize_gclab(thread);
 629   }
 630 };
 631 
 632 void ShenandoahHeap::post_initialize() {
 633   CollectedHeap::post_initialize();
 634   _mmu_tracker.initialize();
 635 
 636   MutexLocker ml(Threads_lock);
 637 
 638   ShenandoahInitWorkerGCLABClosure init_gclabs;
 639   _workers->threads_do(&init_gclabs);
 640 
 641   // The gclab cannot be initialized early during VM startup, as it cannot determine its max_size.
 642   // Instead, we let WorkerThreads initialize the gclab when a new worker is created.
 643   _workers->set_initialize_gclab();
 644   if (_safepoint_workers != nullptr) {
 645     _safepoint_workers->threads_do(&init_gclabs);
 646     _safepoint_workers->set_initialize_gclab();
 647   }
 648 


 649   JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers();)
 650 }
 651 
 652 ShenandoahHeuristics* ShenandoahHeap::heuristics() {
 653   return _global_generation->heuristics();
 654 }
 655 
 656 size_t ShenandoahHeap::used() const {
 657   return global_generation()->used();
 658 }
 659 
 660 size_t ShenandoahHeap::committed() const {
 661   return Atomic::load(&_committed);
 662 }
 663 




 664 void ShenandoahHeap::increase_committed(size_t bytes) {
 665   shenandoah_assert_heaplocked_or_safepoint();
 666   _committed += bytes;
 667 }
 668 
 669 void ShenandoahHeap::decrease_committed(size_t bytes) {
 670   shenandoah_assert_heaplocked_or_safepoint();
 671   _committed -= bytes;
 672 }
 673 
 674 // For tracking usage based on allocations, it should be the case that:
 675 // * The sum of regions::used == heap::used
 676 // * The sum of a generation's regions::used == generation::used
 677 // * The sum of a generation's humongous regions::free == generation::humongous_waste
 678 // These invariants are checked by the verifier on GC safepoints.
 679 //
 680 // Additional notes:
 681 // * When a mutator's allocation request causes a region to be retired, the
 682 //   free memory left in that region is considered waste. It does not contribute
 683 //   to the usage, but it _does_ contribute to allocation rate.
 684 // * The bottom of a PLAB must be aligned on card size. In some cases this will
 685 //   require padding in front of the PLAB (a filler object). Because this padding
 686 //   is included in the region's used memory we include the padding in the usage
 687 //   accounting as waste.
 688 // * Mutator allocations are used to compute an allocation rate. They are also
 689 //   sent to the Pacer for those purposes.
 690 // * There are three sources of waste:
 691 //  1. The padding used to align a PLAB on card size
 692 //  2. The leftover space in a region that is retired because its free memory is below the minimum TLAB size
 693 //  3. The unused portion of memory in the last region of a humongous object
 694 void ShenandoahHeap::increase_used(const ShenandoahAllocRequest& req) {
 695   size_t actual_bytes = req.actual_size() * HeapWordSize;
 696   size_t wasted_bytes = req.waste() * HeapWordSize;
 697   ShenandoahGeneration* generation = generation_for(req.affiliation());
 698 
 699   if (req.is_gc_alloc()) {
 700     assert(wasted_bytes == 0 || req.type() == ShenandoahAllocRequest::_alloc_plab, "Only PLABs have waste");
 701     increase_used(generation, actual_bytes + wasted_bytes);
 702   } else {
 703     assert(req.is_mutator_alloc(), "Expected mutator alloc here");
 704     // padding and actual size both count towards allocation counter
 705     generation->increase_allocated(actual_bytes + wasted_bytes);
 706 
 707     // only actual size counts toward usage for mutator allocations
 708     increase_used(generation, actual_bytes);
 709 
 710     // notify pacer of both actual size and waste
 711     notify_mutator_alloc_words(req.actual_size(), req.waste());
 712 
 713     if (wasted_bytes > 0 && ShenandoahHeapRegion::requires_humongous(req.actual_size())) {
 714       increase_humongous_waste(generation, wasted_bytes);
 715     }
 716   }
 717 }
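// For illustration only, with hypothetical numbers: a mutator TLAB request where
// req.actual_size() is 64 K words and req.waste() is 1 K words (the tail of a region
// that had to be retired) flows through the accounting above as follows:
//   - generation->increase_allocated() is passed all 65 K words (as bytes) -> allocation rate
//   - increase_used(generation, ...)   is passed only the 64 K words       -> usage
//   - notify_mutator_alloc_words(64 K, 1 K) reports both parts to the pacer
//   - the humongous-waste branch is skipped, since a TLAB is never humongous-sized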
 718 
 719 void ShenandoahHeap::increase_humongous_waste(ShenandoahGeneration* generation, size_t bytes) {
 720   generation->increase_humongous_waste(bytes);
 721   if (!generation->is_global()) {
 722     global_generation()->increase_humongous_waste(bytes);
 723   }
 724 }
 725 
 726 void ShenandoahHeap::decrease_humongous_waste(ShenandoahGeneration* generation, size_t bytes) {
 727   generation->decrease_humongous_waste(bytes);
 728   if (!generation->is_global()) {
 729     global_generation()->decrease_humongous_waste(bytes);
 730   }
 731 }
 732 
 733 void ShenandoahHeap::increase_used(ShenandoahGeneration* generation, size_t bytes) {
 734   generation->increase_used(bytes);
 735   if (!generation->is_global()) {
 736     global_generation()->increase_used(bytes);
 737   }
 738 }
 739 
 740 void ShenandoahHeap::decrease_used(ShenandoahGeneration* generation, size_t bytes) {
 741   generation->decrease_used(bytes);
 742   if (!generation->is_global()) {
 743     global_generation()->decrease_used(bytes);
 744   }
 745 }
 746 
 747 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, size_t waste) {
 748   if (ShenandoahPacing) {
 749     control_thread()->pacing_notify_alloc(words);
 750     if (waste > 0) {
 751       pacer()->claim_for_alloc<true>(waste);
 752     }
 753   }
 754 }
 755 
 756 size_t ShenandoahHeap::capacity() const {
 757   return committed();
 758 }
 759 
 760 size_t ShenandoahHeap::max_capacity() const {
 761   return _num_regions * ShenandoahHeapRegion::region_size_bytes();
 762 }
 763 
 764 size_t ShenandoahHeap::soft_max_capacity() const {
 765   size_t v = Atomic::load(&_soft_max_size);
 766   assert(min_capacity() <= v && v <= max_capacity(),
 767          "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
 768          min_capacity(), v, max_capacity());
 769   return v;
 770 }
 771 

 861   size_t old_soft_max = soft_max_capacity();
 862   if (new_soft_max != old_soft_max) {
 863     new_soft_max = MAX2(min_capacity(), new_soft_max);
 864     new_soft_max = MIN2(max_capacity(), new_soft_max);
 865     if (new_soft_max != old_soft_max) {
 866       log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
 867                    byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
 868                    byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
 869       );
 870       set_soft_max_capacity(new_soft_max);
 871       return true;
 872     }
 873   }
 874   return false;
 875 }
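// For illustration only, with hypothetical capacities min_capacity() = 1 G and
// max_capacity() = 8 G: a requested soft max of 16 G is clamped to 8 G, a request of
// 512 M is clamped to 1 G, and a request equal to the current soft max is a no-op
// (the method returns false). The request itself is expected to come from the
// manageable soft-max heap size flag, which can be updated at runtime.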
 876 
 877 void ShenandoahHeap::notify_heap_changed() {
 878   // Update monitoring counters when we take a new region. This amortizes the
 879   // update costs on the slow path.
 880   monitoring_support()->notify_heap_changed();
 881   _heap_changed.set();


 882 }
 883 
 884 void ShenandoahHeap::set_forced_counters_update(bool value) {
 885   monitoring_support()->set_forced_counters_update(value);
 886 }
 887 
 888 void ShenandoahHeap::handle_force_counters_update() {
 889   monitoring_support()->handle_force_counters_update();
 890 }
 891 
 892 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
 893   // New object should fit the GCLAB size
 894   size_t min_size = MAX2(size, PLAB::min_size());
 895 
 896   // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
 897   size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
 898 
 899   new_size = MIN2(new_size, PLAB::max_size());
 900   new_size = MAX2(new_size, PLAB::min_size());
 901 
 902   // Record new heuristic value even if we take any shortcut. This captures
 903   // the case when moderately-sized objects always take a shortcut. At some point,
 904   // heuristics should catch up with them.
 905   log_debug(gc, free)("Set new GCLAB size: " SIZE_FORMAT, new_size);
 906   ShenandoahThreadLocalData::set_gclab_size(thread, new_size);
 907 
 908   if (new_size < size) {
 909     // New size still does not fit the object. Fall back to shared allocation.
 910     // This avoids retiring perfectly good GCLABs, when we encounter a large object.
 911     log_debug(gc, free)("New gclab size (" SIZE_FORMAT ") is too small for " SIZE_FORMAT, new_size, size);
 912     return nullptr;
 913   }
 914 
 915   // Retire current GCLAB, and allocate a new one.
 916   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
 917   gclab->retire();
 918 
 919   size_t actual_size = 0;
 920   HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
 921   if (gclab_buf == nullptr) {
 922     return nullptr;
 923   }
 924 
 925   assert (size <= actual_size, "allocation should fit");
 926 
 927   // ...and clear or zap the just-allocated GCLAB, if needed.
 928   if (ZeroTLAB) {
 929     Copy::zero_to_words(gclab_buf, actual_size);
 930   } else if (ZapTLAB) {
 931     // Skip mangling the space corresponding to the object header to
 932     // ensure that the returned space is not considered parsable by
 933     // any concurrent GC thread.
 934     size_t hdr_size = oopDesc::header_size();
 935     Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
 936   }
 937   gclab->set_buf(gclab_buf, actual_size);
 938   return gclab->allocate(size);
 939 }
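// For illustration only, with hypothetical numbers: if the thread's recorded gclab
// size is 32 K words, the code above proposes
//   new_size = clamp(32 K * 2, PLAB::min_size(), PLAB::max_size()) = 64 K words
// and records it. An object larger than 64 K words then returns nullptr (falling back
// to a shared allocation) without retiring the current GCLAB; otherwise the old GCLAB
// is retired and a fresh one of up to 64 K words is allocated and handed to the thread.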
 940 
 941 // Called from stubs in JIT code or interpreter
 942 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
 943                                             size_t requested_size,
 944                                             size_t* actual_size) {
 945   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
 946   HeapWord* res = allocate_memory(req);
 947   if (res != nullptr) {
 948     *actual_size = req.actual_size();
 949   } else {
 950     *actual_size = 0;
 951   }
 952   return res;
 953 }
 954 
 955 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
 956                                              size_t word_size,
 957                                              size_t* actual_size) {
 958   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
 959   HeapWord* res = allocate_memory(req);
 960   if (res != nullptr) {
 961     *actual_size = req.actual_size();

 970   bool in_new_region = false;
 971   HeapWord* result = nullptr;
 972 
 973   if (req.is_mutator_alloc()) {
 974     if (ShenandoahPacing) {
 975       pacer()->pace_for_alloc(req.size());
 976       pacer_epoch = pacer()->epoch();
 977     }
 978 
 979     if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
 980       result = allocate_memory_under_lock(req, in_new_region);
 981     }
 982 
 983     // Check that gc overhead is not exceeded.
 984     //
 985     // Shenandoah will grind along for quite a while allocating one
 986     // object at a time using shared (non-tlab) allocations. This check
 987     // is testing that the GC overhead limit has not been exceeded.
 988     // This will notify the collector to start a cycle, but will raise
 989     // an OOME to the mutator if the last Full GCs have not made progress.
 990     // gc_no_progress_count is incremented following each degen or full GC that fails to achieve is_good_progress().
 991     if (result == nullptr && !req.is_lab_alloc() && get_gc_no_progress_count() > ShenandoahNoProgressThreshold) {
 992       control_thread()->handle_alloc_failure(req, false);
 993       req.set_actual_size(0);
 994       return nullptr;
 995     }
 996 
 997     if (result == nullptr) {
 998       // Block until control thread reacted, then retry allocation.
 999       //
1000       // It might happen that one of the threads requesting allocation would unblock
1001       // much later, after the GC has happened, only to fail the second allocation because
1002       // other threads have already depleted the free storage. In this case, a better
1003       // strategy is to try again, until at least one full GC has completed.
1004       //
1005       // Stop retrying and return nullptr (the caller will then raise an OutOfMemoryError) if our allocation failed even after:
1006       //   a) We experienced a GC that had good progress, or
1007       //   b) We experienced at least one Full GC (whether or not it had good progress)


1008 
1009       size_t original_count = shenandoah_policy()->full_gc_count();
1010       while ((result == nullptr) && (original_count == shenandoah_policy()->full_gc_count())) {
1011         control_thread()->handle_alloc_failure(req, true);
1012         result = allocate_memory_under_lock(req, in_new_region);
1013       }
1014       if (result != nullptr) {
1015         // If our allocation request has been satisfied after it initially failed, we count this as good gc progress
1016         notify_gc_progress();
1017       }
1018       if (log_develop_is_enabled(Debug, gc, alloc)) {
1019         ResourceMark rm;
1020         log_debug(gc, alloc)("Thread: %s, Result: " PTR_FORMAT ", Request: %s, Size: " SIZE_FORMAT
1021                              ", Original: " SIZE_FORMAT ", Latest: " SIZE_FORMAT,
1022                              Thread::current()->name(), p2i(result), req.type_string(), req.size(),
1023                              original_count, get_gc_no_progress_count());
1024       }
1025     }
1026   } else {
1027     assert(req.is_gc_alloc(), "Can only accept GC allocs here");
1028     result = allocate_memory_under_lock(req, in_new_region);
1029     // Do not call handle_alloc_failure() here, because we cannot block.
1030     // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
1031   }
1032 
1033   if (in_new_region) {
1034     notify_heap_changed();
1035   }
1036 
1037   if (result == nullptr) {
1038     req.set_actual_size(0);
1039   }
1040 
1041   // This is called regardless of the outcome of the allocation to account
1042   // for any waste created by retiring regions with this request.
1043   increase_used(req);
1044 
1045   if (result != nullptr) {
1046     size_t requested = req.size();
1047     size_t actual = req.actual_size();
1048 
1049     assert (req.is_lab_alloc() || (requested == actual),
1050             "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
1051             ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);
1052 
1053     if (req.is_mutator_alloc()) {


1054       // If we requested more than we were granted, give the rest back to pacer.
1055       // This only matters if we are in the same pacing epoch: do not try to unpace
1056       // over the budget for the other phase.
1057       if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
1058         pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
1059       }


1060     }
1061   }
1062 
1063   return result;
1064 }
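// For illustration only, the mutator-side failure policy above in outline:
//   - a failed shared (non-LAB) allocation with gc_no_progress_count above
//     ShenandoahNoProgressThreshold notifies the control thread but returns nullptr
//     immediately, letting the caller surface the OOM condition;
//   - any other failed mutator allocation blocks in handle_alloc_failure() and retries,
//     giving up only once a Full GC has completed without satisfying the request.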
1065 
1066 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
1067   // If we are dealing with mutator allocation, then we may need to block for safepoint.
1068   // We cannot block for safepoint for GC allocations, because there is a high chance
1069   // we are already running at safepoint or from stack watermark machinery, and we cannot
1070   // block again.
1071   ShenandoahHeapLocker locker(lock(), req.is_mutator_alloc());
1072 
1073   // Make sure the old generation has room for either evacuations or promotions before trying to allocate.
1074   if (req.is_old() && !old_generation()->can_allocate(req)) {
1075     return nullptr;
1076   }
1077 
1078   // If TLAB request size is greater than available, allocate() will attempt to downsize request to fit within available
1079   // memory.
1080   HeapWord* result = _free_set->allocate(req, in_new_region);
1081 
1082   // Record the plab configuration for this result and register the object.
1083   if (result != nullptr && req.is_old()) {
1084     old_generation()->configure_plab_for_current_thread(req);
1085     if (req.type() == ShenandoahAllocRequest::_alloc_shared_gc) {
1086       // Register the newly allocated object while we're holding the global lock since there's no synchronization
1087       // built in to the implementation of register_object().  There are potential races when multiple independent
1088       // threads are allocating objects, some of which might span the same card region.  For example, consider
1089       // a card table's memory region within which three objects are being allocated by three different threads:
1090       //
1091       // objects being "concurrently" allocated:
1092       //    [-----a------][-----b-----][--------------c------------------]
1093       //            [---- card table memory range --------------]
1094       //
1095       // Before any objects are allocated, this card's memory range holds no objects.  Note that allocation of object a
1096       // wants to set the starts-object, first-start, and last-start attributes of the preceding card region.
1097       // Allocation of object b wants to set the starts-object, first-start, and last-start attributes of this card region.
1098       // Allocation of object c also wants to set the starts-object, first-start, and last-start attributes of this
1099       // card region.
1100       //
1101       // The thread allocating b and the thread allocating c can "race" in various ways, resulting in confusion, such as
1102       // last-start representing object b while first-start represents object c.  This is why we need to require all
1103       // register_object() invocations to be "mutually exclusive" with respect to each card's memory range.
1104       old_generation()->card_scan()->register_object(result);
1105     }
1106   }
1107 
1108   return result;
1109 }
1110 
1111 HeapWord* ShenandoahHeap::mem_allocate(size_t size,
1112                                         bool*  gc_overhead_limit_was_exceeded) {
1113   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
1114   return allocate_memory(req);
1115 }
1116 
1117 MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
1118                                                              size_t size,
1119                                                              Metaspace::MetadataType mdtype) {
1120   MetaWord* result;
1121 
1122   // Inform metaspace OOM to GC heuristics if class unloading is possible.
1123   ShenandoahHeuristics* h = global_generation()->heuristics();
1124   if (h->can_unload_classes()) {
1125     h->record_metaspace_oom();
1126   }
1127 
1128   // Expand and retry allocation
1129   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
1130   if (result != nullptr) {
1131     return result;
1132   }
1133 
1134   // Start full GC
1135   collect(GCCause::_metadata_GC_clear_soft_refs);
1136 
1137   // Retry allocation
1138   result = loader_data->metaspace_non_null()->allocate(size, mdtype);
1139   if (result != nullptr) {
1140     return result;
1141   }
1142 
1143   // Expand and retry allocation
1144   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);

1202       assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
1203       _sh->marked_object_iterate(r, &cl);
1204 
1205       if (ShenandoahPacing) {
1206         _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
1207       }
1208 
1209       if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
1210         break;
1211       }
1212     }
1213   }
1214 };
1215 
1216 void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
1217   ShenandoahEvacuationTask task(this, _collection_set, concurrent);
1218   workers()->run_task(&task);
1219 }
1220 
1221 oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
1222   assert(thread == Thread::current(), "Expected thread parameter to be current thread.");
1223   if (ShenandoahThreadLocalData::is_oom_during_evac(thread)) {
1224     // This thread went through the OOM during evac protocol. It is safe to return
1225     // the forward pointer. It must not attempt to evacuate any other objects.
1226     return ShenandoahBarrierSet::resolve_forwarded(p);
1227   }
1228 
1229   assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
1230 
1231   ShenandoahHeapRegion* r = heap_region_containing(p);
1232   assert(!r->is_humongous(), "never evacuate humongous objects");
1233 
1234   ShenandoahAffiliation target_gen = r->affiliation();
1235   return try_evacuate_object(p, thread, r, target_gen);
1236 }
1237 
1238 oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapRegion* from_region,
1239                                                ShenandoahAffiliation target_gen) {
1240   assert(target_gen == YOUNG_GENERATION, "Only expect evacuations to young in this mode");
1241   assert(from_region->is_young(), "Only expect evacuations from young in this mode");
1242   bool alloc_from_lab = true;
1243   HeapWord* copy = nullptr;
1244   size_t size = ShenandoahForwarding::size(p);
1245 
1246 #ifdef ASSERT
1247   if (ShenandoahOOMDuringEvacALot &&
1248       (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
1249     copy = nullptr;
1250   } else {
1251 #endif
1252     if (UseTLAB) {
1253       copy = allocate_from_gclab(thread, size);
1254     }
1255     if (copy == nullptr) {
1256       // If we failed to allocate in LAB, we'll try a shared allocation.
1257       ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, target_gen);
1258       copy = allocate_memory(req);
1259       alloc_from_lab = false;
1260     }
1261 #ifdef ASSERT
1262   }
1263 #endif
1264 
1265   if (copy == nullptr) {
1266     control_thread()->handle_alloc_failure_evac(size);
1267 
1268     _oom_evac_handler.handle_out_of_memory_during_evacuation();
1269 
1270     return ShenandoahBarrierSet::resolve_forwarded(p);
1271   }
1272 
1273   // Copy the object:
1274   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
1275 
1276   // Try to install the new forwarding pointer.
1277   oop copy_val = cast_to_oop(copy);
1278   oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
1279   if (result == copy_val) {
1280     // Successfully evacuated. Our copy is now the public one!
1281     ContinuationGCSupport::relativize_stack_chunk(copy_val);
1282     shenandoah_assert_correct(nullptr, copy_val);
1283     return copy_val;
1284   }  else {
1285     // Failed to evacuate. We need to deal with the object that is left behind. Since this
1286     // new allocation is certainly after TAMS, it will be considered live in the next cycle.
1287     // But if it happens to contain references to evacuated regions, those references would
1288     // not get updated for this stale copy during this cycle, and we will crash while scanning
1289     // it the next cycle.
1290     if (alloc_from_lab) {
1291       // For LAB allocations, it is enough to roll back the allocation pointer. Either the next
1292       // object will overwrite this stale copy, or the filler object on LAB retirement will
1293       // do this.



1294       ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
1295     } else {
1296       // For non-LAB allocations, we have no way to retract the allocation, and
1297       // have to explicitly overwrite the copy with the filler object. With that overwrite,
1298       // we have to keep the fwdptr initialized and pointing to our (stale) copy.
1299       assert(size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
1300       fill_with_object(copy, size);
1301       shenandoah_assert_correct(nullptr, copy_val);
1302       // For non-LAB allocations, the object has already been registered
1303     }
1304     shenandoah_assert_correct(nullptr, result);
1305     return result;
1306   }
1307 }
1308 
1309 void ShenandoahHeap::trash_cset_regions() {
1310   ShenandoahHeapLocker locker(lock());
1311 
1312   ShenandoahCollectionSet* set = collection_set();
1313   ShenandoahHeapRegion* r;
1314   set->clear_current_index();
1315   while ((r = set->next()) != nullptr) {
1316     r->make_trash();
1317   }
1318   collection_set()->clear();
1319 }
1320 
1321 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
1322   st->print_cr("Heap Regions:");
1323   st->print_cr("Region state: EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HP=pinned humongous start");
1324   st->print_cr("              HC=humongous continuation, CS=collection set, TR=trash, P=pinned, CSP=pinned collection set");
1325   st->print_cr("BTE=bottom/top/end, TAMS=top-at-mark-start");
1326   st->print_cr("UWM=update watermark, U=used");
1327   st->print_cr("T=TLAB allocs, G=GCLAB allocs");
1328   st->print_cr("S=shared allocs, L=live data");
1329   st->print_cr("CP=critical pins");
1330 
1331   for (size_t i = 0; i < num_regions(); i++) {
1332     get_region(i)->print_on(st);
1333   }
1334 }
1335 
1336 size_t ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
1337   assert(start->is_humongous_start(), "reclaim regions starting with the first one");
1338 
1339   oop humongous_obj = cast_to_oop(start->bottom());
1340   size_t size = humongous_obj->size();
1341   size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
1342   size_t index = start->index() + required_regions - 1;
1343 
1344   assert(!start->has_live(), "liveness must be zero");
1345 
1346   for (size_t i = 0; i < required_regions; i++) {
1347     // Reclaim from tail. Otherwise, assertion fails when printing region to trace log,
1348     // as it expects that every region belongs to a humongous region starting with a humongous start region.
1349     ShenandoahHeapRegion* region = get_region(index--);
1350 
1351     assert(region->is_humongous(), "expect correct humongous start or continuation");
1352     assert(!region->is_cset(), "Humongous region should not be in collection set");
1353 
1354     region->make_trash_immediate();
1355   }
1356   return required_regions;
1357 }
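
// The reclamation loop above walks required_regions regions from the tail back to the humongous
// start. A hedged sketch of the rounding that the required_regions() helper is assumed to perform
// (hypothetical stand-in, not the HotSpot implementation; the 2 MB region size is only an example):

#include <cstddef>
#include <cstdio>

size_t required_regions_model(size_t object_bytes, size_t region_size_bytes) {
  // Round up: a humongous object occupies one start region plus enough continuations.
  return (object_bytes + region_size_bytes - 1) / region_size_bytes;
}

int main() {
  const size_t region_size = 2u * 1024 * 1024;
  // A 5 MB object needs 3 regions: one humongous start and two continuations.
  std::printf("%zu\n", required_regions_model(5u * 1024 * 1024, region_size));
  return 0;
}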
1358 
1359 class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
1360 public:
1361   ShenandoahCheckCleanGCLABClosure() {}
1362   void do_thread(Thread* thread) {
1363     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1364     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1365     assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
1366 
1367     if (ShenandoahHeap::heap()->mode()->is_generational()) {
1368       PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1369       assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
1370       assert(plab->words_remaining() == 0, "PLAB should not need retirement");
1371     }
1372   }
1373 };
1374 
1375 class ShenandoahRetireGCLABClosure : public ThreadClosure {
1376 private:
1377   bool const _resize;
1378 public:
1379   ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
1380   void do_thread(Thread* thread) {
1381     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1382     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
1383     gclab->retire();
1384     if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1385       ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1386     }
1387 
1388     if (ShenandoahHeap::heap()->mode()->is_generational()) {
1389       PLAB* plab = ShenandoahThreadLocalData::plab(thread);
1390       assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
1391 
1392       // There are two reasons to retire all PLABs between old-gen evacuation passes.
1393       //  1. We need to make the PLAB memory parsable by remembered-set scanning.
1394       //  2. We need to establish a trustworthy update watermark value within each old-gen heap region.
1395       ShenandoahGenerationalHeap::heap()->retire_plab(plab, thread);
1396       if (_resize && ShenandoahThreadLocalData::plab_size(thread) > 0) {
1397         ShenandoahThreadLocalData::set_plab_size(thread, 0);
1398       }
1399     }
1400   }
1401 };
1402 
1403 void ShenandoahHeap::labs_make_parsable() {
1404   assert(UseTLAB, "Only call with UseTLAB");
1405 
1406   ShenandoahRetireGCLABClosure cl(false);
1407 
1408   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1409     ThreadLocalAllocBuffer& tlab = t->tlab();
1410     tlab.make_parsable();
1411     cl.do_thread(t);
1412   }
1413 
1414   workers()->threads_do(&cl);
1415 }
1416 
1417 void ShenandoahHeap::tlabs_retire(bool resize) {
1418   assert(UseTLAB, "Only call with UseTLAB");
1419   assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");

1513 }
1514 
1515 void ShenandoahHeap::print_tracing_info() const {
1516   LogTarget(Info, gc, stats) lt;
1517   if (lt.is_enabled()) {
1518     ResourceMark rm;
1519     LogStream ls(lt);
1520 
1521     phase_timings()->print_global_on(&ls);
1522 
1523     ls.cr();
1524     ls.cr();
1525 
1526     shenandoah_policy()->print_gc_stats(&ls);
1527 
1528     ls.cr();
1529     ls.cr();
1530   }
1531 }
1532 
1533 void ShenandoahHeap::set_gc_generation(ShenandoahGeneration* generation) {
1534   shenandoah_assert_control_or_vm_thread_at_safepoint();
1535   _gc_generation = generation;
1536 }
1537 
1538 // Active generation may only be set by the VM thread at a safepoint.
1539 void ShenandoahHeap::set_active_generation() {
1540   assert(Thread::current()->is_VM_thread(), "Only the VM Thread");
1541   assert(SafepointSynchronize::is_at_safepoint(), "Only at a safepoint!");
1542   assert(_gc_generation != nullptr, "Will set _active_generation to nullptr");
1543   _active_generation = _gc_generation;
1544 }
1545 
1546 void ShenandoahHeap::on_cycle_start(GCCause::Cause cause, ShenandoahGeneration* generation) {
1547   shenandoah_policy()->record_collection_cause(cause);
1548 
1549   assert(gc_cause() == GCCause::_no_gc, "Over-writing cause");
1550   assert(_gc_generation == nullptr, "Over-writing _gc_generation");
1551 
1552   set_gc_cause(cause);
1553   set_gc_generation(generation);
1554 
1555   generation->heuristics()->record_cycle_start();
1556 }
1557 
1558 void ShenandoahHeap::on_cycle_end(ShenandoahGeneration* generation) {
1559   assert(gc_cause() != GCCause::_no_gc, "cause wasn't set");
1560   assert(_gc_generation != nullptr, "_gc_generation wasn't set");
1561 
1562   generation->heuristics()->record_cycle_end();
1563   if (mode()->is_generational() && generation->is_global()) {
1564     // If we just completed a GLOBAL GC, claim credit for completion of young-gen and old-gen GC as well
1565     young_generation()->heuristics()->record_cycle_end();
1566     old_generation()->heuristics()->record_cycle_end();
1567   }
1568 
1569   set_gc_generation(nullptr);
1570   set_gc_cause(GCCause::_no_gc);
1571 }
1572 
1573 void ShenandoahHeap::verify(VerifyOption vo) {
1574   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1575     if (ShenandoahVerify) {
1576       verifier()->verify_generic(vo);
1577     } else {
1578       // TODO: Consider allocating verification bitmaps on demand,
1579       // and turning this on unconditionally.
1580     }
1581   }
1582 }
1583 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1584   return _free_set->capacity();
1585 }
1586 
1587 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1588 private:
1589   MarkBitMap* _bitmap;
1590   ShenandoahScanObjectStack* _oop_stack;
1591   ShenandoahHeap* const _heap;
1592   ShenandoahMarkingContext* const _marking_context;

1902   } else {
1903     heap_region_iterate(blk);
1904   }
1905 }
1906 
1907 class ShenandoahRendezvousClosure : public HandshakeClosure {
1908 public:
1909   inline ShenandoahRendezvousClosure(const char* name) : HandshakeClosure(name) {}
1910   inline void do_thread(Thread* thread) {}
1911 };
1912 
1913 void ShenandoahHeap::rendezvous_threads(const char* name) {
1914   ShenandoahRendezvousClosure cl(name);
1915   Handshake::execute(&cl);
1916 }
1917 
1918 void ShenandoahHeap::recycle_trash() {
1919   free_set()->recycle_trash();
1920 }
1921 
1922 void ShenandoahHeap::do_class_unloading() {
1923   _unloader.unload();
1924   if (mode()->is_generational()) {
1925     old_generation()->set_parsable(false);
1926   }
1927 }
1928 
1929 void ShenandoahHeap::stw_weak_refs(bool full_gc) {
1930   // Weak refs processing
1931   ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
1932                                                 : ShenandoahPhaseTimings::degen_gc_weakrefs;
1933   ShenandoahTimingsTracker t(phase);
1934   ShenandoahGCWorkerPhase worker_phase(phase);
1935   shenandoah_assert_generations_reconciled();
1936   gc_generation()->ref_processor()->process_references(phase, workers(), false /* concurrent */);
1937 }
1938 
1939 void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
1940   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
1941 
1942   // Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to
1943   // make them parsable for update code to work correctly. Plus, we can compute new sizes
1944   // for future GCLABs here.
1945   if (UseTLAB) {
1946     ShenandoahGCPhase phase(concurrent ?
1947                             ShenandoahPhaseTimings::init_update_refs_manage_gclabs :
1948                             ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
1949     gclabs_retire(ResizeTLAB);
1950   }
1951 
1952   _update_refs_iterator.reset();
1953 }
1954 
1955 void ShenandoahHeap::propagate_gc_state_to_java_threads() {
1956   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
1957   if (_gc_state_changed) {
1958     _gc_state_changed = false;
1959     char state = gc_state();
1960     for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1961       ShenandoahThreadLocalData::set_gc_state(t, state);
1962     }
1963   }
1964 }
1965 
1966 void ShenandoahHeap::set_gc_state(uint mask, bool value) {
1967   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
1968   _gc_state.set_cond(mask, value);
1969   _gc_state_changed = true;
1970   // If concurrent weak root processing is in progress, the active generation must not be null.
1971   assert(!is_concurrent_weak_root_in_progress() || active_generation() != nullptr, "Error");
1972   shenandoah_assert_generations_reconciled();
1973 }
1974 
1975 void ShenandoahHeap::set_concurrent_young_mark_in_progress(bool in_progress) {
1976   uint mask;
1977   assert(!has_forwarded_objects(), "Young marking is not concurrent with evacuation");
1978   if (!in_progress && is_concurrent_old_mark_in_progress()) {
1979     assert(mode()->is_generational(), "Only generational GC has old marking");
1980     assert(_gc_state.is_set(MARKING), "concurrent_old_marking_in_progress implies MARKING");
1981     // If old-marking is in progress when we turn off YOUNG_MARKING, leave MARKING (and OLD_MARKING) on
1982     mask = YOUNG_MARKING;
1983   } else {
1984     mask = MARKING | YOUNG_MARKING;
1985   }
1986   set_gc_state(mask, in_progress);
1987   manage_satb_barrier(in_progress);
1988 }
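
// A small model of the mask selection above: clearing young marking drops only the YOUNG_MARKING
// bit while old marking is still running, and drops the umbrella MARKING bit otherwise. The flag
// values are illustrative, not HotSpot's gc-state encoding.

#include <cassert>
#include <cstdint>

enum : uint8_t { MARKING_F = 1u << 0, YOUNG_MARKING_F = 1u << 1, OLD_MARKING_F = 1u << 2 };

uint8_t clear_young_marking(uint8_t state) {
  uint8_t mask = (state & OLD_MARKING_F) ? YOUNG_MARKING_F
                                         : (MARKING_F | YOUNG_MARKING_F);
  return state & ~mask;
}

int main() {
  // Old marking still in progress: MARKING stays set.
  assert(clear_young_marking(MARKING_F | YOUNG_MARKING_F | OLD_MARKING_F) == (MARKING_F | OLD_MARKING_F));
  // No old marking: all marking-related bits are cleared.
  assert(clear_young_marking(MARKING_F | YOUNG_MARKING_F) == 0);
  return 0;
}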
1989 
1990 void ShenandoahHeap::set_concurrent_old_mark_in_progress(bool in_progress) {
1991 #ifdef ASSERT
1992   // has_forwarded_objects() iff UPDATEREFS or EVACUATION
1993   bool has_forwarded = has_forwarded_objects();
1994   bool updating_or_evacuating = _gc_state.is_set(UPDATEREFS | EVACUATION);
1995   bool evacuating = _gc_state.is_set(EVACUATION);
1996   assert ((has_forwarded == updating_or_evacuating) || (evacuating && !has_forwarded && collection_set()->is_empty()),
1997           "Updating or evacuating iff has forwarded objects, or if evacuation phase is promoting in place without forwarding");
1998 #endif
1999   if (!in_progress && is_concurrent_young_mark_in_progress()) {
2000     // If young-marking is in progress when we turn off OLD_MARKING, leave MARKING (and YOUNG_MARKING) on
2001     assert(_gc_state.is_set(MARKING), "concurrent_young_marking_in_progress implies MARKING");
2002     set_gc_state(OLD_MARKING, in_progress);
2003   } else {
2004     set_gc_state(MARKING | OLD_MARKING, in_progress);
2005   }
2006   manage_satb_barrier(in_progress);
2007 }
2008 
2009 bool ShenandoahHeap::is_prepare_for_old_mark_in_progress() const {
2010   return old_generation()->is_preparing_for_mark();
2011 }
2012 
2013 void ShenandoahHeap::manage_satb_barrier(bool active) {
2014   if (is_concurrent_mark_in_progress()) {
2015     // Ignore request to deactivate barrier while concurrent mark is in progress.
2016     // Do not attempt to re-activate the barrier if it is already active.
2017     if (active && !ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
2018       ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
2019     }
2020   } else {
2021     // No concurrent marking is in progress so honor request to deactivate,
2022     // but only if the barrier is already active.
2023     if (!active && ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
2024       ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(active, !active);
2025     }
2026   }
2027 }
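
// The toggle above changes the SATB queue set only when a real transition is needed: while any
// concurrent mark is running it may switch the barrier on but never off, and outside of marking
// it may switch the barrier off but never on. A compact model of that policy (hypothetical names):

#include <cassert>

bool next_satb_active(bool requested_active, bool marking_in_progress, bool currently_active) {
  if (marking_in_progress) {
    return currently_active || requested_active;   // may activate, never deactivate
  }
  return currently_active && requested_active;     // may deactivate, never activate
}

int main() {
  assert(next_satb_active(false, true, true)  == true);   // deactivation ignored while marking
  assert(next_satb_active(true, false, false) == false);  // activation ignored outside marking
  assert(next_satb_active(false, false, true) == false);  // deactivation honored outside marking
  return 0;
}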
2028 
2029 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
2030   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
2031   set_gc_state(EVACUATION, in_progress);
2032 }
2033 
2034 void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
2035   if (in_progress) {
2036     _concurrent_strong_root_in_progress.set();
2037   } else {
2038     _concurrent_strong_root_in_progress.unset();
2039   }
2040 }
2041 
2042 void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
2043   set_gc_state(WEAK_ROOTS, cond);
2044 }
2045 
2046 GCTracer* ShenandoahHeap::tracer() {
2047   return shenandoah_policy()->tracer();
2048 }
2049 
2050 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
2051   return _free_set->used();
2052 }
2053 
2054 bool ShenandoahHeap::try_cancel_gc() {
2055   jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
2056   return prev == CANCELLABLE;
2057 }
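
// try_cancel_gc() lets exactly one requester win the CANCELLABLE -> CANCELLED transition, which is
// why cancel_gc() below logs the cancellation only once. A minimal model with std::atomic
// (illustrative constants, not the HotSpot types):

#include <atomic>
#include <cstdio>

enum CancelStateModel : signed char { CANCELLABLE_M = 0, CANCELLED_M = 1 };

std::atomic<signed char> g_cancelled{CANCELLABLE_M};

bool try_cancel_gc_model() {
  signed char expected = CANCELLABLE_M;                       // only succeeds from CANCELLABLE
  return g_cancelled.compare_exchange_strong(expected, CANCELLED_M);
}

int main() {
  std::printf("first request:  %s\n", try_cancel_gc_model() ? "won" : "lost");  // won
  std::printf("second request: %s\n", try_cancel_gc_model() ? "won" : "lost");  // lost
  return 0;
}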
2058 
2059 void ShenandoahHeap::cancel_concurrent_mark() {
2060   if (mode()->is_generational()) {
2061     young_generation()->cancel_marking();
2062     old_generation()->cancel_marking();
2063   }
2064 
2065   global_generation()->cancel_marking();
2066 
2067   ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
2068 }
2069 
2070 void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
2071   if (try_cancel_gc()) {
2072     FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
2073     log_info(gc)("%s", msg.buffer());
2074     Events::log(Thread::current(), "%s", msg.buffer());
2075     _cancel_requested_time = os::elapsedTime();
2076   }
2077 }
2078 
2079 uint ShenandoahHeap::max_workers() {
2080   return _max_workers;
2081 }
2082 
2083 void ShenandoahHeap::stop() {
2084   // The shutdown sequence should be able to terminate when GC is running.
2085 
2086   // Step 0. Notify policy to disable event recording and prevent visiting gc threads during shutdown
2087   _shenandoah_policy->record_shutdown();
2088 
2089   // Step 1. Notify control thread that we are in shutdown.
2090   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
2091   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
2092   control_thread()->prepare_for_graceful_shutdown();
2093 
2094   // Step 2. Notify GC workers that we are cancelling GC.
2095   cancel_gc(GCCause::_shenandoah_stop_vm);

2179 }
2180 
2181 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
2182   set_gc_state(HAS_FORWARDED, cond);
2183 }
2184 
2185 void ShenandoahHeap::set_unload_classes(bool uc) {
2186   _unload_classes.set_cond(uc);
2187 }
2188 
2189 bool ShenandoahHeap::unload_classes() const {
2190   return _unload_classes.is_set();
2191 }
2192 
2193 address ShenandoahHeap::in_cset_fast_test_addr() {
2194   ShenandoahHeap* heap = ShenandoahHeap::heap();
2195   assert(heap->collection_set() != nullptr, "Sanity");
2196   return (address) heap->collection_set()->biased_map_address();
2197 }
2198 
2199 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
2200   if (mode()->is_generational()) {
2201     young_generation()->reset_bytes_allocated_since_gc_start();
2202     old_generation()->reset_bytes_allocated_since_gc_start();
2203   }
2204 
2205   global_generation()->reset_bytes_allocated_since_gc_start();
2206 }
2207 
2208 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
2209   _degenerated_gc_in_progress.set_cond(in_progress);
2210 }
2211 
2212 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2213   _full_gc_in_progress.set_cond(in_progress);
2214 }
2215 
2216 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2217   assert (is_full_gc_in_progress(), "should be");
2218   _full_gc_move_in_progress.set_cond(in_progress);
2219 }
2220 
2221 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2222   set_gc_state(UPDATEREFS, in_progress);
2223 }
2224 
2225 void ShenandoahHeap::register_nmethod(nmethod* nm) {

2249     if (r->is_active()) {
2250       if (r->is_pinned()) {
2251         if (r->pin_count() == 0) {
2252           r->make_unpinned();
2253         }
2254       } else {
2255         if (r->pin_count() > 0) {
2256           r->make_pinned();
2257         }
2258       }
2259     }
2260   }
2261 
2262   assert_pinned_region_status();
2263 }
2264 
2265 #ifdef ASSERT
2266 void ShenandoahHeap::assert_pinned_region_status() {
2267   for (size_t i = 0; i < num_regions(); i++) {
2268     ShenandoahHeapRegion* r = get_region(i);
2269     shenandoah_assert_generations_reconciled();
2270     if (gc_generation()->contains(r)) {
2271       assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
2272              "Region " SIZE_FORMAT " pinning status is inconsistent", i);
2273     }
2274   }
2275 }
2276 #endif
2277 
2278 ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
2279   return _gc_timer;
2280 }
2281 
2282 void ShenandoahHeap::prepare_concurrent_roots() {
2283   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2284   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
2285   set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
2286   set_concurrent_weak_root_in_progress(true);
2287   if (unload_classes()) {
2288     _unloader.prepare();
2289   }
2290 }
2291 
2292 void ShenandoahHeap::finish_concurrent_roots() {
2293   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

2308   } else {
2309     // Use ConcGCThreads outside safepoints
2310     assert(nworkers == ConcGCThreads, "Use ConcGCThreads (%u) outside safepoints, %u",
2311            ConcGCThreads, nworkers);
2312   }
2313 }
2314 #endif
2315 
2316 ShenandoahVerifier* ShenandoahHeap::verifier() {
2317   guarantee(ShenandoahVerify, "Should be enabled");
2318   assert (_verifier != nullptr, "sanity");
2319   return _verifier;
2320 }
2321 
2322 template<bool CONCURRENT>
2323 class ShenandoahUpdateHeapRefsTask : public WorkerTask {
2324 private:
2325   ShenandoahHeap* _heap;
2326   ShenandoahRegionIterator* _regions;
2327 public:
2328   explicit ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions) :
2329     WorkerTask("Shenandoah Update References"),
2330     _heap(ShenandoahHeap::heap()),
2331     _regions(regions) {
2332   }
2333 
2334   void work(uint worker_id) {
2335     if (CONCURRENT) {
2336       ShenandoahConcurrentWorkerSession worker_session(worker_id);
2337       ShenandoahSuspendibleThreadSetJoiner stsj;
2338       do_work<ShenandoahConcUpdateRefsClosure>(worker_id);
2339     } else {
2340       ShenandoahParallelWorkerSession worker_session(worker_id);
2341       do_work<ShenandoahNonConcUpdateRefsClosure>(worker_id);
2342     }
2343   }
2344 
2345 private:
2346   template<class T>
2347   void do_work(uint worker_id) {

2348     if (CONCURRENT && (worker_id == 0)) {
2349       // We ask the first worker to replenish the Mutator free set by moving regions previously reserved to hold the
2350       // results of evacuation.  These reserves are no longer necessary because evacuation has completed.
2351       size_t cset_regions = _heap->collection_set()->count();
2352 
2353       // Now that evacuation is done, we can reassign any regions that had been reserved to hold the results of evacuation
2354       // to the mutator free set.  At the end of GC, we will have cset_regions newly evacuated fully empty regions from
2355       // which we will be able to replenish the Collector free set and the OldCollector free set in preparation for the
2356       // next GC cycle.
2357       _heap->free_set()->move_regions_from_collector_to_mutator(cset_regions);
2358     }
2359     // If !CONCURRENT, there's no value in expanding Mutator free set
2360     T cl;
2361     ShenandoahHeapRegion* r = _regions->next();

2362     while (r != nullptr) {
2363       HeapWord* update_watermark = r->get_update_watermark();
2364       assert (update_watermark >= r->bottom(), "sanity");
2365       if (r->is_active() && !r->is_cset()) {
2366         _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2367         if (ShenandoahPacing) {
2368           _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
2369         }
2370       }
2371       if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
2372         return;
2373       }
2374       r = _regions->next();
2375     }
2376   }
2377 };
2378 
2379 void ShenandoahHeap::update_heap_references(bool concurrent) {
2380   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2381 
2382   if (concurrent) {
2383     ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator);
2384     workers()->run_task(&task);
2385   } else {
2386     ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator);
2387     workers()->run_task(&task);
2388   }
2389 }
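
// Each worker claims regions from the shared iterator, updates references up to the region's
// update watermark, and re-checks for cancellation between regions. A stripped-down model of that
// claiming loop, with an atomic cursor standing in for the region iterator (illustrative names):

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

struct RegionModel { int id; bool active; };

std::vector<RegionModel> g_region_table;
std::atomic<size_t> g_claim_cursor{0};
std::atomic<bool>   g_cancel_requested{false};

RegionModel* next_region() {
  size_t i = g_claim_cursor.fetch_add(1);       // each index is handed out exactly once
  return i < g_region_table.size() ? &g_region_table[i] : nullptr;
}

void update_refs_worker(int worker_id) {
  for (RegionModel* r = next_region(); r != nullptr; r = next_region()) {
    if (!r->active) continue;                   // skip regions with nothing to update
    std::printf("worker %d updates region %d\n", worker_id, r->id);
    if (g_cancel_requested.load()) return;      // bail out between regions, as above
  }
}

int main() {
  for (int i = 0; i < 8; i++) g_region_table.push_back(RegionModel{i, true});
  std::thread t0(update_refs_worker, 0), t1(update_refs_worker, 1);
  t0.join();
  t1.join();
  return 0;
}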
2390 
2391 ShenandoahSynchronizePinnedRegionStates::ShenandoahSynchronizePinnedRegionStates() : _lock(ShenandoahHeap::heap()->lock()) { }
2392 
2393 void ShenandoahSynchronizePinnedRegionStates::heap_region_do(ShenandoahHeapRegion* r) {
2394   // Drop "pinned" state from regions that no longer have a pinned count. Put
2395   // regions with a pinned count into the "pinned" state.
2396   if (r->is_active()) {
2397     if (r->is_pinned()) {
2398       if (r->pin_count() == 0) {
2399         ShenandoahHeapLocker locker(_lock);
2400         r->make_unpinned();
2401       }
2402     } else {
2403       if (r->pin_count() > 0) {
2404         ShenandoahHeapLocker locker(_lock);
2405         r->make_pinned();
2406       }
2407     }
2408   }
2409 }


2410 
2411 void ShenandoahHeap::update_heap_region_states(bool concurrent) {
2412   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2413   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2414 
2415   {
2416     ShenandoahGCPhase phase(concurrent ?
2417                             ShenandoahPhaseTimings::final_update_refs_update_region_states :
2418                             ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
2419 
2420     final_update_refs_update_region_states();
2421 
2422     assert_pinned_region_status();
2423   }
2424 
2425   {
2426     ShenandoahGCPhase phase(concurrent ?
2427                             ShenandoahPhaseTimings::final_update_refs_trash_cset :
2428                             ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
2429     trash_cset_regions();
2430   }
2431 }
2432 
2433 void ShenandoahHeap::final_update_refs_update_region_states() {
2434   ShenandoahSynchronizePinnedRegionStates cl;
2435   parallel_heap_region_iterate(&cl);
2436 }
2437 
2438 void ShenandoahHeap::rebuild_free_set(bool concurrent) {
2439   ShenandoahGCPhase phase(concurrent ?
2440                           ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
2441                           ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
2442   ShenandoahHeapLocker locker(lock());
2443   size_t young_cset_regions, old_cset_regions;
2444   size_t first_old_region, last_old_region, old_region_count;
2445   _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count);
2446   // If there are no old regions, first_old_region will be greater than last_old_region
2447   assert((first_old_region > last_old_region) ||
2448          ((last_old_region + 1 - first_old_region >= old_region_count) &&
2449           get_region(first_old_region)->is_old() && get_region(last_old_region)->is_old()),
2450          "sanity: old_region_count: " SIZE_FORMAT ", first_old_region: " SIZE_FORMAT ", last_old_region: " SIZE_FORMAT,
2451          old_region_count, first_old_region, last_old_region);
2452 
2453   if (mode()->is_generational()) {
2454 #ifdef ASSERT
2455     if (ShenandoahVerify) {
2456       verifier()->verify_before_rebuilding_free_set();
2457     }
2458 #endif
2459 
2460     // The computation of bytes_of_allocation_runway_before_gc_trigger is quite conservative, so we consider all of this
2461     // memory available for transfer to old. Note that transfer of humongous regions does not impact available.
2462     ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
2463     size_t allocation_runway = gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions);
2464     gen_heap->compute_old_generation_balance(allocation_runway, old_cset_regions);
2465 
2466     // Total old_available may have been expanded to hold anticipated promotions.  We trigger if the fragmented available
2467     // memory represents more than 16 regions worth of data.  Note that fragmentation may increase when we promote regular
2468     // regions in place, since many of those regions still hold an abundant amount of available memory.  Fragmentation
2469     // will decrease as promote-by-copy consumes the available memory within these partially consumed regions.
2470     //
2471     // We consider old-gen to have excessive fragmentation if more than 12.5% of old-gen is free memory that resides
2472     // within partially consumed regions of memory. (A sketch of this threshold check follows this function.)
2473   }
2474   // Rebuild free set based on adjusted generation sizes.
2475   _free_set->finish_rebuild(young_cset_regions, old_cset_regions, old_region_count);
2476 
2477   if (mode()->is_generational()) {
2478     ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
2479     ShenandoahOldGeneration* old_gen = gen_heap->old_generation();
2480     old_gen->heuristics()->evaluate_triggers(first_old_region, last_old_region, old_region_count, num_regions());
2481   }
2482 }
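
// The trigger mentioned in the comment above lives in the old-generation heuristics and is not
// shown in this excerpt. A hedged sketch of the stated 12.5% (one-eighth) threshold, with
// hypothetical names and sizes:

#include <cstddef>
#include <cstdio>

bool old_gen_fragmentation_excessive(size_t old_capacity_bytes, size_t free_in_partial_regions_bytes) {
  // More than 12.5% of old-gen capacity sitting as free space inside partially consumed regions.
  return free_in_partial_regions_bytes * 8 > old_capacity_bytes;
}

int main() {
  const size_t capacity = 512u * 1024 * 1024;   // example: 512 MB old generation
  std::printf("60 MB fragmented -> %s\n",
              old_gen_fragmentation_excessive(capacity, 60u * 1024 * 1024) ? "trigger" : "ok");  // ok
  std::printf("80 MB fragmented -> %s\n",
              old_gen_fragmentation_excessive(capacity, 80u * 1024 * 1024) ? "trigger" : "ok");  // trigger
  return 0;
}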
2483 
2484 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2485   print_on(st);
2486   st->cr();
2487   print_heap_regions_on(st);
2488 }
2489 
2490 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2491   size_t slice = r->index() / _bitmap_regions_per_slice;
2492 
2493   size_t regions_from = _bitmap_regions_per_slice * slice;
2494   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2495   for (size_t g = regions_from; g < regions_to; g++) {
2496     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2497     if (skip_self && g == r->index()) continue;
2498     if (get_region(g)->is_committed()) {
2499       return true;
2500     }

2583 void ShenandoahHeap::initialize_serviceability() {
2584   _memory_pool = new ShenandoahMemoryPool(this);
2585   _cycle_memory_manager.add_pool(_memory_pool);
2586   _stw_memory_manager.add_pool(_memory_pool);
2587 }
2588 
2589 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2590   GrowableArray<GCMemoryManager*> memory_managers(2);
2591   memory_managers.append(&_cycle_memory_manager);
2592   memory_managers.append(&_stw_memory_manager);
2593   return memory_managers;
2594 }
2595 
2596 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2597   GrowableArray<MemoryPool*> memory_pools(1);
2598   memory_pools.append(_memory_pool);
2599   return memory_pools;
2600 }
2601 
2602 MemoryUsage ShenandoahHeap::memory_usage() {
2603   return MemoryUsage(_initial_size, used(), committed(), max_capacity());
2604 }
2605 
2606 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2607   _heap(ShenandoahHeap::heap()),
2608   _index(0) {}
2609 
2610 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2611   _heap(heap),
2612   _index(0) {}
2613 
2614 void ShenandoahRegionIterator::reset() {
2615   _index = 0;
2616 }
2617 
2618 bool ShenandoahRegionIterator::has_next() const {
2619   return _index < _heap->num_regions();
2620 }
2621 
2622 char ShenandoahHeap::gc_state() const {
2623   return _gc_state.raw_value();

2725   }
2726 
2727   // No unclaimed tail at the end of archive space.
2728   assert(cur == end,
2729          "Archive space should be fully used: " PTR_FORMAT " " PTR_FORMAT,
2730          p2i(cur), p2i(end));
2731 
2732   // Region bounds are good.
2733   ShenandoahHeapRegion* begin_reg = heap_region_containing(start);
2734   ShenandoahHeapRegion* end_reg = heap_region_containing(end);
2735   assert(begin_reg->is_regular(), "Must be");
2736   assert(end_reg->is_regular(), "Must be");
2737   assert(begin_reg->bottom() == start,
2738          "Must agree: archive-space-start: " PTR_FORMAT ", begin-region-bottom: " PTR_FORMAT,
2739          p2i(start), p2i(begin_reg->bottom()));
2740   assert(end_reg->top() == end,
2741          "Must agree: archive-space-end: " PTR_FORMAT ", end-region-top: " PTR_FORMAT,
2742          p2i(end), p2i(end_reg->top()));
2743 #endif
2744 }
2745 
2746 ShenandoahGeneration* ShenandoahHeap::generation_for(ShenandoahAffiliation affiliation) const {
2747   if (!mode()->is_generational()) {
2748     return global_generation();
2749   } else if (affiliation == YOUNG_GENERATION) {
2750     return young_generation();
2751   } else if (affiliation == OLD_GENERATION) {
2752     return old_generation();
2753   }
2754 
2755   ShouldNotReachHere();
2756   return nullptr;
2757 }
2758 
2759 void ShenandoahHeap::log_heap_status(const char* msg) const {
2760   if (mode()->is_generational()) {
2761     young_generation()->log_status(msg);
2762     old_generation()->log_status(msg);
2763   } else {
2764     global_generation()->log_status(msg);
2765   }
2766 }
2767 