/*
 * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelArguments.hpp"
#include "gc/parallel/parallelInitLogger.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMemoryPool.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psParallelCompactNew.inline.hpp"
#include "gc/parallel/psPromotionManager.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psVMOperations.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/fullGCForwarding.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/genArguments.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceCounters.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/reservedSpace.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/cpuTimeCounters.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryManager.hpp"
#include "utilities/macros.hpp"
#include "utilities/vmError.hpp"

PSYoungGen*  ParallelScavengeHeap::_young_gen = nullptr;
PSOldGen*    ParallelScavengeHeap::_old_gen = nullptr;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = nullptr;
GCPolicyCounters* ParallelScavengeHeap::_gc_policy_counters = nullptr;
size_t ParallelScavengeHeap::_desired_page_size = 0;

jint ParallelScavengeHeap::initialize() {
  const size_t reserved_heap_size = ParallelArguments::heap_reserved_size_bytes();

  assert(_desired_page_size != 0, "Should be initialized");
  ReservedHeapSpace heap_rs = Universe::reserve_heap(reserved_heap_size, HeapAlignment, _desired_page_size);
  // Adjust SpaceAlignment based on actually used large page size.
  if (UseLargePages) {
    SpaceAlignment = MAX2(heap_rs.page_size(), default_space_alignment());
  }
  assert(is_aligned(SpaceAlignment, heap_rs.page_size()), "inv");

  trace_actual_reserved_page_size(reserved_heap_size, heap_rs);

  initialize_reserved_region(heap_rs);
  // Layout the reserved space for the generations.
  ReservedSpace old_rs   = heap_rs.first_part(MaxOldSize, SpaceAlignment);
  ReservedSpace young_rs = heap_rs.last_part(MaxOldSize, SpaceAlignment);
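  // The old-gen occupies the lower part of the reserved space and the
  // young-gen the part above it, so the two generations are contiguous;
  // their shared boundary is asserted below ("Boundaries must meet").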
  assert(young_rs.size() == MaxNewSize, "Didn't reserve all of the heap");

  PSCardTable* card_table = new PSCardTable(_reserved);
  card_table->initialize(old_rs.base(), young_rs.base());

  CardTableBarrierSet* const barrier_set = new CardTableBarrierSet(card_table);
  BarrierSet::set_barrier_set(barrier_set);

  // Set up WorkerThreads
  _workers.initialize_workers();

  // Create and initialize the generations.
  _young_gen = new PSYoungGen(
      young_rs,
      NewSize,
      MinNewSize,
      MaxNewSize);
  _old_gen = new PSOldGen(
      old_rs,
      OldSize,
      MinOldSize,
      MaxOldSize);

  assert(young_gen()->max_gen_size() == young_rs.size(), "Consistency check");
  assert(old_gen()->max_gen_size() == old_rs.size(), "Consistency check");

  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;

  _size_policy = new PSAdaptiveSizePolicy(SpaceAlignment,
                                          max_gc_pause_sec);

  assert((old_gen()->virtual_space()->high_boundary() ==
          young_gen()->virtual_space()->low_boundary()),
         "Boundaries must meet");
  // initialize the policy counters - 2 collectors, 2 generations
  _gc_policy_counters = new GCPolicyCounters("ParScav:MSC", 2, 2);

  if (UseCompactObjectHeaders) {
    if (!PSParallelCompactNew::initialize_aux_data()) {
      return JNI_ENOMEM;
    }
  } else {
    if (!PSParallelCompact::initialize_aux_data()) {
      return JNI_ENOMEM;
    }
  }

  // Create CPU time counter
  CPUTimeCounters::create_counter(CPUTimeGroups::CPUTimeType::gc_parallel_workers);

  ParallelInitLogger::print();

  FullGCForwarding::initialize(_reserved);

  return JNI_OK;
}

void ParallelScavengeHeap::initialize_serviceability() {

  _eden_pool = new EdenMutableSpacePool(_young_gen,
                                        _young_gen->eden_space(),
                                        "PS Eden Space",
                                        false /* support_usage_threshold */);

  _survivor_pool = new SurvivorMutableSpacePool(_young_gen,
                                                "PS Survivor Space",
                                                false /* support_usage_threshold */);

  _old_pool = new PSGenerationPool(_old_gen,
                                   "PS Old Gen",
                                   true /* support_usage_threshold */);

  _young_manager = new GCMemoryManager("PS Scavenge");
  _old_manager = new GCMemoryManager("PS MarkSweep");

  _old_manager->add_pool(_eden_pool);
  _old_manager->add_pool(_survivor_pool);
  _old_manager->add_pool(_old_pool);

  _young_manager->add_pool(_eden_pool);
  _young_manager->add_pool(_survivor_pool);

}

class PSIsScavengable : public BoolObjectClosure {
  bool do_object_b(oop obj) {
    return ParallelScavengeHeap::heap()->is_in_young(obj);
  }
};

static PSIsScavengable _is_scavengable;

void ParallelScavengeHeap::post_initialize() {
  CollectedHeap::post_initialize();
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  if (UseCompactObjectHeaders) {
    PSParallelCompactNew::post_initialize();
  } else {
    PSParallelCompact::post_initialize();
  }
  PSPromotionManager::initialize();

  ScavengableNMethods::initialize(&_is_scavengable);
  GCLocker::initialize();
}

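// gc_epilogue() maintains the _is_heap_almost_full flag used by the allocation
// path: the flag is raised when even a full-gc leaves eden non-empty, and
// cleared again once eden is empty after a collection.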
void ParallelScavengeHeap::gc_epilogue(bool full) {
  if (_is_heap_almost_full) {
    // Reset the emergency state if eden is empty after a young/full gc
    if (_young_gen->eden_space()->is_empty()) {
      log_debug(gc)("Leaving memory-constrained state; back to normal");
      _is_heap_almost_full = false;
    }
  } else {
    if (full && !_young_gen->eden_space()->is_empty()) {
      log_debug(gc)("Non-empty young-gen after full-gc; entering memory-constrained state");
      _is_heap_almost_full = true;
    }
  }
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  MetaspaceCounters::update_performance_counters();
  update_parallel_worker_threads_cpu_time();
}

size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

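// Report the usable heap size: the reserved heap minus one survivor space,
// since with copying collection one survivor space is always kept empty and
// is never available to the application.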
size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_gen_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  return young_gen()->is_in(p) || old_gen()->is_in(p);
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  return young_gen()->is_in_reserved(p) || old_gen()->is_in_reserved(p);
}

bool ParallelScavengeHeap::requires_barriers(stackChunkOop p) const {
  return !is_in_young(p);
}

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection. It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request. This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions. It should make every
// attempt to allocate the requested memory.

// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the Java heap becomes exhausted,
// we rely on the size_policy object to force a bail-out.
HeapWord* ParallelScavengeHeap::mem_allocate(size_t size) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  bool is_tlab = false;
  return mem_allocate_work(size, is_tlab);
}

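// Lock-free allocation path: uses only CAS allocation and never expands the
// heap or triggers a GC. Order of attempts: eden, then old-gen for large
// non-TLAB requests, and, when the heap is almost full, also from-space and
// old-gen as a last resort.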
HeapWord* ParallelScavengeHeap::mem_allocate_cas_noexpand(size_t size, bool is_tlab) {
  // Try young-gen first.
  HeapWord* result = young_gen()->allocate(size);
  if (result != nullptr) {
    return result;
  }

  // Try allocating from the old gen for large non-TLAB allocations.
  if (!is_tlab) {
    if (!should_alloc_in_eden(size)) {
      result = old_gen()->cas_allocate_noexpand(size);
      if (result != nullptr) {
        return result;
      }
    }
  }

  // In extreme cases, try allocating in the from-space as well.
  if (_is_heap_almost_full) {
    result = young_gen()->from_space()->cas_allocate(size);
    if (result != nullptr) {
      return result;
    }
    if (!is_tlab) {
      result = old_gen()->cas_allocate_noexpand(size);
      if (result != nullptr) {
        return result;
      }
    }
  }

  return nullptr;
}

HeapWord* ParallelScavengeHeap::mem_allocate_work(size_t size, bool is_tlab) {
  for (uint loop_count = 0; /* empty */; ++loop_count) {
    HeapWord* result = mem_allocate_cas_noexpand(size, is_tlab);
    if (result != nullptr) {
      return result;
    }

    // Read total_collections() under the lock so that multiple
    // allocation-failures result in one GC.
    uint gc_count;
    {
      MutexLocker ml(Heap_lock);

      // Re-try after acquiring the lock, because a GC might have occurred
      // while waiting for this lock.
      result = mem_allocate_cas_noexpand(size, is_tlab);
      if (result != nullptr) {
        return result;
      }

      if (!is_init_completed()) {
        // Can't do GC; try heap expansion to satisfy the request.
        result = expand_heap_and_allocate(size, is_tlab);
        if (result != nullptr) {
          return result;
        }
      }

      gc_count = total_collections();
    }

    {
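      // If another GC has already completed since gc_count was read, the VM
      // operation is expected to skip the collection; the allocation is then
      // retried by the enclosing loop.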
      VM_ParallelCollectForAllocation op(size, is_tlab, gc_count);
      VMThread::execute(&op);

      if (op.gc_succeeded()) {
        assert(is_in_or_null(op.result()), "result not in heap");
        return op.result();
      }
    }

    // Was the GC-overhead limit reached inside the safepoint? If so, this
    // mutator should return null as well for global consistency.
    if (_gc_overhead_counter >= GCOverheadLimitThreshold) {
      return nullptr;
    }

    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc)("ParallelScavengeHeap::mem_allocate retries %d times, size=%zu", loop_count, size);
    }
  }
}

void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
  // No need for max-compaction in this context.
  const bool should_do_max_compaction = false;
  if (UseCompactObjectHeaders) {
    PSParallelCompactNew::invoke(clear_all_soft_refs, should_do_max_compaction);
  } else {
    PSParallelCompact::invoke(clear_all_soft_refs, should_do_max_compaction);
  }
}

bool ParallelScavengeHeap::should_attempt_young_gc() const {
  const bool ShouldRunYoungGC = true;
  const bool ShouldRunFullGC = false;

  if (!_young_gen->to_space()->is_empty()) {
    log_debug(gc, ergo)("To-space is not empty; run full-gc instead.");
    return ShouldRunFullGC;
  }

  // Check if the predicted promoted bytes will overflow free space in old-gen.
  PSAdaptiveSizePolicy* policy = _size_policy;

  size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
  size_t promotion_estimate = MIN2(avg_promoted, _young_gen->used_in_bytes());
  // Total free size after possible old gen expansion
  size_t free_in_old_gen_with_expansion = _old_gen->max_gen_size() - _old_gen->used_in_bytes();

  log_trace(gc, ergo)("average_promoted %zu; padded_average_promoted %zu",
              (size_t) policy->average_promoted_in_bytes(),
              (size_t) policy->padded_average_promoted_in_bytes());

  if (promotion_estimate >= free_in_old_gen_with_expansion) {
    log_debug(gc, ergo)("Run full-gc; predicted promotion size >= max free space in old-gen: %zu >= %zu",
      promotion_estimate, free_in_old_gen_with_expansion);
    return ShouldRunFullGC;
  }

  if (UseAdaptiveSizePolicy) {
    // Also check that the OS has enough free memory to commit and expand old-gen.
    // Otherwise, the recorded gc-pause-time might be inflated by the time the
    // OS spends preparing free memory, resulting in inaccurate young-gen resizing.
    assert(_old_gen->committed().byte_size() >= _old_gen->used_in_bytes(), "inv");
    // Use uint64_t instead of size_t for 32-bit compatibility.
    uint64_t free_mem_in_os;
    if (os::free_memory(free_mem_in_os)) {
      size_t actual_free = (size_t)MIN2(_old_gen->committed().byte_size() - _old_gen->used_in_bytes() + free_mem_in_os,
                                        (uint64_t)SIZE_MAX);
      if (promotion_estimate > actual_free) {
        log_debug(gc, ergo)("Run full-gc; predicted promotion size > free space in old-gen and OS: %zu > %zu",
          promotion_estimate, actual_free);
        return ShouldRunFullGC;
      }
    }
  }

  // No particular reason to run a full-gc, so run a young-gc.
  return ShouldRunYoungGC;
}

static bool check_gc_heap_free_limit(size_t free_bytes, size_t capacity_bytes) {
  return (free_bytes * 100 / capacity_bytes) < GCHeapFreeLimit;
}

bool ParallelScavengeHeap::check_gc_overhead_limit() {
  assert(SafepointSynchronize::is_at_safepoint(), "precondition");

  if (UseGCOverheadLimit) {
    // The goal here is to return null prematurely so that applications can
    // exit gracefully when GC consumes most of the execution time.
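    // With the default settings (GCTimeLimit=98, GCHeapFreeLimit=2), the limit
    // is meant to trip only when GC dominates execution time while less than
    // 2% of each generation is free.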
    bool little_mutator_time = _size_policy->mutator_time_percent() * 100 < (100 - GCTimeLimit);
    bool little_free_space = check_gc_heap_free_limit(_young_gen->free_in_bytes(), _young_gen->capacity_in_bytes())
                          && check_gc_heap_free_limit(  _old_gen->free_in_bytes(),   _old_gen->capacity_in_bytes());

    log_debug(gc)("GC Overhead Limit: GC Time %f Free Space Young %f Old %f Counter %zu",
                  (100 - _size_policy->mutator_time_percent()),
                  percent_of(_young_gen->free_in_bytes(), _young_gen->capacity_in_bytes()),
                  percent_of(_old_gen->free_in_bytes(), _old_gen->capacity_in_bytes()),
                  _gc_overhead_counter);

    if (little_mutator_time && little_free_space) {
      _gc_overhead_counter++;
      if (_gc_overhead_counter >= GCOverheadLimitThreshold) {
        return true;
      }
    } else {
      _gc_overhead_counter = 0;
    }
  }
  return false;
}

HeapWord* ParallelScavengeHeap::expand_heap_and_allocate(size_t size, bool is_tlab) {
#ifdef ASSERT
  assert(Heap_lock->is_locked(), "precondition");
  if (is_init_completed()) {
    assert(SafepointSynchronize::is_at_safepoint(), "precondition");
    assert(Thread::current()->is_VM_thread(), "precondition");
  } else {
    assert(Thread::current()->is_Java_thread(), "precondition");
    assert(Heap_lock->owned_by_self(), "precondition");
  }
#endif

  HeapWord* result = young_gen()->expand_and_allocate(size);

  if (result == nullptr && !is_tlab) {
    result = old_gen()->expand_and_allocate(size);
  }

  return result;   // Could be null if we are out of space.
}

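// Failed allocation policy: tries a regular collection plus heap expansion
// first; if that fails, falls back to a last-resort maximally compacting
// full-gc that also clears soft references, and finally gives up and returns
// null, which leads to an OutOfMemoryError in the caller.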
HeapWord* ParallelScavengeHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  assert(size != 0, "precondition");

  HeapWord* result = nullptr;

  if (!_is_heap_almost_full) {
    // If young-gen can handle this allocation, attempt young-gc first, as young-gc is usually cheaper.
    bool should_run_young_gc = is_tlab || should_alloc_in_eden(size);

    collect_at_safepoint(!should_run_young_gc);

    // If the gc-overhead limit has been reached, skip the allocation.
    if (!check_gc_overhead_limit()) {
      result = expand_heap_and_allocate(size, is_tlab);
      if (result != nullptr) {
        return result;
      }
    }
  }

  // Last-resort GC; clear soft refs and do max-compaction before throwing OOM.
  {
    const bool clear_all_soft_refs = true;
    const bool should_do_max_compaction = true;
    if (UseCompactObjectHeaders) {
      PSParallelCompactNew::invoke(clear_all_soft_refs, should_do_max_compaction);
    } else {
      PSParallelCompact::invoke(clear_all_soft_refs, should_do_max_compaction);
    }
  }

  if (check_gc_overhead_limit()) {
    log_info(gc)("GC Overhead Limit exceeded too often (%zu).", GCOverheadLimitThreshold);
    return nullptr;
  }

  result = expand_heap_and_allocate(size, is_tlab);
  return result;
}

void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

size_t ParallelScavengeHeap::tlab_capacity() const {
  return young_gen()->eden_space()->tlab_capacity();
}

size_t ParallelScavengeHeap::tlab_used() const {
  return young_gen()->eden_space()->tlab_used();
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc() const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc();
}

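// TLABs go through the same allocation path as ordinary objects. On success
// the full requested_size is returned; min_size is not used to hand out a
// smaller TLAB.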
HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
  HeapWord* result = mem_allocate_work(requested_size /* size */,
                                       true /* is_tlab */);
  if (result != nullptr) {
    *actual_size = requested_size;
  }

  return result;
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

void ParallelScavengeHeap::prune_scavengable_nmethods() {
  ScavengableNMethods::prune_nmethods_not_into_young();
}

void ParallelScavengeHeap::prune_unlinked_nmethods() {
  ScavengableNMethods::prune_unlinked_nmethods();
}

void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
    "this thread should not own the Heap_lock");

  uint gc_count      = 0;
  uint full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // These values are guarded by the Heap_lock
    gc_count      = total_collections();
    full_gc_count = total_full_collections();
  }

  VM_ParallelGCCollect op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}

void ParallelScavengeHeap::collect_at_safepoint(bool is_full) {
  assert(!GCLocker::is_active(), "precondition");
  bool clear_soft_refs = GCCause::should_clear_all_soft_refs(_gc_cause);

  if (!is_full && should_attempt_young_gc()) {
    bool young_gc_success = PSScavenge::invoke(clear_soft_refs);
    if (young_gc_success) {
      return;
    }
    log_debug(gc, heap)("Upgrade to Full-GC since Young-gc failed.");
  }

  const bool should_do_max_compaction = false;
  if (UseCompactObjectHeaders) {
    PSParallelCompactNew::invoke(clear_soft_refs, should_do_max_compaction);
  } else {
    PSParallelCompact::invoke(clear_soft_refs, should_do_max_compaction);
  }
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
}

// The HeapBlockClaimer is used during parallel iteration over the heap,
// allowing workers to claim heap areas ("blocks"), gaining exclusive rights to these.
// The eden and survivor spaces are treated as single blocks as it is hard to divide
// these spaces.
// The old space is divided into fixed-size blocks.
class HeapBlockClaimer : public StackObj {
  size_t _claimed_index;

public:
  static const size_t InvalidIndex = SIZE_MAX;
  static const size_t EdenIndex = 0;
  static const size_t SurvivorIndex = 1;
  static const size_t NumNonOldGenClaims = 2;

  HeapBlockClaimer() : _claimed_index(EdenIndex) { }
  // Claim the block and get the block index.
  size_t claim_and_get_block() {
    size_t block_index;
    block_index = AtomicAccess::fetch_then_add(&_claimed_index, 1u);

    PSOldGen* old_gen = ParallelScavengeHeap::heap()->old_gen();
    size_t num_claims = old_gen->num_iterable_blocks() + NumNonOldGenClaims;

    return block_index < num_claims ? block_index : InvalidIndex;
  }
};

void ParallelScavengeHeap::object_iterate_parallel(ObjectClosure* cl,
                                                   HeapBlockClaimer* claimer) {
  size_t block_index = claimer->claim_and_get_block();
  // Iterate until all blocks are claimed
  if (block_index == HeapBlockClaimer::EdenIndex) {
    young_gen()->eden_space()->object_iterate(cl);
    block_index = claimer->claim_and_get_block();
  }
  if (block_index == HeapBlockClaimer::SurvivorIndex) {
    young_gen()->from_space()->object_iterate(cl);
    young_gen()->to_space()->object_iterate(cl);
    block_index = claimer->claim_and_get_block();
  }
  while (block_index != HeapBlockClaimer::InvalidIndex) {
    old_gen()->object_iterate_block(cl, block_index - HeapBlockClaimer::NumNonOldGenClaims);
    block_index = claimer->claim_and_get_block();
  }
}

class PSScavengeParallelObjectIterator : public ParallelObjectIteratorImpl {
private:
  ParallelScavengeHeap*  _heap;
  HeapBlockClaimer      _claimer;

public:
  PSScavengeParallelObjectIterator() :
      _heap(ParallelScavengeHeap::heap()),
      _claimer() {}

  virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
    _heap->object_iterate_parallel(cl, &_claimer);
  }
};

ParallelObjectIteratorImpl* ParallelScavengeHeap::parallel_object_iterator(uint thread_num) {
  return new PSScavengeParallelObjectIterator();
}

HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // called from os::print_location by find or VMError
    if (DebuggingContext::is_enabled() || VMError::is_error_reported()) {
      return nullptr;
    }
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return nullptr;
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
  PSOldGen* old = old_gen();
  HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
  HeapWord* old_reserved_start = old->reserved().start();
  HeapWord* old_reserved_end = old->reserved().end();
  VirtualSpaceSummary old_summary(old_reserved_start, old_committed_end, old_reserved_end);
  SpaceSummary old_space(old_reserved_start, old_committed_end, old->used_in_bytes());

  PSYoungGen* young = young_gen();
  VirtualSpaceSummary young_summary(young->reserved().start(),
    (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());

  MutableSpace* eden = young_gen()->eden_space();
  SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());

  MutableSpace* from = young_gen()->from_space();
  SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes());

  MutableSpace* to = young_gen()->to_space();
  SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());

  VirtualSpaceSummary heap_summary = create_heap_space_summary();
  return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space);
}

bool ParallelScavengeHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<ParallelScavengeHeap>::print_location(st, addr);
}

void ParallelScavengeHeap::print_heap_on(outputStream* st) const {
  if (young_gen() != nullptr) {
    young_gen()->print_on(st);
  }
  if (old_gen() != nullptr) {
    old_gen()->print_on(st);
  }
}

void ParallelScavengeHeap::print_gc_on(outputStream* st) const {
  BarrierSet* bs = BarrierSet::barrier_set();
  if (bs != nullptr) {
    bs->print_on(st);
  }
  st->cr();

  if (UseCompactObjectHeaders) {
    PSParallelCompactNew::print_on(st);
  } else {
    PSParallelCompact::print_on(st);
  }
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  ParallelScavengeHeap::heap()->workers().threads_do(tc);
}

void ParallelScavengeHeap::print_tracing_info() const {
  log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
  if (UseCompactObjectHeaders) {
    log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs", PSParallelCompactNew::accumulated_time()->seconds());
  } else {
    log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs", PSParallelCompact::accumulated_time()->seconds());
  }
}

PreGenGCValues ParallelScavengeHeap::get_pre_gc_values() const {
  const PSYoungGen* const young = young_gen();
  const MutableSpace* const eden = young->eden_space();
  const MutableSpace* const from = young->from_space();
  const PSOldGen* const old = old_gen();

  return PreGenGCValues(young->used_in_bytes(),
                        young->capacity_in_bytes(),
                        eden->used_in_bytes(),
                        eden->capacity_in_bytes(),
                        from->used_in_bytes(),
                        from->capacity_in_bytes(),
                        old->used_in_bytes(),
                        old->capacity_in_bytes());
}

void ParallelScavengeHeap::print_heap_change(const PreGenGCValues& pre_gc_values) const {
  const PSYoungGen* const young = young_gen();
  const MutableSpace* const eden = young->eden_space();
  const MutableSpace* const from = young->from_space();
  const PSOldGen* const old = old_gen();

  log_info(gc, heap)(HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(young->name(),
                                             pre_gc_values.young_gen_used(),
                                             pre_gc_values.young_gen_capacity(),
                                             young->used_in_bytes(),
                                             young->capacity_in_bytes()),
                     HEAP_CHANGE_FORMAT_ARGS("Eden",
                                             pre_gc_values.eden_used(),
                                             pre_gc_values.eden_capacity(),
                                             eden->used_in_bytes(),
                                             eden->capacity_in_bytes()),
                     HEAP_CHANGE_FORMAT_ARGS("From",
                                             pre_gc_values.from_used(),
                                             pre_gc_values.from_capacity(),
                                             from->used_in_bytes(),
                                             from->capacity_in_bytes()));
  log_info(gc, heap)(HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(old->name(),
                                             pre_gc_values.old_gen_used(),
                                             pre_gc_values.old_gen_capacity(),
                                             old->used_in_bytes(),
                                             old->capacity_in_bytes()));
  MetaspaceUtils::print_metaspace_change(pre_gc_values.metaspace_sizes());
}

void ParallelScavengeHeap::verify(VerifyOption option /* ignored */) {
  log_debug(gc, verify)("Tenured");
  old_gen()->verify();

  log_debug(gc, verify)("Eden");
  young_gen()->verify();

  log_debug(gc, verify)("CardTable");
  card_table()->verify_all_young_refs_imprecise();
}

void ParallelScavengeHeap::trace_actual_reserved_page_size(const size_t reserved_heap_size, const ReservedSpace rs) {
  // Check if Info level is enabled, since os::trace_page_sizes() logs on Info level.
  if (log_is_enabled(Info, pagesize)) {
    const size_t page_size = rs.page_size();
    os::trace_page_sizes("Heap",
                         MinHeapSize,
                         reserved_heap_size,
                         rs.base(),
                         rs.size(),
                         page_size);
  }
}

void ParallelScavengeHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const PSHeapSummary& heap_summary = create_ps_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

CardTableBarrierSet* ParallelScavengeHeap::barrier_set() {
  return barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
}

PSCardTable* ParallelScavengeHeap::card_table() {
  return static_cast<PSCardTable*>(barrier_set()->card_table());
}

static size_t calculate_free_from_free_ratio_flag(size_t live, uintx free_percent) {
  assert(free_percent != 100, "precondition");
  // We want to calculate how much free memory there can be based on the
  // live size.
  //   percent * (free + live) = free
  // =>
  //   free = (live * percent) / (1 - percent)
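  //
  // For example, live = 300 MB and free_percent = 40 gives
  //   free = 300 * 0.40 / 0.60 = 200 MB,
  // i.e. 200 MB free out of a 500 MB total is exactly 40%.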

  const double percent = free_percent / 100.0;
  return live * percent / (1.0 - percent);
}

size_t ParallelScavengeHeap::calculate_desired_old_gen_capacity(size_t old_gen_live_size) {
  // If the min free percent is 100%, the old-gen should always be at its max capacity.
  if (MinHeapFreeRatio == 100) {
    return _old_gen->max_gen_size();
  }

  // Use recorded data to calculate the new old-gen capacity, avoiding
  // excessive expansion while keeping the footprint low.

  size_t promoted_estimate = _size_policy->padded_average_promoted_in_bytes();
  // Should have at least this much free room for the next young-gc promotion.
  size_t free_size = promoted_estimate;

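  // Additionally, keep head-room up to the historically observed peak old-gen
  // usage, since occupancy may well reach that level again.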
  size_t largest_live_size = MAX2((size_t)_size_policy->peak_old_gen_used_estimate(), old_gen_live_size);
  free_size += largest_live_size - old_gen_live_size;

  // Respect free percent
  if (MinHeapFreeRatio != 0) {
    size_t min_free = calculate_free_from_free_ratio_flag(old_gen_live_size, MinHeapFreeRatio);
    free_size = MAX2(free_size, min_free);
  }

  if (MaxHeapFreeRatio != 100) {
    size_t max_free = calculate_free_from_free_ratio_flag(old_gen_live_size, MaxHeapFreeRatio);
    free_size = MIN2(max_free, free_size);
  }

  return old_gen_live_size + free_size;
}

void ParallelScavengeHeap::resize_old_gen_after_full_gc() {
  size_t current_capacity = _old_gen->capacity_in_bytes();
  size_t desired_capacity = calculate_desired_old_gen_capacity(old_gen()->used_in_bytes());

  // If MinHeapFreeRatio is at its default value, shrink cautiously; otherwise, users expect prompt shrinking.
  if (FLAG_IS_DEFAULT(MinHeapFreeRatio)) {
    if (desired_capacity < current_capacity) {
      // Shrinking
      if (total_full_collections() < AdaptiveSizePolicyReadyThreshold) {
        // Not enough data for shrinking
        return;
      }
    }
  }

  _old_gen->resize(desired_capacity);
}

void ParallelScavengeHeap::resize_after_young_gc(bool is_survivor_overflowing) {
  _young_gen->resize_after_young_gc(is_survivor_overflowing);

  // Consider whether the old-gen should be shrunk.
  if (!is_survivor_overflowing) {
    // Upper bound for a single-step shrink
    size_t max_shrink_bytes = SpaceAlignment;
    size_t shrink_bytes = _size_policy->compute_old_gen_shrink_bytes(old_gen()->free_in_bytes(), max_shrink_bytes);
    if (shrink_bytes != 0) {
      if (MinHeapFreeRatio != 0) {
        size_t new_capacity = old_gen()->capacity_in_bytes() - shrink_bytes;
        size_t new_free_size = old_gen()->free_in_bytes() - shrink_bytes;
        if ((double)new_free_size / new_capacity * 100 < MinHeapFreeRatio) {
          // Would violate MinHeapFreeRatio
          return;
        }
      }
      old_gen()->shrink(shrink_bytes);
    }
  }
}

void ParallelScavengeHeap::resize_after_full_gc() {
  resize_old_gen_after_full_gc();
  // We don't resize young-gen after full-gc because:
  // 1. eden-size directly affects young-gc frequency (GCTimeRatio), and we
  // don't have enough info to determine its desired size.
  // 2. eden can contain live objs after a full-gc, which is unsafe for
  // resizing. We will perform expansion on allocation if needed, in
  // satisfy_failed_allocation().
}

HeapWord* ParallelScavengeHeap::allocate_loaded_archive_space(size_t size) {
  return _old_gen->allocate(size);
}

void ParallelScavengeHeap::complete_loaded_archive_space(MemRegion archive_space) {
  assert(_old_gen->object_space()->used_region().contains(archive_space),
         "Archive space not contained in old gen");
  _old_gen->complete_loaded_archive_space(archive_space);
}

void ParallelScavengeHeap::register_nmethod(nmethod* nm) {
  ScavengableNMethods::register_nmethod(nm);
  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
  bs_nm->disarm(nm);
}

void ParallelScavengeHeap::unregister_nmethod(nmethod* nm) {
  ScavengableNMethods::unregister_nmethod(nm);
}

void ParallelScavengeHeap::verify_nmethod(nmethod* nm) {
  ScavengableNMethods::verify_nmethod(nm);
}

GrowableArray<GCMemoryManager*> ParallelScavengeHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(2);
  memory_managers.append(_young_manager);
  memory_managers.append(_old_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> ParallelScavengeHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(3);
  memory_pools.append(_eden_pool);
  memory_pools.append(_survivor_pool);
  memory_pools.append(_old_pool);
  return memory_pools;
}

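// Object pinning: Parallel has no per-object pinning support, so pin/unpin are
// mapped to entering/leaving a GCLocker critical section, which prevents GCs
// from running while any object is pinned.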
void ParallelScavengeHeap::pin_object(JavaThread* thread, oop obj) {
  GCLocker::enter(thread);
}

void ParallelScavengeHeap::unpin_object(JavaThread* thread, oop obj) {
  GCLocker::exit(thread);
}

void ParallelScavengeHeap::update_parallel_worker_threads_cpu_time() {
  assert(Thread::current()->is_VM_thread(),
         "Must be called from VM thread to avoid races");
  if (!UsePerfData || !os::is_thread_cpu_time_supported()) {
    return;
  }

  // Ensure ThreadTotalCPUTimeClosure destructor is called before publishing gc
  // time.
  {
    ThreadTotalCPUTimeClosure tttc(CPUTimeGroups::CPUTimeType::gc_parallel_workers);
    // Currently parallel worker threads in GCTaskManager never terminate, so it
    // is safe for VMThread to read their CPU times. If upstream changes this
    // behavior, we should rethink if it is still safe.
    gc_threads_do(&tttc);
  }

  CPUTimeCounters::publish_gc_total_cpu_time();
}