1 /*
  2  * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "gc/parallel/objectStartArray.inline.hpp"
 26 #include "gc/parallel/parallelArguments.hpp"
 27 #include "gc/parallel/parallelInitLogger.hpp"
 28 #include "gc/parallel/parallelScavengeHeap.inline.hpp"
 29 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
 30 #include "gc/parallel/psMemoryPool.hpp"
 31 #include "gc/parallel/psParallelCompact.inline.hpp"
 32 #include "gc/parallel/psPromotionManager.hpp"
 33 #include "gc/parallel/psScavenge.hpp"
 34 #include "gc/parallel/psVMOperations.hpp"
 35 #include "gc/shared/barrierSetNMethod.hpp"
 36 #include "gc/shared/fullGCForwarding.inline.hpp"
 37 #include "gc/shared/gcHeapSummary.hpp"
 38 #include "gc/shared/gcLocker.inline.hpp"
 39 #include "gc/shared/gcWhen.hpp"
 40 #include "gc/shared/genArguments.hpp"
 41 #include "gc/shared/locationPrinter.inline.hpp"
 42 #include "gc/shared/scavengableNMethods.hpp"
 43 #include "gc/shared/suspendibleThreadSet.hpp"
 44 #include "logging/log.hpp"
 45 #include "memory/iterator.hpp"
 46 #include "memory/metaspaceCounters.hpp"
 47 #include "memory/metaspaceUtils.hpp"
 48 #include "memory/reservedSpace.hpp"
 49 #include "memory/universe.hpp"
 50 #include "oops/oop.inline.hpp"
 51 #include "runtime/cpuTimeCounters.hpp"
 52 #include "runtime/globals_extension.hpp"
 53 #include "runtime/handles.inline.hpp"
 54 #include "runtime/init.hpp"
 55 #include "runtime/java.hpp"
 56 #include "runtime/vmThread.hpp"
 57 #include "services/memoryManager.hpp"
 58 #include "utilities/macros.hpp"
 59 #include "utilities/vmError.hpp"
 60 
 61 PSYoungGen*  ParallelScavengeHeap::_young_gen = nullptr;
 62 PSOldGen*    ParallelScavengeHeap::_old_gen = nullptr;
 63 PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = nullptr;
 64 GCPolicyCounters* ParallelScavengeHeap::_gc_policy_counters = nullptr;
 65 size_t ParallelScavengeHeap::_desired_page_size = 0;
 66 
 67 jint ParallelScavengeHeap::initialize() {
 68   const size_t reserved_heap_size = ParallelArguments::heap_reserved_size_bytes();
 69 
 70   assert(_desired_page_size != 0, "Should be initialized");
 71   ReservedHeapSpace heap_rs = Universe::reserve_heap(reserved_heap_size, HeapAlignment, _desired_page_size);
 72   // Adjust SpaceAlignment based on actually used large page size.
 73   if (UseLargePages) {
 74     SpaceAlignment = MAX2(heap_rs.page_size(), default_space_alignment());
 75   }
 76   assert(is_aligned(SpaceAlignment, heap_rs.page_size()), "inv");
 77 
 78   trace_actual_reserved_page_size(reserved_heap_size, heap_rs);
 79 
 80   initialize_reserved_region(heap_rs);
 81   // Layout the reserved space for the generations.
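  // The old generation occupies the lower end of the reserved space and the
  // young generation sits directly above it, so their boundaries meet (see the
  // assert further below).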
 82   ReservedSpace old_rs   = heap_rs.first_part(MaxOldSize, SpaceAlignment);
 83   ReservedSpace young_rs = heap_rs.last_part(MaxOldSize, SpaceAlignment);
 84   assert(young_rs.size() == MaxNewSize, "Didn't reserve all of the heap");
 85 
 86   PSCardTable* card_table = new PSCardTable(_reserved);
 87   card_table->initialize(old_rs.base(), young_rs.base());
 88 
 89   CardTableBarrierSet* const barrier_set = new CardTableBarrierSet(card_table);
 90   BarrierSet::set_barrier_set(barrier_set);
 91 
 92   // Set up WorkerThreads
 93   _workers.initialize_workers();
 94 
 95   // Create and initialize the generations.
 96   _young_gen = new PSYoungGen(
 97       young_rs,
 98       NewSize,
 99       MinNewSize,
100       MaxNewSize);
101   _old_gen = new PSOldGen(
102       old_rs,
103       OldSize,
104       MinOldSize,
105       MaxOldSize);
106 
  assert(young_gen()->max_gen_size() == young_rs.size(), "Consistency check");
108   assert(old_gen()->max_gen_size() == old_rs.size(), "Consistency check");
109 
110   double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
111 
112   _size_policy = new PSAdaptiveSizePolicy(SpaceAlignment,
113                                           max_gc_pause_sec);
114 
115   assert((old_gen()->virtual_space()->high_boundary() ==
116           young_gen()->virtual_space()->low_boundary()),
117          "Boundaries must meet");
118   // initialize the policy counters - 2 collectors, 2 generations
119   _gc_policy_counters = new GCPolicyCounters("ParScav:MSC", 2, 2);
120 
121   if (!PSParallelCompact::initialize_aux_data()) {
122     return JNI_ENOMEM;
123   }
124 
125   // Create CPU time counter
126   CPUTimeCounters::create_counter(CPUTimeGroups::CPUTimeType::gc_parallel_workers);
127 
128   ParallelInitLogger::print();
129 
130   FullGCForwarding::initialize(_reserved);
131 
132   return JNI_OK;
133 }
134 
135 void ParallelScavengeHeap::initialize_serviceability() {
136 
137   _eden_pool = new EdenMutableSpacePool(_young_gen,
138                                         _young_gen->eden_space(),
139                                         "PS Eden Space",
140                                         false /* support_usage_threshold */);
141 
142   _survivor_pool = new SurvivorMutableSpacePool(_young_gen,
143                                                 "PS Survivor Space",
144                                                 false /* support_usage_threshold */);
145 
146   _old_pool = new PSGenerationPool(_old_gen,
147                                    "PS Old Gen",
148                                    true /* support_usage_threshold */);
149 
150   _young_manager = new GCMemoryManager("PS Scavenge");
151   _old_manager = new GCMemoryManager("PS MarkSweep");
152 
153   _old_manager->add_pool(_eden_pool);
154   _old_manager->add_pool(_survivor_pool);
155   _old_manager->add_pool(_old_pool);
156 
157   _young_manager->add_pool(_eden_pool);
158   _young_manager->add_pool(_survivor_pool);
159 
160 }
161 
162 class PSIsScavengable : public BoolObjectClosure {
163   bool do_object_b(oop obj) {
164     return ParallelScavengeHeap::heap()->is_in_young(obj);
165   }
166 };
167 
168 static PSIsScavengable _is_scavengable;
169 
170 void ParallelScavengeHeap::post_initialize() {
171   CollectedHeap::post_initialize();
172   // Need to init the tenuring threshold
173   PSScavenge::initialize();
174   PSParallelCompact::post_initialize();
175   PSPromotionManager::initialize();
176 
177   ScavengableNMethods::initialize(&_is_scavengable);
178   GCLocker::initialize();
179 }
180 
181 void ParallelScavengeHeap::gc_epilogue(bool full) {
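  // _is_heap_almost_full is a simple two-state flag: it is set when a full-gc
  // leaves eden non-empty and cleared once eden is found empty after a later gc.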
182   if (_is_heap_almost_full) {
183     // Reset emergency state if eden is empty after a young/full gc
184     if (_young_gen->eden_space()->is_empty()) {
185       log_debug(gc)("Leaving memory constrained state; back to normal");
186       _is_heap_almost_full = false;
187     }
188   } else {
189     if (full && !_young_gen->eden_space()->is_empty()) {
      log_debug(gc)("Non-empty young-gen after full-gc; entering memory constrained state");
191       _is_heap_almost_full = true;
192     }
193   }
194 }
195 
196 void ParallelScavengeHeap::update_counters() {
197   young_gen()->update_counters();
198   old_gen()->update_counters();
199   MetaspaceCounters::update_performance_counters();
200   update_parallel_worker_threads_cpu_time();
201 }
202 
203 size_t ParallelScavengeHeap::capacity() const {
204   size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
205   return value;
206 }
207 
208 size_t ParallelScavengeHeap::used() const {
209   size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
210   return value;
211 }
212 
213 size_t ParallelScavengeHeap::max_capacity() const {
214   size_t estimated = reserved_region().byte_size();
215   if (UseAdaptiveSizePolicy) {
216     estimated -= _size_policy->max_survivor_size(young_gen()->max_gen_size());
217   } else {
218     estimated -= young_gen()->to_space()->capacity_in_bytes();
219   }
220   return MAX2(estimated, capacity());
221 }
222 
223 bool ParallelScavengeHeap::is_in(const void* p) const {
224   return young_gen()->is_in(p) || old_gen()->is_in(p);
225 }
226 
227 bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
228   return young_gen()->is_in_reserved(p) || old_gen()->is_in_reserved(p);
229 }
230 
231 bool ParallelScavengeHeap::requires_barriers(stackChunkOop p) const {
232   return !is_in_young(p);
233 }
234 
235 // There are two levels of allocation policy here.
236 //
237 // When an allocation request fails, the requesting thread must invoke a VM
238 // operation, transfer control to the VM thread, and await the results of a
239 // garbage collection. That is quite expensive, and we should avoid doing it
240 // multiple times if possible.
241 //
242 // To accomplish this, we have a basic allocation policy, and also a
243 // failed allocation policy.
244 //
245 // The basic allocation policy controls how you allocate memory without
246 // attempting garbage collection. It is okay to grab locks and
247 // expand the heap, if that can be done without coming to a safepoint.
248 // It is likely that the basic allocation policy will not be very
249 // aggressive.
250 //
251 // The failed allocation policy is invoked from the VM thread after
252 // the basic allocation policy is unable to satisfy a mem_allocate
253 // request. This policy needs to cover the entire range of collection,
254 // heap expansion, and out-of-memory conditions. It should make every
255 // attempt to allocate the requested memory.
256 
257 // Basic allocation policy. Should never be called at a safepoint, or
258 // from the VM thread.
259 //
260 // This method must handle cases where many mem_allocate requests fail
261 // simultaneously. When that happens, only one VM operation will succeed,
262 // and the rest will not be executed. For that reason, this method loops
263 // during failed allocation attempts. If the java heap becomes exhausted,
264 // we rely on the size_policy object to force a bail out.
265 HeapWord* ParallelScavengeHeap::mem_allocate(size_t size) {
266   assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
267   assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
268   assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
269 
270   bool is_tlab = false;
271   return mem_allocate_work(size, is_tlab);
272 }
273 
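// Lock-free (CAS-only) allocation attempts; neither expands a generation nor
// triggers a GC.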
274 HeapWord* ParallelScavengeHeap::mem_allocate_cas_noexpand(size_t size, bool is_tlab) {
275   // Try young-gen first.
276   HeapWord* result = young_gen()->allocate(size);
277   if (result != nullptr) {
278     return result;
279   }
280 
  // Non-TLAB allocations that are too large for eden are tried in the old gen.
282   if (!is_tlab) {
283     if (!should_alloc_in_eden(size)) {
284       result = old_gen()->cas_allocate_noexpand(size);
285       if (result != nullptr) {
286         return result;
287       }
288     }
289   }
290 
  // When the heap is almost full, also try the young-gen from-space and, for
  // non-TLAB requests, the old gen.
292   if (_is_heap_almost_full) {
293     result = young_gen()->from_space()->cas_allocate(size);
294     if (result != nullptr) {
295       return result;
296     }
297     if (!is_tlab) {
298       result = old_gen()->cas_allocate_noexpand(size);
299       if (result != nullptr) {
300         return result;
301       }
302     }
303   }
304 
305   return nullptr;
306 }
307 
308 HeapWord* ParallelScavengeHeap::mem_allocate_work(size_t size, bool is_tlab) {
309   for (uint loop_count = 0; /* empty */; ++loop_count) {
310     HeapWord* result = mem_allocate_cas_noexpand(size, is_tlab);
311     if (result != nullptr) {
312       return result;
313     }
314 
315     // Read total_collections() under the lock so that multiple
316     // allocation-failures result in one GC.
317     uint gc_count;
318     {
319       MutexLocker ml(Heap_lock);
320 
321       // Re-try after acquiring the lock, because a GC might have occurred
322       // while waiting for this lock.
323       result = mem_allocate_cas_noexpand(size, is_tlab);
324       if (result != nullptr) {
325         return result;
326       }
327 
328       if (!is_init_completed()) {
329         // Can't do GC; try heap expansion to satisfy the request.
330         result = expand_heap_and_allocate(size, is_tlab);
331         if (result != nullptr) {
332           return result;
333         }
334       }
335 
336       gc_count = total_collections();
337     }
338 
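    // Hand the collection (and the allocation retry) over to the VM thread at a
    // safepoint. If several threads race here with the same gc_count, only one
    // VM operation will actually run a GC; the others are not executed.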
339     {
340       VM_ParallelCollectForAllocation op(size, is_tlab, gc_count);
341       VMThread::execute(&op);
342 
343       if (op.gc_succeeded()) {
344         assert(is_in_or_null(op.result()), "result not in heap");
345         return op.result();
346       }
347     }
348 
    // Was the GC-overhead limit reached inside the safepoint? If so, this
    // mutator should also return null for global consistency.
351     if (_gc_overhead_counter >= GCOverheadLimitThreshold) {
352       return nullptr;
353     }
354 
355     if ((QueuedAllocationWarningCount > 0) &&
356         (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc)("ParallelScavengeHeap::mem_allocate retries %u times, size=%zu", loop_count, size);
358     }
359   }
360 }
361 
362 void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
363   // No need for max-compaction in this context.
364   const bool should_do_max_compaction = false;
365   PSParallelCompact::invoke(clear_all_soft_refs, should_do_max_compaction);
366 }
367 
368 bool ParallelScavengeHeap::should_attempt_young_gc() const {
369   const bool ShouldRunYoungGC = true;
370   const bool ShouldRunFullGC = false;
371 
372   if (!_young_gen->to_space()->is_empty()) {
373     log_debug(gc, ergo)("To-space is not empty; run full-gc instead.");
374     return ShouldRunFullGC;
375   }
376 
377   // Check if the predicted promoted bytes will overflow free space in old-gen.
378   PSAdaptiveSizePolicy* policy = _size_policy;
379 
380   size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
381   size_t promotion_estimate = MIN2(avg_promoted, _young_gen->used_in_bytes());
382   // Total free size after possible old gen expansion
383   size_t free_in_old_gen_with_expansion = _old_gen->max_gen_size() - _old_gen->used_in_bytes();
384 
385   log_trace(gc, ergo)("average_promoted %zu; padded_average_promoted %zu",
386               (size_t) policy->average_promoted_in_bytes(),
387               (size_t) policy->padded_average_promoted_in_bytes());
388 
389   if (promotion_estimate >= free_in_old_gen_with_expansion) {
390     log_debug(gc, ergo)("Run full-gc; predicted promotion size >= max free space in old-gen: %zu >= %zu",
391       promotion_estimate, free_in_old_gen_with_expansion);
392     return ShouldRunFullGC;
393   }
394 
395   if (UseAdaptiveSizePolicy) {
    // Also check that the OS has enough free memory to commit and expand old-gen.
    // Otherwise, the recorded gc-pause time might be inflated by the time the OS
    // spends preparing free memory, resulting in inaccurate young-gen resizing.
    assert(_old_gen->committed().byte_size() >= _old_gen->used_in_bytes(), "inv");
    // Use uint64_t instead of size_t for 32-bit compatibility.
401     uint64_t free_mem_in_os;
402     if (os::free_memory(free_mem_in_os)) {
403       size_t actual_free = (size_t)MIN2(_old_gen->committed().byte_size() - _old_gen->used_in_bytes() + free_mem_in_os,
404                                         (uint64_t)SIZE_MAX);
405       if (promotion_estimate > actual_free) {
406         log_debug(gc, ergo)("Run full-gc; predicted promotion size > free space in old-gen and OS: %zu > %zu",
407           promotion_estimate, actual_free);
408         return ShouldRunFullGC;
409       }
410     }
411   }
412 
  // No particular reason to run a full-gc, so run a young-gc.
414   return ShouldRunYoungGC;
415 }
416 
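// Returns true if the free space of a generation is below GCHeapFreeLimit
// percent of its capacity.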
417 static bool check_gc_heap_free_limit(size_t free_bytes, size_t capacity_bytes) {
418   return (free_bytes * 100 / capacity_bytes) < GCHeapFreeLimit;
419 }
420 
421 bool ParallelScavengeHeap::check_gc_overhead_limit() {
422   assert(SafepointSynchronize::is_at_safepoint(), "precondition");
423 
424   if (UseGCOverheadLimit) {
    // The goal here is to return null early so that applications can exit
    // gracefully when GC consumes most of the execution time.
427     bool little_mutator_time = _size_policy->mutator_time_percent() * 100 < (100 - GCTimeLimit);
428     bool little_free_space = check_gc_heap_free_limit(_young_gen->free_in_bytes(), _young_gen->capacity_in_bytes())
429                           && check_gc_heap_free_limit(  _old_gen->free_in_bytes(),   _old_gen->capacity_in_bytes());
430 
431     log_debug(gc)("GC Overhead Limit: GC Time %f Free Space Young %f Old %f Counter %zu",
432                   (100 - _size_policy->mutator_time_percent()),
433                   percent_of(_young_gen->free_in_bytes(), _young_gen->capacity_in_bytes()),
                  percent_of(_old_gen->free_in_bytes(), _old_gen->capacity_in_bytes()),
435                   _gc_overhead_counter);
436 
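    // Only consecutive GCs that both exceed the time limit and leave little free
    // space are counted; a GC after which either condition no longer holds
    // resets the counter.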
437     if (little_mutator_time && little_free_space) {
438       _gc_overhead_counter++;
439       if (_gc_overhead_counter >= GCOverheadLimitThreshold) {
440         return true;
441       }
442     } else {
443       _gc_overhead_counter = 0;
444     }
445   }
446   return false;
447 }
448 
449 HeapWord* ParallelScavengeHeap::expand_heap_and_allocate(size_t size, bool is_tlab) {
450 #ifdef ASSERT
451   assert(Heap_lock->is_locked(), "precondition");
452   if (is_init_completed()) {
453     assert(SafepointSynchronize::is_at_safepoint(), "precondition");
454     assert(Thread::current()->is_VM_thread(), "precondition");
455   } else {
456     assert(Thread::current()->is_Java_thread(), "precondition");
457     assert(Heap_lock->owned_by_self(), "precondition");
458   }
459 #endif
460 
461   HeapWord* result = young_gen()->expand_and_allocate(size);
462 
463   if (result == nullptr && !is_tlab) {
464     result = old_gen()->expand_and_allocate(size);
465   }
466 
467   return result;   // Could be null if we are out of space.
468 }
469 
470 HeapWord* ParallelScavengeHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
471   assert(size != 0, "precondition");
472 
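  // Unless the heap is already almost full, try the cheaper young-gc (or a
  // regular full-gc) plus heap expansion first; only then fall back to a
  // maximally compacting full-gc that clears soft references.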
473   HeapWord* result = nullptr;
474 
475   if (!_is_heap_almost_full) {
    // If young-gen can handle this allocation, attempt a young-gc first, as
    // young-gc is usually cheaper.
477     bool should_run_young_gc = is_tlab || should_alloc_in_eden(size);
478 
479     collect_at_safepoint(!should_run_young_gc);
480 
    // If the GC-overhead limit has been reached, skip the allocation attempt.
482     if (!check_gc_overhead_limit()) {
483       result = expand_heap_and_allocate(size, is_tlab);
484       if (result != nullptr) {
485         return result;
486       }
487     }
488   }
489 
490   // Last resort GC; clear soft refs and do max-compaction before throwing OOM.
491   {
492     const bool clear_all_soft_refs = true;
493     const bool should_do_max_compaction = true;
494     PSParallelCompact::invoke(clear_all_soft_refs, should_do_max_compaction);
495   }
496 
497   if (check_gc_overhead_limit()) {
498     log_info(gc)("GC Overhead Limit exceeded too often (%zu).", GCOverheadLimitThreshold);
499     return nullptr;
500   }
501 
502   result = expand_heap_and_allocate(size, is_tlab);
503 
504   return result;
505 }
506 
507 void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
508   CollectedHeap::ensure_parsability(retire_tlabs);
509   young_gen()->eden_space()->ensure_parsability();
510 }
511 
512 size_t ParallelScavengeHeap::tlab_capacity() const {
513   return young_gen()->eden_space()->tlab_capacity();
514 }
515 
516 size_t ParallelScavengeHeap::tlab_used() const {
517   return young_gen()->eden_space()->tlab_used();
518 }
519 
520 size_t ParallelScavengeHeap::unsafe_max_tlab_alloc() const {
521   return young_gen()->eden_space()->unsafe_max_tlab_alloc();
522 }
523 
524 HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
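  // The TLAB is allocated at the full requested_size or not at all; min_size is
  // not used to attempt a smaller allocation.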
525   HeapWord* result = mem_allocate_work(requested_size /* size */,
526                                        true /* is_tlab */);
527   if (result != nullptr) {
528     *actual_size = requested_size;
529   }
530 
531   return result;
532 }
533 
534 void ParallelScavengeHeap::resize_all_tlabs() {
535   CollectedHeap::resize_all_tlabs();
536 }
537 
538 void ParallelScavengeHeap::prune_scavengable_nmethods() {
539   ScavengableNMethods::prune_nmethods_not_into_young();
540 }
541 
542 void ParallelScavengeHeap::prune_unlinked_nmethods() {
543   ScavengableNMethods::prune_unlinked_nmethods();
544 }
545 
546 void ParallelScavengeHeap::collect(GCCause::Cause cause) {
547   assert(!Heap_lock->owned_by_self(),
548     "this thread should not own the Heap_lock");
549 
550   uint gc_count      = 0;
551   uint full_gc_count = 0;
552   {
553     MutexLocker ml(Heap_lock);
    // These values are guarded by the Heap_lock
555     gc_count      = total_collections();
556     full_gc_count = total_full_collections();
557   }
558 
559   VM_ParallelGCCollect op(gc_count, full_gc_count, cause);
560   VMThread::execute(&op);
561 }
562 
563 void ParallelScavengeHeap::collect_at_safepoint(bool is_full) {
564   assert(!GCLocker::is_active(), "precondition");
565   bool clear_soft_refs = GCCause::should_clear_all_soft_refs(_gc_cause);
566 
567   if (!is_full && should_attempt_young_gc()) {
568     bool young_gc_success = PSScavenge::invoke(clear_soft_refs);
569     if (young_gc_success) {
570       return;
571     }
572     log_debug(gc, heap)("Upgrade to Full-GC since Young-gc failed.");
573   }
574 
575   const bool should_do_max_compaction = false;
576   PSParallelCompact::invoke(clear_soft_refs, should_do_max_compaction);
577 }
578 
579 void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
580   young_gen()->object_iterate(cl);
581   old_gen()->object_iterate(cl);
582 }
583 
// The HeapBlockClaimer is used during parallel iteration over the heap,
// allowing workers to claim heap areas ("blocks"), gaining exclusive rights to them.
// The eden space forms one block and the two survivor spaces together form
// another, as these spaces are hard to subdivide.
// The old space is divided into fixed-size blocks.
589 class HeapBlockClaimer : public StackObj {
590   size_t _claimed_index;
591 
592 public:
593   static const size_t InvalidIndex = SIZE_MAX;
594   static const size_t EdenIndex = 0;
595   static const size_t SurvivorIndex = 1;
596   static const size_t NumNonOldGenClaims = 2;
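  // Index 0 is the eden space, index 1 covers both survivor spaces, and indices
  // >= NumNonOldGenClaims map to old-gen block (index - NumNonOldGenClaims).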
597 
598   HeapBlockClaimer() : _claimed_index(EdenIndex) { }
  // Claim a block and return its index, or InvalidIndex if all blocks have
  // already been claimed.
600   size_t claim_and_get_block() {
    size_t block_index = AtomicAccess::fetch_then_add(&_claimed_index, 1u);
603 
604     PSOldGen* old_gen = ParallelScavengeHeap::heap()->old_gen();
605     size_t num_claims = old_gen->num_iterable_blocks() + NumNonOldGenClaims;
606 
607     return block_index < num_claims ? block_index : InvalidIndex;
608   }
609 };
610 
611 void ParallelScavengeHeap::object_iterate_parallel(ObjectClosure* cl,
612                                                    HeapBlockClaimer* claimer) {
613   size_t block_index = claimer->claim_and_get_block();
614   // Iterate until all blocks are claimed
615   if (block_index == HeapBlockClaimer::EdenIndex) {
616     young_gen()->eden_space()->object_iterate(cl);
617     block_index = claimer->claim_and_get_block();
618   }
619   if (block_index == HeapBlockClaimer::SurvivorIndex) {
620     young_gen()->from_space()->object_iterate(cl);
621     young_gen()->to_space()->object_iterate(cl);
622     block_index = claimer->claim_and_get_block();
623   }
624   while (block_index != HeapBlockClaimer::InvalidIndex) {
625     old_gen()->object_iterate_block(cl, block_index - HeapBlockClaimer::NumNonOldGenClaims);
626     block_index = claimer->claim_and_get_block();
627   }
628 }
629 
630 class PSScavengeParallelObjectIterator : public ParallelObjectIteratorImpl {
631 private:
632   ParallelScavengeHeap*  _heap;
633   HeapBlockClaimer      _claimer;
634 
635 public:
636   PSScavengeParallelObjectIterator() :
637       _heap(ParallelScavengeHeap::heap()),
638       _claimer() {}
639 
640   virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
641     _heap->object_iterate_parallel(cl, &_claimer);
642   }
643 };
644 
645 ParallelObjectIteratorImpl* ParallelScavengeHeap::parallel_object_iterator(uint thread_num) {
646   return new PSScavengeParallelObjectIterator();
647 }
648 
649 HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
650   if (young_gen()->is_in_reserved(addr)) {
651     assert(young_gen()->is_in(addr),
652            "addr should be in allocated part of young gen");
653     // called from os::print_location by find or VMError
654     if (DebuggingContext::is_enabled() || VMError::is_error_reported()) {
655       return nullptr;
656     }
657     Unimplemented();
658   } else if (old_gen()->is_in_reserved(addr)) {
659     assert(old_gen()->is_in(addr),
660            "addr should be in allocated part of old gen");
661     return old_gen()->start_array()->object_start((HeapWord*)addr);
662   }
663   return nullptr;
664 }
665 
666 bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
667   return block_start(addr) == addr;
668 }
669 
670 void ParallelScavengeHeap::prepare_for_verify() {
671   ensure_parsability(false);  // no need to retire TLABs for verification
672 }
673 
674 PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
675   PSOldGen* old = old_gen();
676   HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
677   HeapWord* old_reserved_start = old->reserved().start();
678   HeapWord* old_reserved_end = old->reserved().end();
679   VirtualSpaceSummary old_summary(old_reserved_start, old_committed_end, old_reserved_end);
680   SpaceSummary old_space(old_reserved_start, old_committed_end, old->used_in_bytes());
681 
682   PSYoungGen* young = young_gen();
683   VirtualSpaceSummary young_summary(young->reserved().start(),
684     (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());
685 
686   MutableSpace* eden = young_gen()->eden_space();
687   SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());
688 
689   MutableSpace* from = young_gen()->from_space();
690   SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes());
691 
692   MutableSpace* to = young_gen()->to_space();
693   SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());
694 
695   VirtualSpaceSummary heap_summary = create_heap_space_summary();
696   return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space);
697 }
698 
699 bool ParallelScavengeHeap::print_location(outputStream* st, void* addr) const {
700   return BlockLocationPrinter<ParallelScavengeHeap>::print_location(st, addr);
701 }
702 
703 void ParallelScavengeHeap::print_heap_on(outputStream* st) const {
704   if (young_gen() != nullptr) {
705     young_gen()->print_on(st);
706   }
707   if (old_gen() != nullptr) {
708     old_gen()->print_on(st);
709   }
710 }
711 
712 void ParallelScavengeHeap::print_gc_on(outputStream* st) const {
713   BarrierSet* bs = BarrierSet::barrier_set();
714   if (bs != nullptr) {
715     bs->print_on(st);
716   }
717   st->cr();
718 
719   PSParallelCompact::print_on(st);
720 }
721 
722 void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
723   ParallelScavengeHeap::heap()->workers().threads_do(tc);
724 }
725 
726 void ParallelScavengeHeap::print_tracing_info() const {
727   log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
728   log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs", PSParallelCompact::accumulated_time()->seconds());
729 }
730 
731 PreGenGCValues ParallelScavengeHeap::get_pre_gc_values() const {
732   const PSYoungGen* const young = young_gen();
733   const MutableSpace* const eden = young->eden_space();
734   const MutableSpace* const from = young->from_space();
735   const PSOldGen* const old = old_gen();
736 
737   return PreGenGCValues(young->used_in_bytes(),
738                         young->capacity_in_bytes(),
739                         eden->used_in_bytes(),
740                         eden->capacity_in_bytes(),
741                         from->used_in_bytes(),
742                         from->capacity_in_bytes(),
743                         old->used_in_bytes(),
744                         old->capacity_in_bytes());
745 }
746 
747 void ParallelScavengeHeap::print_heap_change(const PreGenGCValues& pre_gc_values) const {
748   const PSYoungGen* const young = young_gen();
749   const MutableSpace* const eden = young->eden_space();
750   const MutableSpace* const from = young->from_space();
751   const PSOldGen* const old = old_gen();
752 
753   log_info(gc, heap)(HEAP_CHANGE_FORMAT" "
754                      HEAP_CHANGE_FORMAT" "
755                      HEAP_CHANGE_FORMAT,
756                      HEAP_CHANGE_FORMAT_ARGS(young->name(),
757                                              pre_gc_values.young_gen_used(),
758                                              pre_gc_values.young_gen_capacity(),
759                                              young->used_in_bytes(),
760                                              young->capacity_in_bytes()),
761                      HEAP_CHANGE_FORMAT_ARGS("Eden",
762                                              pre_gc_values.eden_used(),
763                                              pre_gc_values.eden_capacity(),
764                                              eden->used_in_bytes(),
765                                              eden->capacity_in_bytes()),
766                      HEAP_CHANGE_FORMAT_ARGS("From",
767                                              pre_gc_values.from_used(),
768                                              pre_gc_values.from_capacity(),
769                                              from->used_in_bytes(),
770                                              from->capacity_in_bytes()));
771   log_info(gc, heap)(HEAP_CHANGE_FORMAT,
772                      HEAP_CHANGE_FORMAT_ARGS(old->name(),
773                                              pre_gc_values.old_gen_used(),
774                                              pre_gc_values.old_gen_capacity(),
775                                              old->used_in_bytes(),
776                                              old->capacity_in_bytes()));
777   MetaspaceUtils::print_metaspace_change(pre_gc_values.metaspace_sizes());
778 }
779 
780 void ParallelScavengeHeap::verify(VerifyOption option /* ignored */) {
781   log_debug(gc, verify)("Tenured");
782   old_gen()->verify();
783 
784   log_debug(gc, verify)("Eden");
785   young_gen()->verify();
786 
787   log_debug(gc, verify)("CardTable");
788   card_table()->verify_all_young_refs_imprecise();
789 }
790 
791 void ParallelScavengeHeap::trace_actual_reserved_page_size(const size_t reserved_heap_size, const ReservedSpace rs) {
792   // Check if Info level is enabled, since os::trace_page_sizes() logs on Info level.
  if (log_is_enabled(Info, pagesize)) {
794     const size_t page_size = rs.page_size();
795     os::trace_page_sizes("Heap",
796                          MinHeapSize,
797                          reserved_heap_size,
798                          rs.base(),
799                          rs.size(),
800                          page_size);
801   }
802 }
803 
804 void ParallelScavengeHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
805   const PSHeapSummary& heap_summary = create_ps_heap_summary();
806   gc_tracer->report_gc_heap_summary(when, heap_summary);
807 
808   const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
809   gc_tracer->report_metaspace_summary(when, metaspace_summary);
810 }
811 
812 CardTableBarrierSet* ParallelScavengeHeap::barrier_set() {
813   return barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
814 }
815 
816 PSCardTable* ParallelScavengeHeap::card_table() {
817   return static_cast<PSCardTable*>(barrier_set()->card_table());
818 }
819 
820 static size_t calculate_free_from_free_ratio_flag(size_t live, uintx free_percent) {
821   assert(free_percent != 100, "precondition");
822   // We want to calculate how much free memory there can be based on the
823   // live size.
824   //   percent * (free + live) = free
825   // =>
826   //   free = (live * percent) / (1 - percent)
827 
828   const double percent = free_percent / 100.0;
829   return live * percent / (1.0 - percent);
830 }
831 
832 size_t ParallelScavengeHeap::calculate_desired_old_gen_capacity(size_t old_gen_live_size) {
  // If MinHeapFreeRatio is 100%, the old-gen should always be at its max capacity.
834   if (MinHeapFreeRatio == 100) {
835     return _old_gen->max_gen_size();
836   }
837 
  // Use recorded data to calculate the new old-gen capacity, avoiding excessive
  // expansion while keeping the footprint low.
840 
841   size_t promoted_estimate = _size_policy->padded_average_promoted_in_bytes();
842   // Should have at least this free room for the next young-gc promotion.
843   size_t free_size = promoted_estimate;
844 
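  // Also keep headroom equal to the gap between the recorded peak old-gen usage
  // and the current live size, so the resulting capacity covers the historically
  // observed peak.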
845   size_t largest_live_size = MAX2((size_t)_size_policy->peak_old_gen_used_estimate(), old_gen_live_size);
846   free_size += largest_live_size - old_gen_live_size;
847 
848   // Respect free percent
849   if (MinHeapFreeRatio != 0) {
850     size_t min_free = calculate_free_from_free_ratio_flag(old_gen_live_size, MinHeapFreeRatio);
851     free_size = MAX2(free_size, min_free);
852   }
853 
854   if (MaxHeapFreeRatio != 100) {
855     size_t max_free = calculate_free_from_free_ratio_flag(old_gen_live_size, MaxHeapFreeRatio);
856     free_size = MIN2(max_free, free_size);
857   }
858 
859   return old_gen_live_size + free_size;
860 }
861 
862 void ParallelScavengeHeap::resize_old_gen_after_full_gc() {
863   size_t current_capacity = _old_gen->capacity_in_bytes();
864   size_t desired_capacity = calculate_desired_old_gen_capacity(old_gen()->used_in_bytes());
865 
  // If MinHeapFreeRatio is at its default value, shrink cautiously. Otherwise, users expect prompt shrinking.
867   if (FLAG_IS_DEFAULT(MinHeapFreeRatio)) {
868     if (desired_capacity < current_capacity) {
869       // Shrinking
870       if (total_full_collections() < AdaptiveSizePolicyReadyThreshold) {
        // Not enough data to justify shrinking
872         return;
873       }
874     }
875   }
876 
877   _old_gen->resize(desired_capacity);
878 }
879 
880 void ParallelScavengeHeap::resize_after_young_gc(bool is_survivor_overflowing) {
881   _young_gen->resize_after_young_gc(is_survivor_overflowing);
882 
  // Consider whether old-gen should be shrunk
884   if (!is_survivor_overflowing) {
885     // Upper bound for a single step shrink
886     size_t max_shrink_bytes = SpaceAlignment;
887     size_t shrink_bytes = _size_policy->compute_old_gen_shrink_bytes(old_gen()->free_in_bytes(), max_shrink_bytes);
888     if (shrink_bytes != 0) {
889       if (MinHeapFreeRatio != 0) {
890         size_t new_capacity = old_gen()->capacity_in_bytes() - shrink_bytes;
891         size_t new_free_size = old_gen()->free_in_bytes() - shrink_bytes;
892         if ((double)new_free_size / new_capacity * 100 < MinHeapFreeRatio) {
893           // Would violate MinHeapFreeRatio
894           return;
895         }
896       }
897       old_gen()->shrink(shrink_bytes);
898     }
899   }
900 }
901 
902 void ParallelScavengeHeap::resize_after_full_gc() {
903   resize_old_gen_after_full_gc();
  // We don't resize young-gen after full-gc because:
  // 1. eden-size directly affects young-gc frequency (GCTimeRatio), and we
  //    don't have enough information to determine its desired size.
  // 2. eden can contain live objects after a full-gc, which is unsafe for
  //    resizing. We will perform expansion on allocation if needed, in
  //    satisfy_failed_allocation().
910 }
911 
912 HeapWord* ParallelScavengeHeap::allocate_loaded_archive_space(size_t size) {
913   return _old_gen->allocate(size);
914 }
915 
916 void ParallelScavengeHeap::complete_loaded_archive_space(MemRegion archive_space) {
917   assert(_old_gen->object_space()->used_region().contains(archive_space),
918          "Archive space not contained in old gen");
919   _old_gen->complete_loaded_archive_space(archive_space);
920 }
921 
922 void ParallelScavengeHeap::register_nmethod(nmethod* nm) {
923   ScavengableNMethods::register_nmethod(nm);
924   BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
925   bs_nm->disarm(nm);
926 }
927 
928 void ParallelScavengeHeap::unregister_nmethod(nmethod* nm) {
929   ScavengableNMethods::unregister_nmethod(nm);
930 }
931 
932 void ParallelScavengeHeap::verify_nmethod(nmethod* nm) {
933   ScavengableNMethods::verify_nmethod(nm);
934 }
935 
936 GrowableArray<GCMemoryManager*> ParallelScavengeHeap::memory_managers() {
937   GrowableArray<GCMemoryManager*> memory_managers(2);
938   memory_managers.append(_young_manager);
939   memory_managers.append(_old_manager);
940   return memory_managers;
941 }
942 
943 GrowableArray<MemoryPool*> ParallelScavengeHeap::memory_pools() {
944   GrowableArray<MemoryPool*> memory_pools(3);
945   memory_pools.append(_eden_pool);
946   memory_pools.append(_survivor_pool);
947   memory_pools.append(_old_pool);
948   return memory_pools;
949 }
950 
951 void ParallelScavengeHeap::pin_object(JavaThread* thread, oop obj) {
952   GCLocker::enter(thread);
953 }
954 
955 void ParallelScavengeHeap::unpin_object(JavaThread* thread, oop obj) {
956   GCLocker::exit(thread);
957 }
958 
959 void ParallelScavengeHeap::update_parallel_worker_threads_cpu_time() {
960   assert(Thread::current()->is_VM_thread(),
961          "Must be called from VM thread to avoid races");
962   if (!UsePerfData || !os::is_thread_cpu_time_supported()) {
963     return;
964   }
965 
966   // Ensure ThreadTotalCPUTimeClosure destructor is called before publishing gc
967   // time.
968   {
969     ThreadTotalCPUTimeClosure tttc(CPUTimeGroups::CPUTimeType::gc_parallel_workers);
    // Currently parallel worker threads in GCTaskManager never terminate, so it
    // is safe for the VM thread to read their CPU times. If upstream changes this
    // behavior, we should rethink whether it is still safe.
973     gc_threads_do(&tttc);
974   }
975 
976   CPUTimeCounters::publish_gc_total_cpu_time();
977 }