/*
 * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelArguments.hpp"
#include "gc/parallel/parallelInitLogger.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMemoryPool.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psParallelCompactNew.inline.hpp"
#include "gc/parallel/psPromotionManager.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psVMOperations.hpp"
#include "gc/shared/fullGCForwarding.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/genArguments.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceCounters.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/reservedSpace.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/cpuTimeCounters.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryManager.hpp"
#include "utilities/macros.hpp"
#include "utilities/vmError.hpp"

PSYoungGen*  ParallelScavengeHeap::_young_gen = nullptr;
PSOldGen*    ParallelScavengeHeap::_old_gen = nullptr;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = nullptr;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = nullptr;

jint ParallelScavengeHeap::initialize() {
  const size_t reserved_heap_size = ParallelArguments::heap_reserved_size_bytes();

  ReservedHeapSpace heap_rs = Universe::reserve_heap(reserved_heap_size, HeapAlignment);

  trace_actual_reserved_page_size(reserved_heap_size, heap_rs);

  initialize_reserved_region(heap_rs);
  // Lay out the reserved space for the generations.
  ReservedSpace old_rs   = heap_rs.first_part(MaxOldSize, GenAlignment);
  ReservedSpace young_rs = heap_rs.last_part(MaxOldSize, GenAlignment);
  assert(young_rs.size() == MaxNewSize, "Didn't reserve all of the heap");

  PSCardTable* card_table = new PSCardTable(_reserved);
  card_table->initialize(old_rs.base(), young_rs.base());

  CardTableBarrierSet* const barrier_set = new CardTableBarrierSet(card_table);
  barrier_set->initialize();
  BarrierSet::set_barrier_set(barrier_set);

  // Set up WorkerThreads
  _workers.initialize_workers();

  // Create and initialize the generations.
  _young_gen = new PSYoungGen(
      young_rs,
      NewSize,
      MinNewSize,
      MaxNewSize);
  _old_gen = new PSOldGen(
      old_rs,
      OldSize,
      MinOldSize,
      MaxOldSize,
      "old", 1);

  assert(young_gen()->max_gen_size() == young_rs.size(), "Consistency check");
  assert(old_gen()->max_gen_size() == old_rs.size(), "Consistency check");

  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             GenAlignment,
                             max_gc_pause_sec,
                             GCTimeRatio
                             );

  assert((old_gen()->virtual_space()->high_boundary() ==
          young_gen()->virtual_space()->low_boundary()),
         "Boundaries must meet");
  // initialize the policy counters - 2 collectors, 2 generations
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 2, _size_policy);

  if (UseCompactObjectHeaders) {
    if (!PSParallelCompactNew::initialize_aux_data()) {
      return JNI_ENOMEM;
    }
  } else {
    if (!PSParallelCompact::initialize_aux_data()) {
      return JNI_ENOMEM;
    }
  }

  // Create CPU time counter
  CPUTimeCounters::create_counter(CPUTimeGroups::CPUTimeType::gc_parallel_workers);

  ParallelInitLogger::print();

  FullGCForwarding::initialize(_reserved);

  return JNI_OK;
}

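// Set up the memory pools and GC memory managers that back the
// java.lang.management/JMX monitoring interface. The old-gen manager
// ("PS MarkSweep") is given all three pools because a full GC collects the
// entire heap, while the young-gen manager ("PS Scavenge") covers only eden
// and the survivor spaces.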
void ParallelScavengeHeap::initialize_serviceability() {
  _eden_pool = new EdenMutableSpacePool(_young_gen,
                                        _young_gen->eden_space(),
                                        "PS Eden Space",
                                        false /* support_usage_threshold */);

  _survivor_pool = new SurvivorMutableSpacePool(_young_gen,
                                                "PS Survivor Space",
                                                false /* support_usage_threshold */);

  _old_pool = new PSGenerationPool(_old_gen,
                                   "PS Old Gen",
                                   true /* support_usage_threshold */);

  _young_manager = new GCMemoryManager("PS Scavenge");
  _old_manager = new GCMemoryManager("PS MarkSweep");

  _old_manager->add_pool(_eden_pool);
  _old_manager->add_pool(_survivor_pool);
  _old_manager->add_pool(_old_pool);

  _young_manager->add_pool(_eden_pool);
  _young_manager->add_pool(_survivor_pool);
}

void ParallelScavengeHeap::safepoint_synchronize_begin() {
  if (UseStringDeduplication) {
    SuspendibleThreadSet::synchronize();
  }
}

void ParallelScavengeHeap::safepoint_synchronize_end() {
  if (UseStringDeduplication) {
    SuspendibleThreadSet::desynchronize();
  }
}
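
// Closure that tells ScavengableNMethods whether an object may be moved by a
// young GC: anything in the young generation is scavengable.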
class PSIsScavengable : public BoolObjectClosure {
  bool do_object_b(oop obj) {
    return ParallelScavengeHeap::heap()->is_in_young(obj);
  }
};

static PSIsScavengable _is_scavengable;

void ParallelScavengeHeap::post_initialize() {
  CollectedHeap::post_initialize();
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  if (UseCompactObjectHeaders) {
    PSParallelCompactNew::post_initialize();
  } else {
    PSParallelCompact::post_initialize();
  }
  PSPromotionManager::initialize();

  ScavengableNMethods::initialize(&_is_scavengable);
  GCLocker::initialize();
}

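// Refresh the jstat performance counters for both generations and for
// metaspace, and update the accumulated CPU time of the GC worker threads.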
void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  MetaspaceCounters::update_performance_counters();
  update_parallel_worker_threads_cpu_time();
}

size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  // We don't expand young-gen except at a GC.
  return old_gen()->is_maximal_no_gc();
}

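// Report the maximum amount of memory the application can ever use. One
// survivor space is always kept empty between collections, so its
// (estimated) size is excluded from the reserved heap size.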
size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_gen_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  return young_gen()->is_in(p) || old_gen()->is_in(p);
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  return young_gen()->is_in_reserved(p) || old_gen()->is_in_reserved(p);
}

bool ParallelScavengeHeap::requires_barriers(stackChunkOop p) const {
  return !is_in_young(p);
}

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection. It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request. This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions. It should make every
// attempt to allocate the requested memory.

// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(size_t size,
                                             bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  bool is_tlab = false;
  return mem_allocate_work(size, is_tlab, gc_overhead_limit_was_exceeded);
}

HeapWord* ParallelScavengeHeap::mem_allocate_work(size_t size,
                                                  bool is_tlab,
                                                  bool* gc_overhead_limit_was_exceeded) {
  // In general gc_overhead_limit_was_exceeded should be false, so set it to
  // false here and reset it to true only if the gc time limit is being
  // exceeded, as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size);

  uint loop_count = 0;
  uint gc_count = 0;

  while (result == nullptr) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, it does not start a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = total_collections();

      result = young_gen()->allocate(size);
      if (result != nullptr) {
        return result;
      }

      // If certain conditions hold, try allocating from the old gen.
      if (!is_tlab) {
        result = mem_allocate_old_gen(size);
        if (result != nullptr) {
          return result;
        }
      }
    }

    assert(result == nullptr, "inv");
    {
      VM_ParallelCollectForAllocation op(size, is_tlab, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until timeout on requests that
      // cannot be satisfied.
      if (op.prologue_succeeded()) {
        assert(is_in_or_null(op.result()), "result not in heap");

        // Exit the loop if the gc time limit has been exceeded.
        // The allocation must have failed above ("result" guarding
        // this path is null) and the most recent collection has exceeded the
        // gc overhead limit (although enough may have been collected to
        // satisfy the allocation).  Exit the loop so that an out-of-memory
        // will be thrown (return a null ignoring the contents of
        // op.result()), but clear gc_overhead_limit_exceeded so that the
        // next collection starts with a clean slate (i.e., forgets about
        // previous overhead excesses).  Fill op.result() with a filler
        // object so that the heap remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = soft_ref_policy()->all_soft_refs_clear();

        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          log_trace(gc)("ParallelScavengeHeap::mem_allocate: return null because gc_overhead_limit_exceeded is set");
          if (op.result() != nullptr) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return nullptr;
        }

        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == nullptr) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc)("ParallelScavengeHeap::mem_allocate retries %d times", loop_count);
      log_warning(gc)("\tsize=%zu", size);
    }
  }

  return result;
}

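// Allocate directly from the old generation and, on success, report the
// tenured allocation to the size policy so it can adjust generation sizes.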
HeapWord* ParallelScavengeHeap::allocate_old_gen_and_record(size_t size) {
  assert_locked_or_safepoint(Heap_lock);
  HeapWord* res = old_gen()->allocate(size);
  if (res != nullptr) {
    _size_policy->tenured_allocation(size * HeapWordSize);
  }
  return res;
}

HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
  if (!should_alloc_in_eden(size)) {
    // Size is too big for eden.
    return allocate_old_gen_and_record(size);
  }

  return nullptr;
}

void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
  if (UseCompactObjectHeaders) {
    PSParallelCompactNew::invoke(clear_all_soft_refs, false /* serial */);
  } else {
    PSParallelCompact::invoke(clear_all_soft_refs);
  }
}

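// Try to satisfy the allocation without another collection: allocate from
// the young generation, and for non-TLAB requests fall back to expanding
// the old generation.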
HeapWord* ParallelScavengeHeap::expand_heap_and_allocate(size_t size, bool is_tlab) {
  HeapWord* result = nullptr;

  result = young_gen()->allocate(size);
  if (result == nullptr && !is_tlab) {
    result = old_gen()->expand_and_allocate(size);
  }
  return result;   // Could be null if we are out of space.
}

HeapWord* ParallelScavengeHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  assert(size != 0, "precondition");

  HeapWord* result = nullptr;

  // If the young gen can handle this allocation, attempt a young GC first.
  bool should_run_young_gc = is_tlab || should_alloc_in_eden(size);
  collect_at_safepoint(!should_run_young_gc);

  result = expand_heap_and_allocate(size, is_tlab);
  if (result != nullptr) {
    return result;
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    // Make sure the heap is fully compacted
    uintx old_interval = HeapMaximumCompactionInterval;
    HeapMaximumCompactionInterval = 0;

    const bool clear_all_soft_refs = true;
    if (UseCompactObjectHeaders) {
      PSParallelCompactNew::invoke(clear_all_soft_refs, false /* serial */);
    } else {
      PSParallelCompact::invoke(clear_all_soft_refs);
    }

    // Restore
    HeapMaximumCompactionInterval = old_interval;
  }

  result = expand_heap_and_allocate(size, is_tlab);
  if (result != nullptr) {
    return result;
  }

  if (UseCompactObjectHeaders) {
    PSParallelCompactNew::invoke(true /* clear_soft_refs */, true /* serial */);
  }

  result = expand_heap_and_allocate(size, is_tlab);
  if (result != nullptr) {
    return result;
  }

  // What else?  We might try synchronous finalization later.  If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return nullptr;
}

void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::tlab_used(Thread* thr) const {
  return young_gen()->eden_space()->tlab_used(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

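// Allocate a new TLAB in eden. The requested size is used as-is, so on
// success actual_size is simply set to requested_size; the min_size
// parameter is not consulted for a separate retry here.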
HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
  bool dummy;
  HeapWord* result = mem_allocate_work(requested_size /* size */,
                                       true /* is_tlab */,
                                       &dummy);
  if (result != nullptr) {
    *actual_size = requested_size;
  }

  return result;
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

void ParallelScavengeHeap::prune_scavengable_nmethods() {
  ScavengableNMethods::prune_nmethods_not_into_young();
}

void ParallelScavengeHeap::prune_unlinked_nmethods() {
  ScavengableNMethods::prune_unlinked_nmethods();
}

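// Entry point for externally requested collections (e.g. System.gc()). For
// causes that require an explicit full GC, retry until a full collection has
// actually completed since the counts were sampled, because the VM operation
// may be skipped if it races with another collection.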
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
    "this thread should not own the Heap_lock");

  uint gc_count      = 0;
  uint full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // These values are guarded by the Heap_lock
    gc_count      = total_collections();
    full_gc_count = total_full_collections();
  }

  while (true) {
    VM_ParallelGCCollect op(gc_count, full_gc_count, cause);
    VMThread::execute(&op);

    if (!GCCause::is_explicit_full_gc(cause)) {
      return;
    }

    {
      MutexLocker ml(Heap_lock);
      if (full_gc_count != total_full_collections()) {
        return;
      }
    }
  }
}

bool ParallelScavengeHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_metadata_GC_clear_soft_refs ||
         _gc_cause == GCCause::_wb_full_gc;
}

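// Collect at a safepoint: run a young GC unless a full GC was requested, and
// fall through to a full GC if the young GC could not run.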
void ParallelScavengeHeap::collect_at_safepoint(bool full) {
  assert(!GCLocker::is_active(), "precondition");
  bool clear_soft_refs = must_clear_all_soft_refs();

  if (!full) {
    bool success = PSScavenge::invoke(clear_soft_refs);
    if (success) {
      return;
    }
    // Upgrade to a full GC if the young GC fails.
  }
  if (UseCompactObjectHeaders) {
    PSParallelCompactNew::invoke(clear_soft_refs, false /* serial */);
  } else {
    PSParallelCompact::invoke(clear_soft_refs);
  }
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
}

// The HeapBlockClaimer is used during parallel iteration over the heap,
// allowing workers to claim heap areas ("blocks"), gaining exclusive rights
// to them. The eden and survivor spaces are treated as single blocks, as it
// is hard to divide these spaces. The old space is divided into fixed-size
// blocks.
class HeapBlockClaimer : public StackObj {
  size_t _claimed_index;

public:
  static const size_t InvalidIndex = SIZE_MAX;
  static const size_t EdenIndex = 0;
  static const size_t SurvivorIndex = 1;
  static const size_t NumNonOldGenClaims = 2;

  HeapBlockClaimer() : _claimed_index(EdenIndex) { }
  // Claim the next block and return its index.
  size_t claim_and_get_block() {
    size_t block_index;
    block_index = Atomic::fetch_then_add(&_claimed_index, 1u);

    PSOldGen* old_gen = ParallelScavengeHeap::heap()->old_gen();
    size_t num_claims = old_gen->num_iterable_blocks() + NumNonOldGenClaims;

    return block_index < num_claims ? block_index : InvalidIndex;
  }
};

void ParallelScavengeHeap::object_iterate_parallel(ObjectClosure* cl,
                                                   HeapBlockClaimer* claimer) {
  size_t block_index = claimer->claim_and_get_block();
  // Iterate until all blocks are claimed
  if (block_index == HeapBlockClaimer::EdenIndex) {
    young_gen()->eden_space()->object_iterate(cl);
    block_index = claimer->claim_and_get_block();
  }
  if (block_index == HeapBlockClaimer::SurvivorIndex) {
    young_gen()->from_space()->object_iterate(cl);
    young_gen()->to_space()->object_iterate(cl);
    block_index = claimer->claim_and_get_block();
  }
  while (block_index != HeapBlockClaimer::InvalidIndex) {
    old_gen()->object_iterate_block(cl, block_index - HeapBlockClaimer::NumNonOldGenClaims);
    block_index = claimer->claim_and_get_block();
  }
}

class PSScavengeParallelObjectIterator : public ParallelObjectIteratorImpl {
private:
  ParallelScavengeHeap*  _heap;
  HeapBlockClaimer      _claimer;

public:
  PSScavengeParallelObjectIterator() :
      _heap(ParallelScavengeHeap::heap()),
      _claimer() {}

  virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
    _heap->object_iterate_parallel(cl, &_claimer);
  }
};

ParallelObjectIteratorImpl* ParallelScavengeHeap::parallel_object_iterator(uint thread_num) {
  return new PSScavengeParallelObjectIterator();
}

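// Return the start of the object containing addr, or nullptr if addr is not
// in the allocated part of the heap. Only the old generation supports this
// via its block-start array; for young-gen addresses this is reachable only
// from debugging and error-reporting paths.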
HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // called from os::print_location by find or VMError
    if (DebuggingContext::is_enabled() || VMError::is_error_reported()) {
      return nullptr;
    }
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return nullptr;
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
  PSOldGen* old = old_gen();
  HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
  HeapWord* old_reserved_start = old->reserved().start();
  HeapWord* old_reserved_end = old->reserved().end();
  VirtualSpaceSummary old_summary(old_reserved_start, old_committed_end, old_reserved_end);
  SpaceSummary old_space(old_reserved_start, old_committed_end, old->used_in_bytes());

  PSYoungGen* young = young_gen();
  VirtualSpaceSummary young_summary(young->reserved().start(),
    (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());

  MutableSpace* eden = young_gen()->eden_space();
  SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());

  MutableSpace* from = young_gen()->from_space();
  SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes());

  MutableSpace* to = young_gen()->to_space();
  SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());

  VirtualSpaceSummary heap_summary = create_heap_space_summary();
  return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space);
}

bool ParallelScavengeHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<ParallelScavengeHeap>::print_location(st, addr);
}

void ParallelScavengeHeap::print_on(outputStream* st) const {
  if (young_gen() != nullptr) {
    young_gen()->print_on(st);
  }
  if (old_gen() != nullptr) {
    old_gen()->print_on(st);
  }
  MetaspaceUtils::print_on(st);
}

void ParallelScavengeHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

  st->cr();
  if (UseCompactObjectHeaders) {
    PSParallelCompactNew::print_on_error(st);
  } else {
    PSParallelCompact::print_on_error(st);
  }
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  ParallelScavengeHeap::heap()->workers().threads_do(tc);
}

void ParallelScavengeHeap::print_tracing_info() const {
  AdaptiveSizePolicyOutput::print();
  log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
  if (UseCompactObjectHeaders) {
    log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs", PSParallelCompactNew::accumulated_time()->seconds());
  } else {
    log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs", PSParallelCompact::accumulated_time()->seconds());
  }
}

PreGenGCValues ParallelScavengeHeap::get_pre_gc_values() const {
  const PSYoungGen* const young = young_gen();
  const MutableSpace* const eden = young->eden_space();
  const MutableSpace* const from = young->from_space();
  const PSOldGen* const old = old_gen();

  return PreGenGCValues(young->used_in_bytes(),
                        young->capacity_in_bytes(),
                        eden->used_in_bytes(),
                        eden->capacity_in_bytes(),
                        from->used_in_bytes(),
                        from->capacity_in_bytes(),
                        old->used_in_bytes(),
                        old->capacity_in_bytes());
}

void ParallelScavengeHeap::print_heap_change(const PreGenGCValues& pre_gc_values) const {
  const PSYoungGen* const young = young_gen();
  const MutableSpace* const eden = young->eden_space();
  const MutableSpace* const from = young->from_space();
  const PSOldGen* const old = old_gen();

  log_info(gc, heap)(HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(young->name(),
                                             pre_gc_values.young_gen_used(),
                                             pre_gc_values.young_gen_capacity(),
                                             young->used_in_bytes(),
                                             young->capacity_in_bytes()),
                     HEAP_CHANGE_FORMAT_ARGS("Eden",
                                             pre_gc_values.eden_used(),
                                             pre_gc_values.eden_capacity(),
                                             eden->used_in_bytes(),
                                             eden->capacity_in_bytes()),
                     HEAP_CHANGE_FORMAT_ARGS("From",
                                             pre_gc_values.from_used(),
                                             pre_gc_values.from_capacity(),
                                             from->used_in_bytes(),
                                             from->capacity_in_bytes()));
  log_info(gc, heap)(HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(old->name(),
                                             pre_gc_values.old_gen_used(),
                                             pre_gc_values.old_gen_capacity(),
                                             old->used_in_bytes(),
                                             old->capacity_in_bytes()));
  MetaspaceUtils::print_metaspace_change(pre_gc_values.metaspace_sizes());
}

void ParallelScavengeHeap::verify(VerifyOption option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    log_debug(gc, verify)("Tenured");
    old_gen()->verify();

    log_debug(gc, verify)("Eden");
    young_gen()->verify();

    log_debug(gc, verify)("CardTable");
    card_table()->verify_all_young_refs_imprecise();
  }
}

void ParallelScavengeHeap::trace_actual_reserved_page_size(const size_t reserved_heap_size, const ReservedSpace rs) {
  // Check if Info level is enabled, since os::trace_page_sizes() logs on Info level.
  if (log_is_enabled(Info, pagesize)) {
    const size_t page_size = rs.page_size();
    os::trace_page_sizes("Heap",
                         MinHeapSize,
                         reserved_heap_size,
                         rs.base(),
                         rs.size(),
                         page_size);
  }
}

void ParallelScavengeHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const PSHeapSummary& heap_summary = create_ps_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

CardTableBarrierSet* ParallelScavengeHeap::barrier_set() {
  return barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
}

PSCardTable* ParallelScavengeHeap::card_table() {
  return static_cast<PSCardTable*>(barrier_set()->card_table());
}

void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
                                            size_t survivor_size) {
  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

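// Archived heap objects are materialized directly in the old generation.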
HeapWord* ParallelScavengeHeap::allocate_loaded_archive_space(size_t size) {
  return _old_gen->allocate(size);
}

void ParallelScavengeHeap::complete_loaded_archive_space(MemRegion archive_space) {
  assert(_old_gen->object_space()->used_region().contains(archive_space),
         "Archive space not contained in old gen");
  _old_gen->complete_loaded_archive_space(archive_space);
}

void ParallelScavengeHeap::register_nmethod(nmethod* nm) {
  ScavengableNMethods::register_nmethod(nm);
}

void ParallelScavengeHeap::unregister_nmethod(nmethod* nm) {
  ScavengableNMethods::unregister_nmethod(nm);
}

void ParallelScavengeHeap::verify_nmethod(nmethod* nm) {
  ScavengableNMethods::verify_nmethod(nm);
}

GrowableArray<GCMemoryManager*> ParallelScavengeHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(2);
  memory_managers.append(_young_manager);
  memory_managers.append(_old_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> ParallelScavengeHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(3);
  memory_pools.append(_eden_pool);
  memory_pools.append(_survivor_pool);
  memory_pools.append(_old_pool);
  return memory_pools;
}

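// Object pinning is implemented with the GCLocker: while a thread has an
// object pinned it holds a GCLocker critical section, which prevents
// collections from running until the object is unpinned.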
void ParallelScavengeHeap::pin_object(JavaThread* thread, oop obj) {
  GCLocker::enter(thread);
}

void ParallelScavengeHeap::unpin_object(JavaThread* thread, oop obj) {
  GCLocker::exit(thread);
}

void ParallelScavengeHeap::update_parallel_worker_threads_cpu_time() {
  assert(Thread::current()->is_VM_thread(),
         "Must be called from VM thread to avoid races");
  if (!UsePerfData || !os::is_thread_cpu_time_supported()) {
    return;
  }

  // Ensure the ThreadTotalCPUTimeClosure destructor is called before
  // publishing the gc time.
  {
    ThreadTotalCPUTimeClosure tttc(CPUTimeGroups::CPUTimeType::gc_parallel_workers);
    // Currently parallel worker threads in GCTaskManager never terminate, so it
    // is safe for VMThread to read their CPU times. If upstream changes this
    // behavior, we should rethink whether it is still safe.
    gc_threads_do(&tttc);
  }

  CPUTimeCounters::publish_gc_total_cpu_time();
}