/*
 * Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelArguments.hpp"
#include "gc/parallel/parallelInitLogger.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMemoryPool.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psVMOperations.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/genArguments.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "gc/shared/slidingForwarding.inline.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceCounters.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/universe.hpp"
#include "nmt/memTracker.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/cpuTimeCounters.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryManager.hpp"
#include "utilities/macros.hpp"
#include "utilities/vmError.hpp"

PSYoungGen*  ParallelScavengeHeap::_young_gen = nullptr;
PSOldGen*    ParallelScavengeHeap::_old_gen = nullptr;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = nullptr;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = nullptr;

jint ParallelScavengeHeap::initialize() {
  const size_t reserved_heap_size = ParallelArguments::heap_reserved_size_bytes();

  ReservedHeapSpace heap_rs = Universe::reserve_heap(reserved_heap_size, HeapAlignment);

  trace_actual_reserved_page_size(reserved_heap_size, heap_rs);

  initialize_reserved_region(heap_rs);
  // Layout the reserved space for the generations.
  ReservedSpace old_rs   = heap_rs.first_part(MaxOldSize);
  ReservedSpace young_rs = heap_rs.last_part(MaxOldSize);
  assert(young_rs.size() == MaxNewSize, "Didn't reserve all of the heap");

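  // The card table spans the whole reserved heap; the two base addresses
  // passed to initialize() establish its covered regions (old gen in the
  // lower part of the reservation, young gen in the upper part).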
  PSCardTable* card_table = new PSCardTable(heap_rs.region());
  card_table->initialize(old_rs.base(), young_rs.base());

  CardTableBarrierSet* const barrier_set = new CardTableBarrierSet(card_table);
  barrier_set->initialize();
  BarrierSet::set_barrier_set(barrier_set);

  // Set up WorkerThreads
  _workers.initialize_workers();

  // Create and initialize the generations.
  _young_gen = new PSYoungGen(
      young_rs,
      NewSize,
      MinNewSize,
      MaxNewSize);
  _old_gen = new PSOldGen(
      old_rs,
      OldSize,
      MinOldSize,
      MaxOldSize,
      "old", 1);

  assert(young_gen()->max_gen_size() == young_rs.size(), "Consistency check");
  assert(old_gen()->max_gen_size() == old_rs.size(), "Consistency check");

  double max_gc_pause_sec = ((double) MaxGCPauseMillis) / 1000.0;

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             GenAlignment,
                             max_gc_pause_sec,
                             GCTimeRatio);

  assert((old_gen()->virtual_space()->high_boundary() ==
          young_gen()->virtual_space()->low_boundary()),
         "Boundaries must meet");

  // Initialize the policy counters - 2 collectors, 2 generations.
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 2, _size_policy);

  if (!PSParallelCompact::initialize_aux_data()) {
    return JNI_ENOMEM;
  }

  // Create CPU time counter
  CPUTimeCounters::create_counter(CPUTimeGroups::CPUTimeType::gc_parallel_workers);

  ParallelInitLogger::print();

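  // Set up the sliding forwarding table used during full-gc compaction;
  // forwarding information is tracked per ParallelCompactData region.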
  SlidingForwarding::initialize(heap_rs.region(), ParallelCompactData::RegionSize);

  return JNI_OK;
}

void ParallelScavengeHeap::initialize_serviceability() {
  _eden_pool = new EdenMutableSpacePool(_young_gen,
                                        _young_gen->eden_space(),
                                        "PS Eden Space",
                                        false /* support_usage_threshold */);

  _survivor_pool = new SurvivorMutableSpacePool(_young_gen,
                                                "PS Survivor Space",
                                                false /* support_usage_threshold */);

  _old_pool = new PSGenerationPool(_old_gen,
                                   "PS Old Gen",
                                   true /* support_usage_threshold */);

  _young_manager = new GCMemoryManager("PS Scavenge");
  _old_manager = new GCMemoryManager("PS MarkSweep");

  _old_manager->add_pool(_eden_pool);
  _old_manager->add_pool(_survivor_pool);
  _old_manager->add_pool(_old_pool);

  _young_manager->add_pool(_eden_pool);
  _young_manager->add_pool(_survivor_pool);
}

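// String deduplication runs on a concurrent thread that belongs to the
// suspendible thread set; it must be brought to a yield point before a
// safepoint can begin, and is released again afterwards.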
void ParallelScavengeHeap::safepoint_synchronize_begin() {
  if (UseStringDeduplication) {
    SuspendibleThreadSet::synchronize();
  }
}

void ParallelScavengeHeap::safepoint_synchronize_end() {
  if (UseStringDeduplication) {
    SuspendibleThreadSet::desynchronize();
  }
}

class PSIsScavengable : public BoolObjectClosure {
  bool do_object_b(oop obj) {
    return ParallelScavengeHeap::heap()->is_in_young(obj);
  }
};

static PSIsScavengable _is_scavengable;

void ParallelScavengeHeap::post_initialize() {
  CollectedHeap::post_initialize();
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  PSParallelCompact::post_initialize();
  PSPromotionManager::initialize();

  ScavengableNMethods::initialize(&_is_scavengable);
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  MetaspaceCounters::update_performance_counters();
  update_parallel_worker_threads_cpu_time();
}

size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  // We don't expand young-gen except at a GC.
  return old_gen()->is_maximal_no_gc();
}

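// The estimate below excludes one survivor space, since only one of the two
// survivor spaces can receive allocations at any given time; the full
// reservation is therefore never usable all at once.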
size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_gen_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  return young_gen()->is_in(p) || old_gen()->is_in(p);
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  return young_gen()->is_in_reserved(p) || old_gen()->is_in_reserved(p);
}

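// Stack chunks still in the young gen will be processed in full by the next
// scavenge, so only chunks outside the young gen need GC barriers.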
bool ParallelScavengeHeap::requires_barriers(stackChunkOop p) const {
  return !is_in_young(p);
}

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection. It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request. This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions. It should make every
// attempt to allocate the requested memory.

// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(size_t size,
                                             bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // In general gc_overhead_limit_was_exceeded should be false; set it to
  // false here, and reset it to true only if the gc time limit is being
  // exceeded, as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size);

  uint loop_count = 0;
  uint gc_count = 0;
  uint gclocker_stalled_count = 0;

  while (result == nullptr) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period in which it
    // reads the total_collections() value.
    {
      MutexLocker ml(Heap_lock);
      gc_count = total_collections();

      result = young_gen()->allocate(size);
      if (result != nullptr) {
        return result;
      }

      // If certain conditions hold, try allocating from the old gen.
      result = mem_allocate_old_gen(size);
      if (result != nullptr) {
        return result;
      }

      if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
        return nullptr;
      }

      // Failed to allocate without a gc.
      if (GCLocker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GCLocker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return nullptr;
        }
      }
    }

    if (result == nullptr) {
      // Generate a VM operation
      VM_ParallelGCFailedAllocation op(size, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until we time out on requests that
      // cannot be satisfied.
      if (op.prologue_succeeded()) {
        assert(is_in_or_null(op.result()), "result not in heap");

        // If GC was locked out during the VM operation, then retry the
        // allocation and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == nullptr, "must be null if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }

        // Exit the loop if the gc time limit has been exceeded.
        // The allocation must have failed above ("result" guarding
        // this path is null) and the most recent collection has exceeded the
        // gc overhead limit (although enough may have been collected to
        // satisfy the allocation).  Exit the loop so that an out-of-memory
        // will be thrown (return null, ignoring the contents of
        // op.result()), but clear gc_overhead_limit_exceeded so that the
        // next collection starts with a clean slate (i.e., forgets about
        // previous overhead excesses).  Fill op.result() with a filler
        // object so that the heap remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = soft_ref_policy()->all_soft_refs_clear();

        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          log_trace(gc)("ParallelScavengeHeap::mem_allocate: return null because gc_overhead_limit_exceeded is set");
          if (op.result() != nullptr) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return nullptr;
        }

        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == nullptr) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc)("ParallelScavengeHeap::mem_allocate retries %u times", loop_count);
      log_warning(gc)("\tsize=" SIZE_FORMAT, size);
    }
  }

  return result;
}

// A "death march" is a series of ultra-slow allocations in which a full gc is
// done before each allocation, and after the full gc the allocation still
// cannot be satisfied from the young gen.  This routine detects that condition;
// it should be called after a full gc has been done and the allocation
// attempted from the young gen. The parameter 'addr' should be the result of
// that young gen allocation attempt.
void ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) {
  if (addr != nullptr) {
    _death_march_count = 0;  // death march has ended
  } else if (_death_march_count == 0) {
    if (should_alloc_in_eden(size)) {
      _death_march_count = 1;    // death march has started
    }
  }
}

HeapWord* ParallelScavengeHeap::allocate_old_gen_and_record(size_t size) {
  assert_locked_or_safepoint(Heap_lock);
  HeapWord* res = old_gen()->allocate(size);
  if (res != nullptr) {
    _size_policy->tenured_allocation(size * HeapWordSize);
  }
  return res;
}

HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
  if (!should_alloc_in_eden(size) || GCLocker::is_active_and_needs_gc()) {
    // Size is too big for eden, or gc is locked out.
    return allocate_old_gen_and_record(size);
  }

  // If a "death march" is in progress, allocate from the old gen a limited
  // number of times before doing a GC.
  if (_death_march_count > 0) {
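    // Permit up to 64 consecutive old-gen allocations during the march, then
    // reset the count and fail the allocation so a GC is attempted instead.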
    if (_death_march_count < 64) {
      ++_death_march_count;
      return allocate_old_gen_and_record(size);
    } else {
      _death_march_count = 0;
    }
  }
  return nullptr;
}

void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
  // The do_full_collection() parameter clear_all_soft_refs
  // is interpreted here as maximum_compaction, which will
  // cause SoftRefs to be cleared.
  bool maximum_compaction = clear_all_soft_refs;
  PSParallelCompact::invoke(maximum_compaction);
}

// Failed allocation policy. Must be called from the VM thread, and
// only at a safepoint! Note that this method contains policy for the
// allocation flow, NOT collection policy. So we do not check here whether
// gc time has exceeded its limit; that is the responsibility of the
// heap-specific collection methods. This method decides where to attempt
// allocations, and when to attempt collections, but contains no
// collection-specific policy.
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!is_stw_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // We assume that allocation in eden will fail unless we collect.

  // First level allocation failure, scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  const bool invoked_full_gc = PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size);

  // Second level allocation failure.
  //   Mark sweep and allocate in young generation.
  if (result == nullptr && !invoked_full_gc) {
    do_full_collection(false);
    result = young_gen()->allocate(size);
  }

  death_march_check(result, size);

  // Third level allocation failure.
  //   After mark sweep and young generation allocation failure,
  //   allocate in old generation.
  if (result == nullptr) {
    result = allocate_old_gen_and_record(size);
  }

  // Fourth level allocation failure. We're running out of memory.
  //   More complete mark sweep and allocate in young generation.
  if (result == nullptr) {
    do_full_collection(true);
    result = young_gen()->allocate(size);
  }

  // Fifth level allocation failure.
  //   After more complete mark sweep, allocate in old generation.
  if (result == nullptr) {
    result = allocate_old_gen_and_record(size);
  }

  return result;
}

void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::tlab_used(Thread* thr) const {
  return young_gen()->eden_space()->tlab_used(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

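// A young-gen allocation either satisfies the full requested_size or fails,
// so min_size needs no special handling here.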
HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
  HeapWord* result = young_gen()->allocate(requested_size);
  if (result != nullptr) {
    *actual_size = requested_size;
  }

  return result;
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

void ParallelScavengeHeap::prune_scavengable_nmethods() {
  ScavengableNMethods::prune_nmethods_not_into_young();
}

void ParallelScavengeHeap::prune_unlinked_nmethods() {
  ScavengableNMethods::prune_unlinked_nmethods();
}

// This method is used by System.gc() and JVMTI.
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
    "this thread should not own the Heap_lock");

  uint gc_count      = 0;
  uint full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // This value is guarded by the Heap_lock
    gc_count      = total_collections();
    full_gc_count = total_full_collections();
  }

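  // Note: a GCLocker-induced request may already be stale at this point (a
  // collection can have completed since the request was recorded); if so,
  // should_discard() lets us drop it.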
  if (GCLocker::should_discard(cause, gc_count)) {
    return;
  }

  while (true) {
    VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
    VMThread::execute(&op);

    if (!GCCause::is_explicit_full_gc(cause) || op.full_gc_succeeded()) {
      return;
    }

    {
      MutexLocker ml(Heap_lock);
      if (full_gc_count != total_full_collections()) {
        return;
      }
    }

    if (GCLocker::is_active_and_needs_gc()) {
      // If GCLocker is active, wait until clear before retrying.
      GCLocker::stall_until_clear();
    }
  }
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
}

// The HeapBlockClaimer is used during parallel iteration over the heap,
// allowing workers to claim heap areas ("blocks") and gain exclusive rights
// to them. The eden and survivor spaces are each treated as a single block,
// since it is hard to divide these spaces. The old space is divided into
// fixed-size blocks.
class HeapBlockClaimer : public StackObj {
  size_t _claimed_index;

public:
  static const size_t InvalidIndex = SIZE_MAX;
  static const size_t EdenIndex = 0;
  static const size_t SurvivorIndex = 1;
  static const size_t NumNonOldGenClaims = 2;

  HeapBlockClaimer() : _claimed_index(EdenIndex) { }

  // Claim the block and get the block index.
  size_t claim_and_get_block() {
    size_t block_index;
    block_index = Atomic::fetch_then_add(&_claimed_index, 1u);

    PSOldGen* old_gen = ParallelScavengeHeap::heap()->old_gen();
    size_t num_claims = old_gen->num_iterable_blocks() + NumNonOldGenClaims;

    return block_index < num_claims ? block_index : InvalidIndex;
  }
};

void ParallelScavengeHeap::object_iterate_parallel(ObjectClosure* cl,
                                                   HeapBlockClaimer* claimer) {
  // Iterate until all blocks are claimed.
  size_t block_index = claimer->claim_and_get_block();
  if (block_index == HeapBlockClaimer::EdenIndex) {
    young_gen()->eden_space()->object_iterate(cl);
    block_index = claimer->claim_and_get_block();
  }
  if (block_index == HeapBlockClaimer::SurvivorIndex) {
    young_gen()->from_space()->object_iterate(cl);
    young_gen()->to_space()->object_iterate(cl);
    block_index = claimer->claim_and_get_block();
  }
  while (block_index != HeapBlockClaimer::InvalidIndex) {
    old_gen()->object_iterate_block(cl, block_index - HeapBlockClaimer::NumNonOldGenClaims);
    block_index = claimer->claim_and_get_block();
  }
}

class PSScavengeParallelObjectIterator : public ParallelObjectIteratorImpl {
private:
  ParallelScavengeHeap* _heap;
  HeapBlockClaimer      _claimer;

public:
  PSScavengeParallelObjectIterator() :
      _heap(ParallelScavengeHeap::heap()),
      _claimer() {}

  virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
    _heap->object_iterate_parallel(cl, &_claimer);
  }
};

ParallelObjectIteratorImpl* ParallelScavengeHeap::parallel_object_iterator(uint thread_num) {
  return new PSScavengeParallelObjectIterator();
}

HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // Called from os::print_location by find or VMError.
    if (DebuggingContext::is_enabled() || VMError::is_error_reported()) {
      return nullptr;
    }
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return nullptr;
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
  PSOldGen* old = old_gen();
  HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
  HeapWord* old_reserved_start = old->reserved().start();
  HeapWord* old_reserved_end = old->reserved().end();
  VirtualSpaceSummary old_summary(old_reserved_start, old_committed_end, old_reserved_end);
  SpaceSummary old_space(old_reserved_start, old_committed_end, old->used_in_bytes());

  PSYoungGen* young = young_gen();
  VirtualSpaceSummary young_summary(young->reserved().start(),
    (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());

  MutableSpace* eden = young_gen()->eden_space();
  SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());

  MutableSpace* from = young_gen()->from_space();
  SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes());

  MutableSpace* to = young_gen()->to_space();
  SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());

  VirtualSpaceSummary heap_summary = create_heap_space_summary();
  return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space);
}

bool ParallelScavengeHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<ParallelScavengeHeap>::print_location(st, addr);
}

void ParallelScavengeHeap::print_on(outputStream* st) const {
  if (young_gen() != nullptr) {
    young_gen()->print_on(st);
  }
  if (old_gen() != nullptr) {
    old_gen()->print_on(st);
  }
  MetaspaceUtils::print_on(st);
}

void ParallelScavengeHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

  st->cr();
  PSParallelCompact::print_on_error(st);
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  ParallelScavengeHeap::heap()->workers().threads_do(tc);
}

void ParallelScavengeHeap::print_tracing_info() const {
  AdaptiveSizePolicyOutput::print();
  log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
  log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs", PSParallelCompact::accumulated_time()->seconds());
}

PreGenGCValues ParallelScavengeHeap::get_pre_gc_values() const {
  const PSYoungGen* const young = young_gen();
  const MutableSpace* const eden = young->eden_space();
  const MutableSpace* const from = young->from_space();
  const PSOldGen* const old = old_gen();

  return PreGenGCValues(young->used_in_bytes(),
                        young->capacity_in_bytes(),
                        eden->used_in_bytes(),
                        eden->capacity_in_bytes(),
                        from->used_in_bytes(),
                        from->capacity_in_bytes(),
                        old->used_in_bytes(),
                        old->capacity_in_bytes());
}

void ParallelScavengeHeap::print_heap_change(const PreGenGCValues& pre_gc_values) const {
  const PSYoungGen* const young = young_gen();
  const MutableSpace* const eden = young->eden_space();
  const MutableSpace* const from = young->from_space();
  const PSOldGen* const old = old_gen();

  log_info(gc, heap)(HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(young->name(),
                                             pre_gc_values.young_gen_used(),
                                             pre_gc_values.young_gen_capacity(),
                                             young->used_in_bytes(),
                                             young->capacity_in_bytes()),
                     HEAP_CHANGE_FORMAT_ARGS("Eden",
                                             pre_gc_values.eden_used(),
                                             pre_gc_values.eden_capacity(),
                                             eden->used_in_bytes(),
                                             eden->capacity_in_bytes()),
                     HEAP_CHANGE_FORMAT_ARGS("From",
                                             pre_gc_values.from_used(),
                                             pre_gc_values.from_capacity(),
                                             from->used_in_bytes(),
                                             from->capacity_in_bytes()));
  log_info(gc, heap)(HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(old->name(),
                                             pre_gc_values.old_gen_used(),
                                             pre_gc_values.old_gen_capacity(),
                                             old->used_in_bytes(),
                                             old->capacity_in_bytes()));
  MetaspaceUtils::print_metaspace_change(pre_gc_values.metaspace_sizes());
}

void ParallelScavengeHeap::verify(VerifyOption option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    log_debug(gc, verify)("Tenured");
    old_gen()->verify();

    log_debug(gc, verify)("Eden");
    young_gen()->verify();

    log_debug(gc, verify)("CardTable");
    card_table()->verify_all_young_refs_imprecise();
  }
}

void ParallelScavengeHeap::trace_actual_reserved_page_size(const size_t reserved_heap_size, const ReservedSpace rs) {
  // Check if Info level is enabled, since os::trace_page_sizes() logs on Info level.
  if (log_is_enabled(Info, pagesize)) {
    const size_t page_size = rs.page_size();
    os::trace_page_sizes("Heap",
                         MinHeapSize,
                         reserved_heap_size,
                         rs.base(),
                         rs.size(),
                         page_size);
  }
}

void ParallelScavengeHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const PSHeapSummary& heap_summary = create_ps_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

CardTableBarrierSet* ParallelScavengeHeap::barrier_set() {
  return barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
}

PSCardTable* ParallelScavengeHeap::card_table() {
  return static_cast<PSCardTable*>(barrier_set()->card_table());
}

void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
                                            size_t survivor_size) {
  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

HeapWord* ParallelScavengeHeap::allocate_loaded_archive_space(size_t size) {
  return _old_gen->allocate(size);
}

void ParallelScavengeHeap::complete_loaded_archive_space(MemRegion archive_space) {
  assert(_old_gen->object_space()->used_region().contains(archive_space),
         "Archive space not contained in old gen");
  _old_gen->complete_loaded_archive_space(archive_space);
}

void ParallelScavengeHeap::register_nmethod(nmethod* nm) {
  ScavengableNMethods::register_nmethod(nm);
}

void ParallelScavengeHeap::unregister_nmethod(nmethod* nm) {
  ScavengableNMethods::unregister_nmethod(nm);
}

void ParallelScavengeHeap::verify_nmethod(nmethod* nm) {
  ScavengableNMethods::verify_nmethod(nm);
}

GrowableArray<GCMemoryManager*> ParallelScavengeHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(2);
  memory_managers.append(_young_manager);
  memory_managers.append(_old_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> ParallelScavengeHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(3);
  memory_pools.append(_eden_pool);
  memory_pools.append(_survivor_pool);
  memory_pools.append(_old_pool);
  return memory_pools;
}

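// Object pinning is implemented via the GCLocker: the pinning thread enters
// a critical section, which blocks GCs (and therefore object movement) until
// the object is unpinned again.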
void ParallelScavengeHeap::pin_object(JavaThread* thread, oop obj) {
  GCLocker::lock_critical(thread);
}

void ParallelScavengeHeap::unpin_object(JavaThread* thread, oop obj) {
  GCLocker::unlock_critical(thread);
}

void ParallelScavengeHeap::update_parallel_worker_threads_cpu_time() {
  assert(Thread::current()->is_VM_thread(),
         "Must be called from VM thread to avoid races");
  if (!UsePerfData || !os::is_thread_cpu_time_supported()) {
    return;
  }

  // Ensure the ThreadTotalCPUTimeClosure destructor is called before
  // publishing the GC time.
  {
    ThreadTotalCPUTimeClosure tttc(CPUTimeGroups::CPUTimeType::gc_parallel_workers);
    // Currently the parallel worker threads never terminate, so it is safe
    // for the VMThread to read their CPU times. If upstream changes this
    // behavior, we should rethink whether it is still safe.
    gc_threads_do(&tttc);
  }

  CPUTimeCounters::publish_gc_total_cpu_time();
}