/*
 * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelArguments.hpp"
#include "gc/parallel/parallelInitLogger.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMemoryPool.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psParallelCompactNew.inline.hpp"
#include "gc/parallel/psPromotionManager.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psVMOperations.hpp"
#include "gc/shared/fullGCForwarding.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/genArguments.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceCounters.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/reservedSpace.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/cpuTimeCounters.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryManager.hpp"
#include "utilities/macros.hpp"
#include "utilities/vmError.hpp"

PSYoungGen*  ParallelScavengeHeap::_young_gen = nullptr;
PSOldGen*    ParallelScavengeHeap::_old_gen = nullptr;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = nullptr;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = nullptr;

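// Reserve the backing memory for the heap and lay it out as two contiguous
// generations: the old generation in the lower part of the reservation and
// the young generation above it. The split point is MaxOldSize, so the two
// generations' virtual spaces must meet exactly (see the boundary assert
// below).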
jint ParallelScavengeHeap::initialize() {
  const size_t reserved_heap_size = ParallelArguments::heap_reserved_size_bytes();

  ReservedHeapSpace heap_rs = Universe::reserve_heap(reserved_heap_size, HeapAlignment);

  trace_actual_reserved_page_size(reserved_heap_size, heap_rs);

  initialize_reserved_region(heap_rs);
  // Layout the reserved space for the generations.
  ReservedSpace old_rs   = heap_rs.first_part(MaxOldSize, SpaceAlignment);
  ReservedSpace young_rs = heap_rs.last_part(MaxOldSize, SpaceAlignment);
  assert(young_rs.size() == MaxNewSize, "Didn't reserve all of the heap");

  PSCardTable* card_table = new PSCardTable(_reserved);
  card_table->initialize(old_rs.base(), young_rs.base());

  CardTableBarrierSet* const barrier_set = new CardTableBarrierSet(card_table);
  barrier_set->initialize();
  BarrierSet::set_barrier_set(barrier_set);

  // Set up WorkerThreads
  _workers.initialize_workers();

  // Create and initialize the generations.
  _young_gen = new PSYoungGen(
      young_rs,
      NewSize,
      MinNewSize,
      MaxNewSize);
  _old_gen = new PSOldGen(
      old_rs,
      OldSize,
      MinOldSize,
      MaxOldSize);

  assert(young_gen()->max_gen_size() == young_rs.size(), "Consistency check");
  assert(old_gen()->max_gen_size() == old_rs.size(), "Consistency check");

  double max_gc_pause_sec = ((double) MaxGCPauseMillis) / 1000.0;

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             SpaceAlignment,
                             max_gc_pause_sec,
                             GCTimeRatio
                             );

  assert((old_gen()->virtual_space()->high_boundary() ==
          young_gen()->virtual_space()->low_boundary()),
         "Boundaries must meet");
  // Initialize the policy counters - 2 collectors, 2 generations.
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 2, _size_policy);

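  // Reserve the auxiliary data used by full-GC compaction. With compact
  // object headers the alternative implementation (PSParallelCompactNew)
  // is used in place of the classic PSParallelCompact.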
  if (UseCompactObjectHeaders) {
    if (!PSParallelCompactNew::initialize_aux_data()) {
      return JNI_ENOMEM;
    }
  } else {
    if (!PSParallelCompact::initialize_aux_data()) {
      return JNI_ENOMEM;
    }
  }

  // Create CPU time counter
  CPUTimeCounters::create_counter(CPUTimeGroups::CPUTimeType::gc_parallel_workers);

  ParallelInitLogger::print();

  FullGCForwarding::initialize(_reserved);

  return JNI_OK;
}

void ParallelScavengeHeap::initialize_serviceability() {
  _eden_pool = new EdenMutableSpacePool(_young_gen,
                                        _young_gen->eden_space(),
                                        "PS Eden Space",
                                        false /* support_usage_threshold */);

  _survivor_pool = new SurvivorMutableSpacePool(_young_gen,
                                                "PS Survivor Space",
                                                false /* support_usage_threshold */);

  _old_pool = new PSGenerationPool(_old_gen,
                                   "PS Old Gen",
                                   true /* support_usage_threshold */);

  _young_manager = new GCMemoryManager("PS Scavenge");
  _old_manager = new GCMemoryManager("PS MarkSweep");

  _old_manager->add_pool(_eden_pool);
  _old_manager->add_pool(_survivor_pool);
  _old_manager->add_pool(_old_pool);

  _young_manager->add_pool(_eden_pool);
  _young_manager->add_pool(_survivor_pool);
}

void ParallelScavengeHeap::safepoint_synchronize_begin() {
  if (UseStringDeduplication) {
    SuspendibleThreadSet::synchronize();
  }
}

void ParallelScavengeHeap::safepoint_synchronize_end() {
  if (UseStringDeduplication) {
    SuspendibleThreadSet::desynchronize();
  }
}

class PSIsScavengable : public BoolObjectClosure {
  bool do_object_b(oop obj) {
    return ParallelScavengeHeap::heap()->is_in_young(obj);
  }
};

static PSIsScavengable _is_scavengable;

void ParallelScavengeHeap::post_initialize() {
  CollectedHeap::post_initialize();
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  if (UseCompactObjectHeaders) {
    PSParallelCompactNew::post_initialize();
  } else {
    PSParallelCompact::post_initialize();
  }
  PSPromotionManager::initialize();

  ScavengableNMethods::initialize(&_is_scavengable);
  GCLocker::initialize();
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  MetaspaceCounters::update_performance_counters();
  update_parallel_worker_threads_cpu_time();
}

size_t ParallelScavengeHeap::capacity() const {
  return young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
}

size_t ParallelScavengeHeap::used() const {
  return young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
}

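// The to-space is never available for mutator allocation, so the maximum
// capacity reported here excludes one survivor space: the largest survivor
// size the adaptive policy may choose, or the current to-space capacity when
// adaptive sizing is off.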
size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_gen_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  return young_gen()->is_in(p) || old_gen()->is_in(p);
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  return young_gen()->is_in_reserved(p) || old_gen()->is_in_reserved(p);
}

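// Stack chunks (continuations) in the young generation are processed by the
// scavenger and can be written without GC barriers; chunks outside the young
// generation require them.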
bool ParallelScavengeHeap::requires_barriers(stackChunkOop p) const {
  return !is_in_young(p);
}

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection. It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request. This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions. It should make every
// attempt to allocate the requested memory.

// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the Java heap becomes exhausted,
// we rely on the size_policy object to force a bail-out.
HeapWord* ParallelScavengeHeap::mem_allocate(size_t size,
                                             bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  bool is_tlab = false;
  return mem_allocate_work(size, is_tlab, gc_overhead_limit_was_exceeded);
}

HeapWord* ParallelScavengeHeap::mem_allocate_work(size_t size,
                                                  bool is_tlab,
                                                  bool* gc_overhead_limit_was_exceeded) {

  // gc_overhead_limit_was_exceeded should generally be false, so set it to
  // false here and set it to true only if the GC time limit is being
  // exceeded, as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size);

  uint loop_count = 0;
  uint gc_count = 0;

  while (result == nullptr) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = total_collections();

      result = young_gen()->allocate(size);
      if (result != nullptr) {
        return result;
      }

      // If certain conditions hold, try allocating from the old gen.
      if (!is_tlab) {
        result = mem_allocate_old_gen(size);
        if (result != nullptr) {
          return result;
        }
      }
    }

    assert(result == nullptr, "inv");
    {
      VM_ParallelCollectForAllocation op(size, is_tlab, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until timeout on requests that
      // cannot be satisfied.
      if (op.gc_succeeded()) {
        assert(is_in_or_null(op.result()), "result not in heap");

        // Exit the loop if the gc time limit has been exceeded.
        // The allocation must have failed above ("result" guarding
        // this path is null) and the most recent collection has exceeded the
        // gc overhead limit (although enough may have been collected to
        // satisfy the allocation).  Exit the loop so that an out-of-memory
        // error will be thrown (return a null, ignoring the contents of
        // op.result()), but clear gc_overhead_limit_exceeded so that the
        // next collection starts with a clean slate (i.e., forgets about
        // previous overhead excesses).  Fill op.result() with a filler
        // object so that the heap remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = soft_ref_policy()->all_soft_refs_clear();

        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          log_trace(gc)("ParallelScavengeHeap::mem_allocate: return null because gc_overhead_limit_exceeded is set");
          if (op.result() != nullptr) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return nullptr;
        }

        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == nullptr) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc)("ParallelScavengeHeap::mem_allocate retries %u times", loop_count);
      log_warning(gc)("\tsize=%zu", size);
    }
  }

  return result;
}

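// Allocate in the old gen and, on success, record the tenured allocation
// with the adaptive size policy so its old-gen allocation statistics stay
// accurate.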
HeapWord* ParallelScavengeHeap::allocate_old_gen_and_record(size_t size) {
  assert_locked_or_safepoint(Heap_lock);
  HeapWord* res = old_gen()->allocate(size);
  if (res != nullptr) {
    _size_policy->tenured_allocation(size * HeapWordSize);
  }
  return res;
}

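// Direct old-gen allocation is only used for objects that are too large for
// eden; all other requests return null here so the caller falls back to a GC.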
HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
  if (!should_alloc_in_eden(size)) {
    // Size is too big for eden.
    return allocate_old_gen_and_record(size);
  }

  return nullptr;
}

void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
  if (UseCompactObjectHeaders) {
    PSParallelCompactNew::invoke(clear_all_soft_refs, false /* serial */);
  } else {
    PSParallelCompact::invoke(clear_all_soft_refs);
  }
}

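// Try to satisfy the allocation without collecting: first from the young
// gen, then, for non-TLAB requests, by expanding the old gen.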
HeapWord* ParallelScavengeHeap::expand_heap_and_allocate(size_t size, bool is_tlab) {
  HeapWord* result = young_gen()->allocate(size);
  if (result == nullptr && !is_tlab) {
    result = old_gen()->expand_and_allocate(size);
  }
  return result;   // Could be null if we are out of space.
}

HeapWord* ParallelScavengeHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  assert(size != 0, "precondition");

  HeapWord* result = nullptr;

  // If the young gen can handle this allocation, attempt a young GC first.
  bool should_run_young_gc = is_tlab || should_alloc_in_eden(size);
  collect_at_safepoint(!should_run_young_gc);

  result = expand_heap_and_allocate(size, is_tlab);
  if (result != nullptr) {
    return result;
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    // Make sure the heap is fully compacted
    uintx old_interval = HeapMaximumCompactionInterval;
    HeapMaximumCompactionInterval = 0;

    const bool clear_all_soft_refs = true;
    if (UseCompactObjectHeaders) {
      PSParallelCompactNew::invoke(clear_all_soft_refs, false /* serial */);
    } else {
      PSParallelCompact::invoke(clear_all_soft_refs);
    }

    // Restore
    HeapMaximumCompactionInterval = old_interval;
  }

  result = expand_heap_and_allocate(size, is_tlab);
  if (result != nullptr) {
    return result;
  }

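  // As a last resort with compact object headers, retry with a serial
  // full GC before giving up.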
  if (UseCompactObjectHeaders) {
    PSParallelCompactNew::invoke(true /* clear_soft_refs */, true /* serial */);
  }

  result = expand_heap_and_allocate(size, is_tlab);
  if (result != nullptr) {
    return result;
  }

  // What else?  We might try synchronous finalization later.  If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return nullptr;
}

void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::tlab_used(Thread* thr) const {
  return young_gen()->eden_space()->tlab_used(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

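// Note that min_size is not consulted: eden allocation either satisfies the
// full requested_size or fails, so a successful allocation always returns
// exactly requested_size.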
HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
  bool dummy;
  HeapWord* result = mem_allocate_work(requested_size /* size */,
                                       true /* is_tlab */,
                                       &dummy);
  if (result != nullptr) {
    *actual_size = requested_size;
  }

  return result;
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

void ParallelScavengeHeap::prune_scavengable_nmethods() {
  ScavengableNMethods::prune_nmethods_not_into_young();
}

void ParallelScavengeHeap::prune_unlinked_nmethods() {
  ScavengableNMethods::prune_unlinked_nmethods();
}

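// Snapshot the collection counts under the Heap_lock and pass them to the
// VM operation, which can then skip the collection if another thread's GC
// completed in the meantime.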
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
    "this thread should not own the Heap_lock");

  uint gc_count      = 0;
  uint full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // This value is guarded by the Heap_lock
    gc_count      = total_collections();
    full_gc_count = total_full_collections();
  }

  VM_ParallelGCCollect op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}

bool ParallelScavengeHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_metadata_GC_clear_soft_refs ||
         _gc_cause == GCCause::_wb_full_gc;
}

void ParallelScavengeHeap::collect_at_safepoint(bool full) {
  assert(!GCLocker::is_active(), "precondition");
  bool clear_soft_refs = must_clear_all_soft_refs();

  if (!full) {
    bool success = PSScavenge::invoke(clear_soft_refs);
    if (success) {
      return;
    }
    // Upgrade to a full GC if the young GC fails.
  }
  if (UseCompactObjectHeaders) {
    PSParallelCompactNew::invoke(clear_soft_refs, false /* serial */);
  } else {
    PSParallelCompact::invoke(clear_soft_refs);
  }
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
}

// The HeapBlockClaimer is used during parallel iteration over the heap,
// allowing workers to claim heap areas ("blocks"), gaining exclusive rights
// to them. The eden space is one block and the two survivor spaces together
// form another, as it is hard to subdivide these spaces. The old space is
// divided into fixed-size blocks.
class HeapBlockClaimer : public StackObj {
  size_t _claimed_index;

public:
  static const size_t InvalidIndex = SIZE_MAX;
  static const size_t EdenIndex = 0;
  static const size_t SurvivorIndex = 1;
  static const size_t NumNonOldGenClaims = 2;

  HeapBlockClaimer() : _claimed_index(EdenIndex) { }

  // Atomically claim the next block and return its index, or InvalidIndex
  // once all blocks have been claimed.
  size_t claim_and_get_block() {
    size_t block_index = Atomic::fetch_then_add(&_claimed_index, 1u);

    PSOldGen* old_gen = ParallelScavengeHeap::heap()->old_gen();
    size_t num_claims = old_gen->num_iterable_blocks() + NumNonOldGenClaims;

    return block_index < num_claims ? block_index : InvalidIndex;
  }
};

void ParallelScavengeHeap::object_iterate_parallel(ObjectClosure* cl,
                                                   HeapBlockClaimer* claimer) {
  size_t block_index = claimer->claim_and_get_block();
  // Iterate until all blocks are claimed
  if (block_index == HeapBlockClaimer::EdenIndex) {
    young_gen()->eden_space()->object_iterate(cl);
    block_index = claimer->claim_and_get_block();
  }
  if (block_index == HeapBlockClaimer::SurvivorIndex) {
    young_gen()->from_space()->object_iterate(cl);
    young_gen()->to_space()->object_iterate(cl);
    block_index = claimer->claim_and_get_block();
  }
  while (block_index != HeapBlockClaimer::InvalidIndex) {
    old_gen()->object_iterate_block(cl, block_index - HeapBlockClaimer::NumNonOldGenClaims);
    block_index = claimer->claim_and_get_block();
  }
}

class PSScavengeParallelObjectIterator : public ParallelObjectIteratorImpl {
private:
  ParallelScavengeHeap* _heap;
  HeapBlockClaimer      _claimer;

public:
  PSScavengeParallelObjectIterator() :
      _heap(ParallelScavengeHeap::heap()),
      _claimer() {}

  virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
    _heap->object_iterate_parallel(cl, &_claimer);
  }
};

ParallelObjectIteratorImpl* ParallelScavengeHeap::parallel_object_iterator(uint thread_num) {
  return new PSScavengeParallelObjectIterator();
}

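// Only the old gen supports cheap block-start queries, via its object start
// array. For young-gen addresses this is only reachable from debugging paths
// (e.g. os::print_location during error reporting), where returning null is
// acceptable.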
HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // Called from os::print_location by find or VMError.
    if (DebuggingContext::is_enabled() || VMError::is_error_reported()) {
      return nullptr;
    }
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return nullptr;
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
  PSOldGen* old = old_gen();
  HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
  HeapWord* old_reserved_start = old->reserved().start();
  HeapWord* old_reserved_end = old->reserved().end();
  VirtualSpaceSummary old_summary(old_reserved_start, old_committed_end, old_reserved_end);
  SpaceSummary old_space(old_reserved_start, old_committed_end, old->used_in_bytes());

  PSYoungGen* young = young_gen();
  VirtualSpaceSummary young_summary(young->reserved().start(),
    (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());

  MutableSpace* eden = young_gen()->eden_space();
  SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());

  MutableSpace* from = young_gen()->from_space();
  SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes());

  MutableSpace* to = young_gen()->to_space();
  SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());

  VirtualSpaceSummary heap_summary = create_heap_space_summary();
  return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space);
}

bool ParallelScavengeHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<ParallelScavengeHeap>::print_location(st, addr);
}

void ParallelScavengeHeap::print_heap_on(outputStream* st) const {
  if (young_gen() != nullptr) {
    young_gen()->print_on(st);
  }
  if (old_gen() != nullptr) {
    old_gen()->print_on(st);
  }
}

void ParallelScavengeHeap::print_gc_on(outputStream* st) const {
  BarrierSet* bs = BarrierSet::barrier_set();
  if (bs != nullptr) {
    bs->print_on(st);
  }
  st->cr();

  if (UseCompactObjectHeaders) {
    PSParallelCompactNew::print_on(st);
  } else {
    PSParallelCompact::print_on(st);
  }
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  ParallelScavengeHeap::heap()->workers().threads_do(tc);
}

void ParallelScavengeHeap::print_tracing_info() const {
  AdaptiveSizePolicyOutput::print();
  log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
  if (UseCompactObjectHeaders) {
    log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs", PSParallelCompactNew::accumulated_time()->seconds());
  } else {
    log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs", PSParallelCompact::accumulated_time()->seconds());
  }
}

PreGenGCValues ParallelScavengeHeap::get_pre_gc_values() const {
  const PSYoungGen* const young = young_gen();
  const MutableSpace* const eden = young->eden_space();
  const MutableSpace* const from = young->from_space();
  const PSOldGen* const old = old_gen();

  return PreGenGCValues(young->used_in_bytes(),
                        young->capacity_in_bytes(),
                        eden->used_in_bytes(),
                        eden->capacity_in_bytes(),
                        from->used_in_bytes(),
                        from->capacity_in_bytes(),
                        old->used_in_bytes(),
                        old->capacity_in_bytes());
}

void ParallelScavengeHeap::print_heap_change(const PreGenGCValues& pre_gc_values) const {
  const PSYoungGen* const young = young_gen();
  const MutableSpace* const eden = young->eden_space();
  const MutableSpace* const from = young->from_space();
  const PSOldGen* const old = old_gen();

  log_info(gc, heap)(HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(young->name(),
                                             pre_gc_values.young_gen_used(),
                                             pre_gc_values.young_gen_capacity(),
                                             young->used_in_bytes(),
                                             young->capacity_in_bytes()),
                     HEAP_CHANGE_FORMAT_ARGS("Eden",
                                             pre_gc_values.eden_used(),
                                             pre_gc_values.eden_capacity(),
                                             eden->used_in_bytes(),
                                             eden->capacity_in_bytes()),
                     HEAP_CHANGE_FORMAT_ARGS("From",
                                             pre_gc_values.from_used(),
                                             pre_gc_values.from_capacity(),
                                             from->used_in_bytes(),
                                             from->capacity_in_bytes()));
  log_info(gc, heap)(HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(old->name(),
                                             pre_gc_values.old_gen_used(),
                                             pre_gc_values.old_gen_capacity(),
                                             old->used_in_bytes(),
                                             old->capacity_in_bytes()));
  MetaspaceUtils::print_metaspace_change(pre_gc_values.metaspace_sizes());
}

void ParallelScavengeHeap::verify(VerifyOption option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    log_debug(gc, verify)("Tenured");
    old_gen()->verify();

    log_debug(gc, verify)("Eden");
    young_gen()->verify();

    log_debug(gc, verify)("CardTable");
    card_table()->verify_all_young_refs_imprecise();
  }
}

void ParallelScavengeHeap::trace_actual_reserved_page_size(const size_t reserved_heap_size, const ReservedSpace rs) {
  // Check if Info level is enabled, since os::trace_page_sizes() logs on Info level.
  if (log_is_enabled(Info, pagesize)) {
    const size_t page_size = rs.page_size();
    os::trace_page_sizes("Heap",
                         MinHeapSize,
                         reserved_heap_size,
                         rs.base(),
                         rs.size(),
                         page_size);
  }
}

void ParallelScavengeHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const PSHeapSummary& heap_summary = create_ps_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

CardTableBarrierSet* ParallelScavengeHeap::barrier_set() {
  return barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
}

PSCardTable* ParallelScavengeHeap::card_table() {
  return static_cast<PSCardTable*>(barrier_set()->card_table());
}

void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
                                            size_t survivor_size) {
  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

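// Support for loading an archived (CDS) heap: archived objects are
// materialized directly in the old gen.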
HeapWord* ParallelScavengeHeap::allocate_loaded_archive_space(size_t size) {
  return _old_gen->allocate(size);
}

void ParallelScavengeHeap::complete_loaded_archive_space(MemRegion archive_space) {
  assert(_old_gen->object_space()->used_region().contains(archive_space),
         "Archive space not contained in old gen");
  _old_gen->complete_loaded_archive_space(archive_space);
}

void ParallelScavengeHeap::register_nmethod(nmethod* nm) {
  ScavengableNMethods::register_nmethod(nm);
}

void ParallelScavengeHeap::unregister_nmethod(nmethod* nm) {
  ScavengableNMethods::unregister_nmethod(nm);
}

void ParallelScavengeHeap::verify_nmethod(nmethod* nm) {
  ScavengableNMethods::verify_nmethod(nm);
}

GrowableArray<GCMemoryManager*> ParallelScavengeHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(2);
  memory_managers.append(_young_manager);
  memory_managers.append(_old_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> ParallelScavengeHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(3);
  memory_pools.append(_eden_pool);
  memory_pools.append(_survivor_pool);
  memory_pools.append(_old_pool);
  return memory_pools;
}

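// Object pinning is implemented coarsely via the GCLocker: while any object
// is pinned, GCs are held off entirely, rather than pinning individual
// objects or regions.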
void ParallelScavengeHeap::pin_object(JavaThread* thread, oop obj) {
  GCLocker::enter(thread);
}

void ParallelScavengeHeap::unpin_object(JavaThread* thread, oop obj) {
  GCLocker::exit(thread);
}

void ParallelScavengeHeap::update_parallel_worker_threads_cpu_time() {
  assert(Thread::current()->is_VM_thread(),
         "Must be called from VM thread to avoid races");
  if (!UsePerfData || !os::is_thread_cpu_time_supported()) {
    return;
  }

  // Ensure the ThreadTotalCPUTimeClosure destructor is called before
  // publishing the GC time.
  {
    ThreadTotalCPUTimeClosure tttc(CPUTimeGroups::CPUTimeType::gc_parallel_workers);
    // Currently parallel worker threads in GCTaskManager never terminate, so it
    // is safe for VMThread to read their CPU times. If upstream changes this
    // behavior, we should rethink if it is still safe.
    gc_threads_do(&tttc);
  }

  CPUTimeCounters::publish_gc_total_cpu_time();
}