/*
 * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelArguments.hpp"
#include "gc/parallel/parallelInitLogger.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMemoryPool.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psVMOperations.hpp"
#include "gc/shared/fullGCForwarding.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/genArguments.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceCounters.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/reservedSpace.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/cpuTimeCounters.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryManager.hpp"
#include "utilities/macros.hpp"
#include "utilities/vmError.hpp"

PSYoungGen*  ParallelScavengeHeap::_young_gen = nullptr;
PSOldGen*    ParallelScavengeHeap::_old_gen = nullptr;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = nullptr;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = nullptr;

jint ParallelScavengeHeap::initialize() {
  const size_t reserved_heap_size = ParallelArguments::heap_reserved_size_bytes();

  ReservedHeapSpace heap_rs = Universe::reserve_heap(reserved_heap_size, HeapAlignment);

  trace_actual_reserved_page_size(reserved_heap_size, heap_rs);

  initialize_reserved_region(heap_rs);
  // Layout the reserved space for the generations.
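  // The old gen occupies the low end of the reserved space and the young gen
  // sits directly above it. The boundary between the two is fixed at
  // MaxOldSize, which is why the "Boundaries must meet" assert below must
  // hold.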
  ReservedSpace old_rs   = heap_rs.first_part(MaxOldSize, SpaceAlignment);
  ReservedSpace young_rs = heap_rs.last_part(MaxOldSize, SpaceAlignment);
  assert(young_rs.size() == MaxNewSize, "Didn't reserve all of the heap");

  PSCardTable* card_table = new PSCardTable(_reserved);
  card_table->initialize(old_rs.base(), young_rs.base());

  CardTableBarrierSet* const barrier_set = new CardTableBarrierSet(card_table);
  barrier_set->initialize();
  BarrierSet::set_barrier_set(barrier_set);

  // Set up WorkerThreads
  _workers.initialize_workers();

  // Create and initialize the generations.
  _young_gen = new PSYoungGen(
      young_rs,
      NewSize,
      MinNewSize,
      MaxNewSize);
  _old_gen = new PSOldGen(
      old_rs,
      OldSize,
      MinOldSize,
      MaxOldSize);

  assert(young_gen()->max_gen_size() == young_rs.size(), "Consistency check");
  assert(old_gen()->max_gen_size() == old_rs.size(), "Consistency check");

  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             SpaceAlignment,
                             max_gc_pause_sec,
                             GCTimeRatio);

  assert((old_gen()->virtual_space()->high_boundary() ==
          young_gen()->virtual_space()->low_boundary()),
         "Boundaries must meet");
  // initialize the policy counters - 2 collectors, 2 generations
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 2, _size_policy);

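  // Set up the auxiliary data structures used by full-GC compaction (e.g. the
  // mark bitmap and region summary data); reserving them can fail, in which
  // case initialization bails out with JNI_ENOMEM.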
  if (!PSParallelCompact::initialize_aux_data()) {
    return JNI_ENOMEM;
  }

  // Create CPU time counter
  CPUTimeCounters::create_counter(CPUTimeGroups::CPUTimeType::gc_parallel_workers);

  ParallelInitLogger::print();

  FullGCForwarding::initialize(_reserved);

  return JNI_OK;
}

void ParallelScavengeHeap::initialize_serviceability() {
  _eden_pool = new EdenMutableSpacePool(_young_gen,
                                        _young_gen->eden_space(),
                                        "PS Eden Space",
                                        false /* support_usage_threshold */);

  _survivor_pool = new SurvivorMutableSpacePool(_young_gen,
                                                "PS Survivor Space",
                                                false /* support_usage_threshold */);

  _old_pool = new PSGenerationPool(_old_gen,
                                   "PS Old Gen",
                                   true /* support_usage_threshold */);

  _young_manager = new GCMemoryManager("PS Scavenge");
  _old_manager = new GCMemoryManager("PS MarkSweep");

  _old_manager->add_pool(_eden_pool);
  _old_manager->add_pool(_survivor_pool);
  _old_manager->add_pool(_old_pool);

  _young_manager->add_pool(_eden_pool);
  _young_manager->add_pool(_survivor_pool);
}

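// Parallel GC only uses the suspendible thread set for string deduplication,
// so there is nothing to synchronize with before a safepoint unless it is
// enabled.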
void ParallelScavengeHeap::safepoint_synchronize_begin() {
  if (UseStringDeduplication) {
    SuspendibleThreadSet::synchronize();
  }
}

void ParallelScavengeHeap::safepoint_synchronize_end() {
  if (UseStringDeduplication) {
    SuspendibleThreadSet::desynchronize();
  }
}

class PSIsScavengable : public BoolObjectClosure {
  bool do_object_b(oop obj) {
    return ParallelScavengeHeap::heap()->is_in_young(obj);
  }
};

static PSIsScavengable _is_scavengable;

void ParallelScavengeHeap::post_initialize() {
  CollectedHeap::post_initialize();
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  PSParallelCompact::post_initialize();
  PSPromotionManager::initialize();

  ScavengableNMethods::initialize(&_is_scavengable);
  GCLocker::initialize();
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  MetaspaceCounters::update_performance_counters();
  update_parallel_worker_threads_cpu_time();
}

size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

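// An estimate of the usable maximum: the reserved heap minus one survivor
// space, since the to-space never holds live data between collections. The
// result is clamped so it is never smaller than the current capacity.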
size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_gen_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  return young_gen()->is_in(p) || old_gen()->is_in(p);
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  return young_gen()->is_in_reserved(p) || old_gen()->is_in_reserved(p);
}

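// Card-marking barriers only matter for objects that can hold old-to-young
// pointers, i.e. objects outside the young gen; a stack chunk still in the
// young gen can therefore be mutated barrier-free.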
bool ParallelScavengeHeap::requires_barriers(stackChunkOop p) const {
  return !is_in_young(p);
}

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection. It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request. This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions. It should make every
// attempt to allocate the requested memory.

// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(size_t size,
                                             bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  bool is_tlab = false;
  return mem_allocate_work(size, is_tlab, gc_overhead_limit_was_exceeded);
}

HeapWord* ParallelScavengeHeap::mem_allocate_work(size_t size,
                                                  bool is_tlab,
                                                  bool* gc_overhead_limit_was_exceeded) {
  // In general gc_overhead_limit_was_exceeded should be false, so
  // set it so here and reset it to true only if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size);

  uint loop_count = 0;
  uint gc_count = 0;

  while (result == nullptr) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period in which it
    // reads the total_collections() value.
    {
      MutexLocker ml(Heap_lock);
      gc_count = total_collections();

      result = young_gen()->allocate(size);
      if (result != nullptr) {
        return result;
      }

      // If certain conditions hold, try allocating from the old gen.
      if (!is_tlab) {
        result = mem_allocate_old_gen(size);
        if (result != nullptr) {
          return result;
        }
      }
    }

    assert(result == nullptr, "inv");
    {
      VM_ParallelCollectForAllocation op(size, is_tlab, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until timeout on requests that
      // cannot be satisfied.
      if (op.gc_succeeded()) {
        assert(is_in_or_null(op.result()), "result not in heap");

        // Exit the loop if the gc time limit has been exceeded.
        // The allocation must have failed above ("result" guarding
        // this path is null) and the most recent collection has exceeded the
        // gc overhead limit (although enough may have been collected to
        // satisfy the allocation).  Exit the loop so that an out-of-memory
        // will be thrown (return null, ignoring the contents of op.result()),
        // but clear gc_overhead_limit_exceeded so that the next collection
        // starts with a clean slate (i.e., forgets about previous overhead
        // excesses).  Fill op.result() with a filler object so that the
        // heap remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = soft_ref_policy()->all_soft_refs_clear();

        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          log_trace(gc)("ParallelScavengeHeap::mem_allocate: return null because gc_overhead_limit_exceeded is set");
          if (op.result() != nullptr) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return nullptr;
        }

        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == nullptr) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc)("ParallelScavengeHeap::mem_allocate retries %d times", loop_count);
      log_warning(gc)("\tsize=%zu", size);
    }
  }

  return result;
}

HeapWord* ParallelScavengeHeap::allocate_old_gen_and_record(size_t size) {
  assert_locked_or_safepoint(Heap_lock);
  HeapWord* res = old_gen()->allocate(size);
  if (res != nullptr) {
    _size_policy->tenured_allocation(size * HeapWordSize);
  }
  return res;
}

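// Direct old-gen allocation is attempted only for objects too large for eden;
// for eden-sized requests this returns null so that the caller triggers a
// collection instead of prematurely tenuring the object.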
HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
  if (!should_alloc_in_eden(size)) {
    // Size is too big for eden.
    return allocate_old_gen_and_record(size);
  }

  return nullptr;
}

void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
  PSParallelCompact::invoke(clear_all_soft_refs);
}

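// Called on the failed-allocation path at a safepoint: retry the young gen
// and, for non-TLAB requests, allow the old gen to expand up to its reserved
// limit.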
HeapWord* ParallelScavengeHeap::expand_heap_and_allocate(size_t size, bool is_tlab) {
  HeapWord* result = young_gen()->allocate(size);
  if (result == nullptr && !is_tlab) {
    result = old_gen()->expand_and_allocate(size);
  }
  return result;   // Could be null if we are out of space.
}

HeapWord* ParallelScavengeHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  assert(size != 0, "precondition");

  HeapWord* result = nullptr;

  // If the young gen can handle this allocation, attempt a young GC first.
  bool should_run_young_gc = is_tlab || should_alloc_in_eden(size);
  collect_at_safepoint(!should_run_young_gc);

  result = expand_heap_and_allocate(size, is_tlab);
  if (result != nullptr) {
    return result;
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    // Make sure the heap is fully compacted
    uintx old_interval = HeapMaximumCompactionInterval;
    HeapMaximumCompactionInterval = 0;

    const bool clear_all_soft_refs = true;
    PSParallelCompact::invoke(clear_all_soft_refs);

    // Restore
    HeapMaximumCompactionInterval = old_interval;
  }

  result = expand_heap_and_allocate(size, is_tlab);
  if (result != nullptr) {
    return result;
  }

  // What else?  We might try synchronous finalization later.  If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return nullptr;
}

void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::tlab_used(Thread* thr) const {
  return young_gen()->eden_space()->tlab_used(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
  bool dummy;
  HeapWord* result = mem_allocate_work(requested_size /* size */,
                                       true /* is_tlab */,
                                       &dummy);
  if (result != nullptr) {
    *actual_size = requested_size;
  }

  return result;
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

void ParallelScavengeHeap::prune_scavengable_nmethods() {
  ScavengableNMethods::prune_nmethods_not_into_young();
}

void ParallelScavengeHeap::prune_unlinked_nmethods() {
  ScavengableNMethods::prune_unlinked_nmethods();
}

void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
    "this thread should not own the Heap_lock");

  uint gc_count      = 0;
  uint full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // This value is guarded by the Heap_lock
    gc_count      = total_collections();
    full_gc_count = total_full_collections();
  }

  VM_ParallelGCCollect op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}

bool ParallelScavengeHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_metadata_GC_clear_soft_refs ||
         _gc_cause == GCCause::_wb_full_gc;
}

void ParallelScavengeHeap::collect_at_safepoint(bool full) {
  assert(!GCLocker::is_active(), "precondition");
  bool clear_soft_refs = must_clear_all_soft_refs();

  if (!full) {
    bool success = PSScavenge::invoke(clear_soft_refs);
    if (success) {
      return;
    }
    // Upgrade to a full GC if the young GC fails.
  }
  PSParallelCompact::invoke(clear_soft_refs);
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
}

// The HeapBlockClaimer is used during parallel iteration over the heap,
// allowing workers to claim heap areas ("blocks") and gain exclusive rights
// to them. The eden and survivor spaces are each treated as a single block,
// since it is hard to subdivide them.
// The old space is divided into fixed-size blocks.
class HeapBlockClaimer : public StackObj {
  size_t _claimed_index;

public:
  static const size_t InvalidIndex = SIZE_MAX;
  static const size_t EdenIndex = 0;
  static const size_t SurvivorIndex = 1;
  static const size_t NumNonOldGenClaims = 2;

  HeapBlockClaimer() : _claimed_index(EdenIndex) { }
  // Claim the next block and return its index.
  size_t claim_and_get_block() {
    size_t block_index = Atomic::fetch_then_add(&_claimed_index, 1u);

    PSOldGen* old_gen = ParallelScavengeHeap::heap()->old_gen();
    size_t num_claims = old_gen->num_iterable_blocks() + NumNonOldGenClaims;

    return block_index < num_claims ? block_index : InvalidIndex;
  }
};

void ParallelScavengeHeap::object_iterate_parallel(ObjectClosure* cl,
                                                   HeapBlockClaimer* claimer) {
  size_t block_index = claimer->claim_and_get_block();
  // Iterate until all blocks are claimed
  if (block_index == HeapBlockClaimer::EdenIndex) {
    young_gen()->eden_space()->object_iterate(cl);
    block_index = claimer->claim_and_get_block();
  }
  if (block_index == HeapBlockClaimer::SurvivorIndex) {
    young_gen()->from_space()->object_iterate(cl);
    young_gen()->to_space()->object_iterate(cl);
    block_index = claimer->claim_and_get_block();
  }
  while (block_index != HeapBlockClaimer::InvalidIndex) {
    old_gen()->object_iterate_block(cl, block_index - HeapBlockClaimer::NumNonOldGenClaims);
    block_index = claimer->claim_and_get_block();
  }
}

class PSScavengeParallelObjectIterator : public ParallelObjectIteratorImpl {
private:
  ParallelScavengeHeap* _heap;
  HeapBlockClaimer      _claimer;

public:
  PSScavengeParallelObjectIterator() :
      _heap(ParallelScavengeHeap::heap()),
      _claimer() {}

  virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
    _heap->object_iterate_parallel(cl, &_claimer);
  }
};

ParallelObjectIteratorImpl* ParallelScavengeHeap::parallel_object_iterator(uint thread_num) {
  return new PSScavengeParallelObjectIterator();
}

HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // called from os::print_location by find or VMError
    if (DebuggingContext::is_enabled() || VMError::is_error_reported()) {
      return nullptr;
    }
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return nullptr;
}

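// An address is an object start exactly when block_start() maps it to itself.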
bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
  PSOldGen* old = old_gen();
  HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
  HeapWord* old_reserved_start = old->reserved().start();
  HeapWord* old_reserved_end = old->reserved().end();
  VirtualSpaceSummary old_summary(old_reserved_start, old_committed_end, old_reserved_end);
  SpaceSummary old_space(old_reserved_start, old_committed_end, old->used_in_bytes());

  PSYoungGen* young = young_gen();
  VirtualSpaceSummary young_summary(young->reserved().start(),
    (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());

  MutableSpace* eden = young_gen()->eden_space();
  SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());

  MutableSpace* from = young_gen()->from_space();
  SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes());

  MutableSpace* to = young_gen()->to_space();
  SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());

  VirtualSpaceSummary heap_summary = create_heap_space_summary();
  return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space);
}

bool ParallelScavengeHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<ParallelScavengeHeap>::print_location(st, addr);
}

void ParallelScavengeHeap::print_heap_on(outputStream* st) const {
  if (young_gen() != nullptr) {
    young_gen()->print_on(st);
  }
  if (old_gen() != nullptr) {
    old_gen()->print_on(st);
  }
}

void ParallelScavengeHeap::print_gc_on(outputStream* st) const {
  BarrierSet* bs = BarrierSet::barrier_set();
  if (bs != nullptr) {
    bs->print_on(st);
  }
  st->cr();

  PSParallelCompact::print_on(st);
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  ParallelScavengeHeap::heap()->workers().threads_do(tc);
}

void ParallelScavengeHeap::print_tracing_info() const {
  AdaptiveSizePolicyOutput::print();
  log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
  log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs", PSParallelCompact::accumulated_time()->seconds());
}

PreGenGCValues ParallelScavengeHeap::get_pre_gc_values() const {
  const PSYoungGen* const young = young_gen();
  const MutableSpace* const eden = young->eden_space();
  const MutableSpace* const from = young->from_space();
  const PSOldGen* const old = old_gen();

  return PreGenGCValues(young->used_in_bytes(),
                        young->capacity_in_bytes(),
                        eden->used_in_bytes(),
                        eden->capacity_in_bytes(),
                        from->used_in_bytes(),
                        from->capacity_in_bytes(),
                        old->used_in_bytes(),
                        old->capacity_in_bytes());
}

void ParallelScavengeHeap::print_heap_change(const PreGenGCValues& pre_gc_values) const {
  const PSYoungGen* const young = young_gen();
  const MutableSpace* const eden = young->eden_space();
  const MutableSpace* const from = young->from_space();
  const PSOldGen* const old = old_gen();

  log_info(gc, heap)(HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(young->name(),
                                             pre_gc_values.young_gen_used(),
                                             pre_gc_values.young_gen_capacity(),
                                             young->used_in_bytes(),
                                             young->capacity_in_bytes()),
                     HEAP_CHANGE_FORMAT_ARGS("Eden",
                                             pre_gc_values.eden_used(),
                                             pre_gc_values.eden_capacity(),
                                             eden->used_in_bytes(),
                                             eden->capacity_in_bytes()),
                     HEAP_CHANGE_FORMAT_ARGS("From",
                                             pre_gc_values.from_used(),
                                             pre_gc_values.from_capacity(),
                                             from->used_in_bytes(),
                                             from->capacity_in_bytes()));
  log_info(gc, heap)(HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(old->name(),
                                             pre_gc_values.old_gen_used(),
                                             pre_gc_values.old_gen_capacity(),
                                             old->used_in_bytes(),
                                             old->capacity_in_bytes()));
  MetaspaceUtils::print_metaspace_change(pre_gc_values.metaspace_sizes());
}

void ParallelScavengeHeap::verify(VerifyOption option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    log_debug(gc, verify)("Tenured");
    old_gen()->verify();

    log_debug(gc, verify)("Eden");
    young_gen()->verify();

    log_debug(gc, verify)("CardTable");
    card_table()->verify_all_young_refs_imprecise();
  }
}

void ParallelScavengeHeap::trace_actual_reserved_page_size(const size_t reserved_heap_size, const ReservedSpace rs) {
  // Check if Info level is enabled, since os::trace_page_sizes() logs on Info level.
  if (log_is_enabled(Info, pagesize)) {
    const size_t page_size = rs.page_size();
    os::trace_page_sizes("Heap",
                         MinHeapSize,
                         reserved_heap_size,
                         rs.base(),
                         rs.size(),
                         page_size);
  }
}

void ParallelScavengeHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const PSHeapSummary& heap_summary = create_ps_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

CardTableBarrierSet* ParallelScavengeHeap::barrier_set() {
  return barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
}

PSCardTable* ParallelScavengeHeap::card_table() {
  return static_cast<PSCardTable*>(barrier_set()->card_table());
}

void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
                                            size_t survivor_size) {
  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

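// CDS support: archived heap objects are materialized in the old gen.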
HeapWord* ParallelScavengeHeap::allocate_loaded_archive_space(size_t size) {
  return _old_gen->allocate(size);
}

void ParallelScavengeHeap::complete_loaded_archive_space(MemRegion archive_space) {
  assert(_old_gen->object_space()->used_region().contains(archive_space),
         "Archive space not contained in old gen");
  _old_gen->complete_loaded_archive_space(archive_space);
}

void ParallelScavengeHeap::register_nmethod(nmethod* nm) {
  ScavengableNMethods::register_nmethod(nm);
}

void ParallelScavengeHeap::unregister_nmethod(nmethod* nm) {
  ScavengableNMethods::unregister_nmethod(nm);
}

void ParallelScavengeHeap::verify_nmethod(nmethod* nm) {
  ScavengableNMethods::verify_nmethod(nm);
}

GrowableArray<GCMemoryManager*> ParallelScavengeHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(2);
  memory_managers.append(_young_manager);
  memory_managers.append(_old_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> ParallelScavengeHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(3);
  memory_pools.append(_eden_pool);
  memory_pools.append(_survivor_pool);
  memory_pools.append(_old_pool);
  return memory_pools;
}

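// Parallel GC has no region-level pinning; object pinning is implemented with
// the GCLocker, so a pinned object blocks GC entirely until it is unpinned.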
void ParallelScavengeHeap::pin_object(JavaThread* thread, oop obj) {
  GCLocker::enter(thread);
}

void ParallelScavengeHeap::unpin_object(JavaThread* thread, oop obj) {
  GCLocker::exit(thread);
}

void ParallelScavengeHeap::update_parallel_worker_threads_cpu_time() {
  assert(Thread::current()->is_VM_thread(),
         "Must be called from VM thread to avoid races");
  if (!UsePerfData || !os::is_thread_cpu_time_supported()) {
    return;
  }

  // Ensure the ThreadTotalCPUTimeClosure destructor is called before
  // publishing the gc time.
  {
    ThreadTotalCPUTimeClosure tttc(CPUTimeGroups::CPUTimeType::gc_parallel_workers);
    // Currently parallel worker threads in GCTaskManager never terminate, so it
    // is safe for the VMThread to read their CPU times. If upstream changes this
    // behavior, we should rethink whether it is still safe.
    gc_threads_do(&tttc);
  }

  CPUTimeCounters::publish_gc_total_cpu_time();
}