1 /*
2 * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "gc/parallel/objectStartArray.inline.hpp"
26 #include "gc/parallel/parallelArguments.hpp"
27 #include "gc/parallel/parallelInitLogger.hpp"
28 #include "gc/parallel/parallelScavengeHeap.inline.hpp"
29 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
30 #include "gc/parallel/psMemoryPool.hpp"
31 #include "gc/parallel/psParallelCompact.inline.hpp"
32 #include "gc/parallel/psParallelCompactNew.inline.hpp"
33 #include "gc/parallel/psPromotionManager.hpp"
34 #include "gc/parallel/psScavenge.hpp"
35 #include "gc/parallel/psVMOperations.hpp"
36 #include "gc/shared/barrierSetNMethod.hpp"
37 #include "gc/shared/fullGCForwarding.inline.hpp"
38 #include "gc/shared/gcHeapSummary.hpp"
39 #include "gc/shared/gcLocker.inline.hpp"
40 #include "gc/shared/gcWhen.hpp"
41 #include "gc/shared/genArguments.hpp"
42 #include "gc/shared/locationPrinter.inline.hpp"
43 #include "gc/shared/scavengableNMethods.hpp"
44 #include "gc/shared/suspendibleThreadSet.hpp"
45 #include "logging/log.hpp"
46 #include "memory/iterator.hpp"
47 #include "memory/metaspaceCounters.hpp"
48 #include "memory/metaspaceUtils.hpp"
49 #include "memory/reservedSpace.hpp"
50 #include "memory/universe.hpp"
51 #include "oops/oop.inline.hpp"
52 #include "runtime/atomic.hpp"
53 #include "runtime/cpuTimeCounters.hpp"
54 #include "runtime/globals_extension.hpp"
55 #include "runtime/handles.inline.hpp"
56 #include "runtime/init.hpp"
57 #include "runtime/java.hpp"
58 #include "runtime/vmThread.hpp"
59 #include "services/memoryManager.hpp"
60 #include "utilities/macros.hpp"
61 #include "utilities/vmError.hpp"
62
// Heap-wide static state: the adaptive-size policy, its perf counters, and
// the page size desired for the heap reservation. _desired_page_size must be
// set before initialize() runs (asserted there).
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = nullptr;
GCPolicyCounters* ParallelScavengeHeap::_gc_policy_counters = nullptr;
size_t ParallelScavengeHeap::_desired_page_size = 0;
66
// One-time heap setup: reserve the heap, lay out old-gen below young-gen,
// create the card table / barrier set, the worker threads, both generations,
// the size policy, and the full-gc auxiliary data structures.
// Returns JNI_OK on success, JNI_ENOMEM if auxiliary data cannot be allocated.
jint ParallelScavengeHeap::initialize() {
  const size_t reserved_heap_size = ParallelArguments::heap_reserved_size_bytes();

  assert(_desired_page_size != 0, "Should be initialized");
  ReservedHeapSpace heap_rs = Universe::reserve_heap(reserved_heap_size, HeapAlignment, _desired_page_size);
  // Adjust SpaceAlignment based on actually used large page size.
  if (UseLargePages) {
    SpaceAlignment = MAX2(heap_rs.page_size(), default_space_alignment());
  }
  assert(is_aligned(SpaceAlignment, heap_rs.page_size()), "inv");

  trace_actual_reserved_page_size(reserved_heap_size, heap_rs);

  initialize_reserved_region(heap_rs);
  // Layout the reserved space for the generations.
  // Old-gen occupies the lower part of the reservation, young-gen the rest.
  ReservedSpace old_rs = heap_rs.first_part(MaxOldSize, SpaceAlignment);
  ReservedSpace young_rs = heap_rs.last_part(MaxOldSize, SpaceAlignment);
  assert(young_rs.size() == MaxNewSize, "Didn't reserve all of the heap");

  // Card table covers the whole reserved range; it needs the generation base
  // addresses to set up its internal boundaries.
  PSCardTable* card_table = new PSCardTable(_reserved);
  card_table->initialize(old_rs.base(), young_rs.base());

  CardTableBarrierSet* const barrier_set = new CardTableBarrierSet(card_table);
  BarrierSet::set_barrier_set(barrier_set);

  // Set up WorkerThreads
  _workers.initialize_workers();

  // Create and initialize the generations.
  _young_gen = new PSYoungGen(
      young_rs,
      NewSize,
      MinNewSize,
      MaxNewSize);
  _old_gen = new PSOldGen(
      old_rs,
      OldSize,
      MinOldSize,
      MaxOldSize);

  assert(young_gen()->max_gen_size() == young_rs.size(),"Consistency check");
  assert(old_gen()->max_gen_size() == old_rs.size(), "Consistency check");

  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;

  _size_policy = new PSAdaptiveSizePolicy(SpaceAlignment,
                                          max_gc_pause_sec);

  // Generations were carved from one contiguous reservation, so old-gen's
  // upper boundary must coincide with young-gen's lower boundary.
  assert((old_gen()->virtual_space()->high_boundary() ==
          young_gen()->virtual_space()->low_boundary()),
         "Boundaries must meet");
  // initialize the policy counters - 2 collectors, 2 generations
  _gc_policy_counters = new GCPolicyCounters("ParScav:MSC", 2, 2);

  // UseCompactObjectHeaders selects the alternative full-gc implementation.
  if (UseCompactObjectHeaders) {
    if (!PSParallelCompactNew::initialize_aux_data()) {
      return JNI_ENOMEM;
    }
  } else {
    if (!PSParallelCompact::initialize_aux_data()) {
      return JNI_ENOMEM;
    }
  }

  // Create CPU time counter
  CPUTimeCounters::create_counter(CPUTimeGroups::CPUTimeType::gc_parallel_workers);

  ParallelInitLogger::print();

  FullGCForwarding::initialize(_reserved);

  return JNI_OK;
}
140
// Set up the java.lang.management memory pools and memory managers.
// The old (full-gc) manager covers all three pools since a full-gc affects
// the whole heap; the young manager covers only eden and survivor.
void ParallelScavengeHeap::initialize_serviceability() {

  _eden_pool = new PSEdenSpacePool(_young_gen,
                                   _young_gen->eden_space(),
                                   "PS Eden Space",
                                   false /* support_usage_threshold */);

  _survivor_pool = new PSSurvivorSpacePool(_young_gen,
                                           "PS Survivor Space",
                                           false /* support_usage_threshold */);

  // Only the old pool supports a usage threshold for monitoring.
  _old_pool = new PSOldGenerationPool(_old_gen,
                                      "PS Old Gen",
                                      true /* support_usage_threshold */);

  _young_manager = new GCMemoryManager("PS Scavenge");
  _old_manager = new GCMemoryManager("PS MarkSweep");

  _old_manager->add_pool(_eden_pool);
  _old_manager->add_pool(_survivor_pool);
  _old_manager->add_pool(_old_pool);

  _young_manager->add_pool(_eden_pool);
  _young_manager->add_pool(_survivor_pool);

}
167
// Predicate used by ScavengableNMethods: an object is "scavengable" (may be
// moved by a young-gc) iff it currently resides in the young generation.
class PSIsScavengable : public BoolObjectClosure {
  bool do_object_b(oop obj) {
    return ParallelScavengeHeap::heap()->is_in_young(obj);
  }
};

// File-local singleton; registered once in post_initialize().
static PSIsScavengable _is_scavengable;
175
// Second-phase initialization, run after the heap itself exists: wires up
// the collectors, promotion manager, nmethod tracking and the GC locker.
void ParallelScavengeHeap::post_initialize() {
  CollectedHeap::post_initialize();
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  if (UseCompactObjectHeaders) {
    PSParallelCompactNew::post_initialize();
  } else {
    PSParallelCompact::post_initialize();
  }
  PSPromotionManager::initialize();

  ScavengableNMethods::initialize(&_is_scavengable);
  GCLocker::initialize();
}
190
191 void ParallelScavengeHeap::gc_epilogue(bool full) {
192 if (_is_heap_almost_full) {
193 // Reset emergency state if eden is empty after a young/full gc
194 if (_young_gen->eden_space()->is_empty()) {
195 log_debug(gc)("Leaving memory constrained state; back to normal");
196 _is_heap_almost_full = false;
197 }
198 } else {
199 if (full && !_young_gen->eden_space()->is_empty()) {
200 log_debug(gc)("Non-empty young-gen after full-gc; in memory constrained state");
201 _is_heap_almost_full = true;
202 }
203 }
204 }
205
// Refresh all performance counters exposed to monitoring tools (jstat etc.).
void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  MetaspaceCounters::update_performance_counters();
  update_parallel_worker_threads_cpu_time();
}
212
213 size_t ParallelScavengeHeap::capacity() const {
214 size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
215 return value;
216 }
217
218 size_t ParallelScavengeHeap::used() const {
219 size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
220 return value;
221 }
222
// Maximum usable capacity: the full reservation minus the to-space, which is
// never available for application data (one survivor space is always empty).
size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  if (UseAdaptiveSizePolicy) {
    // Survivor size can change over time; subtract the policy's upper bound.
    estimated -= _size_policy->max_survivor_size(young_gen()->max_gen_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  // Never report less than the current committed capacity.
  return MAX2(estimated, capacity());
}
232
// True if p points into the allocated part of either generation.
bool ParallelScavengeHeap::is_in(const void* p) const {
  return young_gen()->is_in(p) || old_gen()->is_in(p);
}

// True if p points anywhere into the reserved (not necessarily committed)
// range of either generation.
bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  return young_gen()->is_in_reserved(p) || old_gen()->is_in_reserved(p);
}

// Stack chunks in old-gen need GC barriers; young-gen objects are processed
// wholesale by the scavenger and do not.
bool ParallelScavengeHeap::requires_barriers(stackChunkOop p) const {
  return !is_in_young(p);
}
244
245 // There are two levels of allocation policy here.
246 //
247 // When an allocation request fails, the requesting thread must invoke a VM
248 // operation, transfer control to the VM thread, and await the results of a
249 // garbage collection. That is quite expensive, and we should avoid doing it
250 // multiple times if possible.
251 //
252 // To accomplish this, we have a basic allocation policy, and also a
253 // failed allocation policy.
254 //
255 // The basic allocation policy controls how you allocate memory without
256 // attempting garbage collection. It is okay to grab locks and
257 // expand the heap, if that can be done without coming to a safepoint.
258 // It is likely that the basic allocation policy will not be very
259 // aggressive.
260 //
261 // The failed allocation policy is invoked from the VM thread after
262 // the basic allocation policy is unable to satisfy a mem_allocate
263 // request. This policy needs to cover the entire range of collection,
264 // heap expansion, and out-of-memory conditions. It should make every
265 // attempt to allocate the requested memory.
266
267 // Basic allocation policy. Should never be called at a safepoint, or
268 // from the VM thread.
269 //
270 // This method must handle cases where many mem_allocate requests fail
271 // simultaneously. When that happens, only one VM operation will succeed,
272 // and the rest will not be executed. For that reason, this method loops
273 // during failed allocation attempts. If the java heap becomes exhausted,
274 // we rely on the size_policy object to force a bail out.
275 HeapWord* ParallelScavengeHeap::mem_allocate(size_t size) {
276 assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
277 assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
278 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
279
280 bool is_tlab = false;
281 return mem_allocate_work(size, is_tlab);
282 }
283
// Lock-free allocation attempt that never expands the heap and never GCs.
// Attempt order encodes policy: eden first, then old-gen for large non-TLAB
// requests, and — only under memory pressure — from-space as a last resort.
// Returns null if no space could satisfy the request.
HeapWord* ParallelScavengeHeap::mem_allocate_cas_noexpand(size_t size, bool is_tlab) {
  // Try young-gen first.
  HeapWord* result = young_gen()->allocate(size);
  if (result != nullptr) {
    return result;
  }

  // Try allocating from the old gen for non-TLAB and large allocations.
  if (!is_tlab) {
    if (!should_alloc_in_eden(size)) {
      result = old_gen()->cas_allocate_noexpand(size);
      if (result != nullptr) {
        return result;
      }
    }
  }

  // In extreme cases, try allocating in from space also.
  if (_is_heap_almost_full) {
    result = young_gen()->from_space()->cas_allocate(size);
    if (result != nullptr) {
      return result;
    }
    // Also retry old-gen regardless of size, since we are desperate here.
    if (!is_tlab) {
      result = old_gen()->cas_allocate_noexpand(size);
      if (result != nullptr) {
        return result;
      }
    }
  }

  return nullptr;
}
317
// Common allocation loop for both ordinary objects and TLABs.
//
// Each iteration: (1) fast CAS attempt without expansion; (2) retry under
// Heap_lock (a GC may have run while acquiring it); (3) request a GC via a
// VM operation that also retries the allocation inside the safepoint.
// Loops until the allocation succeeds, or returns null once the gc-overhead
// limit has been reached.
HeapWord* ParallelScavengeHeap::mem_allocate_work(size_t size, bool is_tlab) {
  for (uint loop_count = 0; /* empty */; ++loop_count) {
    HeapWord* result;
    {
      // Before VM init completes GC cannot run, so early allocations are
      // serialized under the Heap_lock instead.
      ConditionalMutexLocker locker(Heap_lock, !is_init_completed());
      result = mem_allocate_cas_noexpand(size, is_tlab);
      if (result != nullptr) {
        return result;
      }
    }

    // Read total_collections() under the lock so that multiple
    // allocation-failures result in one GC.
    uint gc_count;
    {
      MutexLocker ml(Heap_lock);

      // Re-try after acquiring the lock, because a GC might have occurred
      // while waiting for this lock.
      result = mem_allocate_cas_noexpand(size, is_tlab);
      if (result != nullptr) {
        return result;
      }

      if (!is_init_completed()) {
        // Double checked locking, this ensures that is_init_completed() does
        // not transition while expanding the heap.
        MonitorLocker ml(InitCompleted_lock, Monitor::_no_safepoint_check_flag);
        if (!is_init_completed()) {
          // Can't do GC; try heap expansion to satisfy the request.
          result = expand_heap_and_allocate(size, is_tlab);
          if (result != nullptr) {
            return result;
          }
        }
      }

      gc_count = total_collections();
    }

    {
      // gc_count lets the VM thread detect that another GC already satisfied
      // concurrent allocation failures and skip a redundant collection.
      VM_ParallelCollectForAllocation op(size, is_tlab, gc_count);
      VMThread::execute(&op);

      if (op.gc_succeeded()) {
        assert(is_in_or_null(op.result()), "result not in heap");
        return op.result();
      }
    }

    // Was the gc-overhead reached inside the safepoint? If so, this mutator
    // should return null as well for global consistency.
    if (_gc_overhead_counter >= GCOverheadLimitThreshold) {
      return nullptr;
    }

    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc)("ParallelScavengeHeap::mem_allocate retries %d times, size=%zu", loop_count, size);
    }
  }
}
380
// Run a full collection, dispatching to the implementation selected by
// UseCompactObjectHeaders.
void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
  // No need for max-compaction in this context.
  const bool should_do_max_compaction = false;
  if (UseCompactObjectHeaders) {
    PSParallelCompactNew::invoke(clear_all_soft_refs, should_do_max_compaction);
  } else {
    PSParallelCompact::invoke(clear_all_soft_refs, should_do_max_compaction);
  }
}
390
// Decide whether a young-gc is worthwhile, or whether we should go straight
// to a full-gc. Returns true for young-gc, false for full-gc.
bool ParallelScavengeHeap::should_attempt_young_gc() const {
  const bool ShouldRunYoungGC = true;
  const bool ShouldRunFullGC = false;

  // A young-gc needs an empty to-space to copy survivors into.
  if (!_young_gen->to_space()->is_empty()) {
    log_debug(gc, ergo)("To-space is not empty; run full-gc instead.");
    return ShouldRunFullGC;
  }

  // Check if the predicted promoted bytes will overflow free space in old-gen.
  PSAdaptiveSizePolicy* policy = _size_policy;

  size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
  // Promotion can never exceed what is currently live in young-gen.
  size_t promotion_estimate = MIN2(avg_promoted, _young_gen->used_in_bytes());
  // Total free size after possible old gen expansion
  size_t free_in_old_gen_with_expansion = _old_gen->max_gen_size() - _old_gen->used_in_bytes();

  log_trace(gc, ergo)("average_promoted %zu; padded_average_promoted %zu",
                      (size_t) policy->average_promoted_in_bytes(),
                      (size_t) policy->padded_average_promoted_in_bytes());

  if (promotion_estimate >= free_in_old_gen_with_expansion) {
    log_debug(gc, ergo)("Run full-gc; predicted promotion size >= max free space in old-gen: %zu >= %zu",
                        promotion_estimate, free_in_old_gen_with_expansion);
    return ShouldRunFullGC;
  }

  if (UseAdaptiveSizePolicy) {
    // Also checking OS has enough free memory to commit and expand old-gen.
    // Otherwise, the recorded gc-pause-time might be inflated to include time
    // of OS preparing free memory, resulting in inaccurate young-gen resizing.
    assert(_old_gen->committed().byte_size() >= _old_gen->used_in_bytes(), "inv");
    // Use uint64_t instead of size_t for 32bit compatibility.
    uint64_t free_mem_in_os;
    if (os::free_memory(free_mem_in_os)) {
      // Free space = already-committed-but-unused old-gen plus OS free memory,
      // clamped to size_t range.
      size_t actual_free = (size_t)MIN2(_old_gen->committed().byte_size() - _old_gen->used_in_bytes() + free_mem_in_os,
                                        (uint64_t)SIZE_MAX);
      if (promotion_estimate > actual_free) {
        log_debug(gc, ergo)("Run full-gc; predicted promotion size > free space in old-gen and OS: %zu > %zu",
                            promotion_estimate, actual_free);
        return ShouldRunFullGC;
      }
    }
  }

  // No particular reasons to run full-gc, so young-gc.
  return ShouldRunYoungGC;
}
439
440 static bool check_gc_heap_free_limit(size_t free_bytes, size_t capacity_bytes) {
441 return (free_bytes * 100 / capacity_bytes) < GCHeapFreeLimit;
442 }
443
// Track the GC-overhead-limit condition (UseGCOverheadLimit): returns true
// once GC dominates runtime AND both generations are nearly full for
// GCOverheadLimitThreshold consecutive qualifying collections, so callers
// can fail allocations and let the app exit gracefully instead of thrashing.
bool ParallelScavengeHeap::check_gc_overhead_limit() {
  assert(SafepointSynchronize::is_at_safepoint(), "precondition");

  if (UseGCOverheadLimit) {
    // The goal here is to return null prematurely so that apps can exit
    // gracefully when GC takes the most time.
    // NOTE(review): the "* 100" below treats mutator_time_percent() as a
    // fraction in [0,1], while the log statement below treats it as a
    // percentage in [0,100]; one of the two usages looks inconsistent —
    // confirm the unit returned by PSAdaptiveSizePolicy::mutator_time_percent().
    bool little_mutator_time = _size_policy->mutator_time_percent() * 100 < (100 - GCTimeLimit);
    bool little_free_space = check_gc_heap_free_limit(_young_gen->free_in_bytes(), _young_gen->capacity_in_bytes())
                          && check_gc_heap_free_limit( _old_gen->free_in_bytes(), _old_gen->capacity_in_bytes());

    log_debug(gc)("GC Overhead Limit: GC Time %f Free Space Young %f Old %f Counter %zu",
                  (100 - _size_policy->mutator_time_percent()),
                  percent_of(_young_gen->free_in_bytes(), _young_gen->capacity_in_bytes()),
                  percent_of(_old_gen->free_in_bytes(), _old_gen->capacity_in_bytes()),
                  _gc_overhead_counter);

    if (little_mutator_time && little_free_space) {
      // Both conditions hold: this GC counts towards the limit.
      _gc_overhead_counter++;
      if (_gc_overhead_counter >= GCOverheadLimitThreshold) {
        return true;
      }
    } else {
      // Any healthy collection resets the streak.
      _gc_overhead_counter = 0;
    }
  }
  return false;
}
471
// Try to satisfy the allocation by expanding a generation (no GC).
// Before init completes this is called by a Java thread under the Heap_lock;
// afterwards only at a safepoint by the VM thread (see asserts).
HeapWord* ParallelScavengeHeap::expand_heap_and_allocate(size_t size, bool is_tlab) {
#ifdef ASSERT
  assert(Heap_lock->is_locked(), "precondition");
  if (is_init_completed()) {
    assert(SafepointSynchronize::is_at_safepoint(), "precondition");
    assert(Thread::current()->is_VM_thread(), "precondition");
  } else {
    assert(Thread::current()->is_Java_thread(), "precondition");
    assert(Heap_lock->owned_by_self(), "precondition");
  }
#endif

  HeapWord* result = young_gen()->expand_and_allocate(size);

  // TLABs must stay in young-gen; other requests may fall back to old-gen.
  if (result == nullptr && !is_tlab) {
    result = old_gen()->expand_and_allocate(size);
  }

  return result; // Could be null if we are out of space.
}
492
// Failed-allocation policy, run at a safepoint: escalate from an ordinary
// GC plus expansion to a last-resort max-compacting full-gc that clears soft
// references. Returns null (-> OOME path) if all attempts fail or the
// gc-overhead limit has been exceeded.
HeapWord* ParallelScavengeHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  assert(size != 0, "precondition");

  HeapWord* result = nullptr;

  if (!_is_heap_almost_full) {
    // If young-gen can handle this allocation, attempt young-gc firstly, as young-gc is usually cheaper.
    bool should_run_young_gc = is_tlab || should_alloc_in_eden(size);

    collect_at_safepoint(!should_run_young_gc);

    // If gc-overhead is reached, we will skip allocation.
    if (!check_gc_overhead_limit()) {
      result = expand_heap_and_allocate(size, is_tlab);
      if (result != nullptr) {
        return result;
      }
    }
  }

  // Last resort GC; clear soft refs and do max-compaction before throwing OOM.
  {
    const bool clear_all_soft_refs = true;
    const bool should_do_max_compaction = true;
    if (UseCompactObjectHeaders) {
      PSParallelCompactNew::invoke(clear_all_soft_refs, should_do_max_compaction);
    } else {
      PSParallelCompact::invoke(clear_all_soft_refs, should_do_max_compaction);
    }
  }

  // Re-check after the last-resort GC so an overloaded VM still bails out.
  if (check_gc_overhead_limit()) {
    log_info(gc)("GC Overhead Limit exceeded too often (%zu).", GCOverheadLimitThreshold);
    return nullptr;
  }

  result = expand_heap_and_allocate(size, is_tlab);
  return result;
}
532
// Make the heap walkable: let the shared code handle TLABs, then fill any
// gap at the top of eden.
void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}
537
// TLAB statistics all delegate to eden, the only space TLABs come from.
size_t ParallelScavengeHeap::tlab_capacity() const {
  return young_gen()->eden_space()->tlab_capacity();
}

size_t ParallelScavengeHeap::tlab_used() const {
  return young_gen()->eden_space()->tlab_used();
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc() const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc();
}
549
550 HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
551 HeapWord* result = mem_allocate_work(requested_size /* size */,
552 true /* is_tlab */);
553 if (result != nullptr) {
554 *actual_size = requested_size;
555 }
556
557 return result;
558 }
559
// No Parallel-specific TLAB resizing; use the shared implementation.
void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}
563
// Drop nmethods that no longer reference young-gen from the scavengable list.
void ParallelScavengeHeap::prune_scavengable_nmethods() {
  ScavengableNMethods::prune_nmethods_not_into_young();
}

// Drop nmethods that were unlinked by class unloading.
void ParallelScavengeHeap::prune_unlinked_nmethods() {
  ScavengableNMethods::prune_unlinked_nmethods();
}
571
// Request a collection (e.g. System.gc()) from a Java thread: snapshot the
// collection counts under the Heap_lock so the VM operation can detect (and
// skip) a GC that already happened between the request and its execution.
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
         "this thread should not own the Heap_lock");

  uint gc_count = 0;
  uint full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // This value is guarded by the Heap_lock
    gc_count = total_collections();
    full_gc_count = total_full_collections();
  }

  VM_ParallelGCCollect op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}
588
// Perform the collection at a safepoint. Unless a full-gc was requested,
// try a young-gc first if the ergonomics permit; a failed young-gc is
// upgraded to a full-gc.
void ParallelScavengeHeap::collect_at_safepoint(bool is_full) {
  assert(!GCLocker::is_active(), "precondition");
  bool clear_soft_refs = GCCause::should_clear_all_soft_refs(_gc_cause);

  if (!is_full && should_attempt_young_gc()) {
    bool young_gc_success = PSScavenge::invoke(clear_soft_refs);
    if (young_gc_success) {
      return;
    }
    log_debug(gc, heap)("Upgrade to Full-GC since Young-gc failed.");
  }

  // Regular (non-last-resort) full-gc: no max-compaction needed.
  const bool should_do_max_compaction = false;
  if (UseCompactObjectHeaders) {
    PSParallelCompactNew::invoke(clear_soft_refs, should_do_max_compaction);
  } else {
    PSParallelCompact::invoke(clear_soft_refs, should_do_max_compaction);
  }
}
608
// Single-threaded iteration over every object in the heap.
void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
}
613
// The HeapBlockClaimer is used during parallel iteration over the heap,
// allowing workers to claim heap areas ("blocks"), gaining exclusive rights to these.
// The eden and survivor spaces are treated as single blocks as it is hard to divide
// these spaces.
// The old space is divided into fixed-size blocks.
class HeapBlockClaimer : public StackObj {
  // Next block index to hand out; indices 0 and 1 are eden and the survivor
  // spaces, the rest map to old-gen blocks (offset by NumNonOldGenClaims).
  Atomic<size_t> _claimed_index;

 public:
  static const size_t InvalidIndex = SIZE_MAX;
  static const size_t EdenIndex = 0;
  static const size_t SurvivorIndex = 1;
  static const size_t NumNonOldGenClaims = 2;

  HeapBlockClaimer() : _claimed_index(EdenIndex) { }
  // Claim the block and get the block index.
  // Returns InvalidIndex once all blocks have been handed out.
  size_t claim_and_get_block() {
    size_t block_index;
    // Atomic fetch-and-increment guarantees each index is claimed once.
    block_index = _claimed_index.fetch_then_add(1u);

    PSOldGen* old_gen = ParallelScavengeHeap::heap()->old_gen();
    size_t num_claims = old_gen->num_iterable_blocks() + NumNonOldGenClaims;

    return block_index < num_claims ? block_index : InvalidIndex;
  }
};
640
// Worker body for parallel object iteration: repeatedly claim a block from
// the shared claimer and iterate it. The eden and survivor claims cover
// whole spaces; the remaining claims map to fixed-size old-gen blocks.
void ParallelScavengeHeap::object_iterate_parallel(ObjectClosure* cl,
                                                   HeapBlockClaimer* claimer) {
  size_t block_index = claimer->claim_and_get_block();
  // Iterate until all blocks are claimed
  if (block_index == HeapBlockClaimer::EdenIndex) {
    young_gen()->eden_space()->object_iterate(cl);
    block_index = claimer->claim_and_get_block();
  }
  if (block_index == HeapBlockClaimer::SurvivorIndex) {
    // Both survivor spaces are covered by the single survivor claim.
    young_gen()->from_space()->object_iterate(cl);
    young_gen()->to_space()->object_iterate(cl);
    block_index = claimer->claim_and_get_block();
  }
  while (block_index != HeapBlockClaimer::InvalidIndex) {
    old_gen()->object_iterate_block(cl, block_index - HeapBlockClaimer::NumNonOldGenClaims);
    block_index = claimer->claim_and_get_block();
  }
}
659
// Adapter that lets multiple GC workers share one HeapBlockClaimer while
// iterating the heap in parallel.
class PSScavengeParallelObjectIterator : public ParallelObjectIteratorImpl {
 private:
  ParallelScavengeHeap* _heap;
  HeapBlockClaimer _claimer;

 public:
  PSScavengeParallelObjectIterator() :
      _heap(ParallelScavengeHeap::heap()),
      _claimer() {}

  // Called by each worker; the shared claimer partitions the work.
  virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
    _heap->object_iterate_parallel(cl, &_claimer);
  }
};

// Factory for the parallel iterator; thread_num is not needed because the
// claimer hands out work dynamically.
ParallelObjectIteratorImpl* ParallelScavengeHeap::parallel_object_iterator(uint thread_num) {
  return new PSScavengeParallelObjectIterator();
}
678
// Find the start of the object containing addr. Only implemented for
// old-gen (via its block-start array); for young-gen addresses it returns
// null when called from debugging/error-reporting contexts and is otherwise
// unimplemented. Returns null for addresses outside the heap.
HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // called from os::print_location by find or VMError
    if (DebuggingContext::is_enabled() || VMError::is_error_reported()) {
      return nullptr;
    }
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return nullptr;
}

// addr is an object start iff block_start maps it to itself.
bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}
699
// Verification walks the heap linearly, so it must be parsable first.
void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false); // no need to retire TLABs for verification
}
703
// Build the event/tracing snapshot of the heap layout: reserved/committed
// extents of both generations plus per-space usage for eden, from and to.
PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
  PSOldGen* old = old_gen();
  HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
  HeapWord* old_reserved_start = old->reserved().start();
  HeapWord* old_reserved_end = old->reserved().end();
  VirtualSpaceSummary old_summary(old_reserved_start, old_committed_end, old_reserved_end);
  SpaceSummary old_space(old_reserved_start, old_committed_end, old->used_in_bytes());

  PSYoungGen* young = young_gen();
  VirtualSpaceSummary young_summary(young->reserved().start(),
                                    (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());

  MutableSpace* eden = young_gen()->eden_space();
  SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());

  MutableSpace* from = young_gen()->from_space();
  SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes());

  MutableSpace* to = young_gen()->to_space();
  SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());

  VirtualSpaceSummary heap_summary = create_heap_space_summary();
  return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space);
}
728
// Describe addr for error reporting / the "find" debug command, using the
// shared block-based location printer (relies on block_start/block_is_obj).
bool ParallelScavengeHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<ParallelScavengeHeap>::print_location(st, addr);
}
732
733 void ParallelScavengeHeap::print_heap_on(outputStream* st) const {
734 if (young_gen() != nullptr) {
735 young_gen()->print_on(st);
736 }
737 if (old_gen() != nullptr) {
738 old_gen()->print_on(st);
739 }
740 }
741
// Print GC-internal state: the barrier set followed by the active full-gc
// implementation's state.
void ParallelScavengeHeap::print_gc_on(outputStream* st) const {
  BarrierSet* bs = BarrierSet::barrier_set();
  if (bs != nullptr) {
    bs->print_on(st);
  }
  st->cr();

  if (UseCompactObjectHeaders) {
    PSParallelCompactNew::print_on(st);
  } else {
    PSParallelCompact::print_on(st);
  }
}
755
// Apply tc to every GC worker thread.
void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  ParallelScavengeHeap::heap()->workers().threads_do(tc);
}

// Log accumulated GC times at VM exit (gc+heap+exit debug logging).
void ParallelScavengeHeap::print_tracing_info() const {
  log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
  if (UseCompactObjectHeaders) {
    log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs", PSParallelCompactNew::accumulated_time()->seconds());
  } else {
    log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs", PSParallelCompact::accumulated_time()->seconds());
  }
}
768
// Snapshot used/capacity of all spaces before a GC, for the before/after
// heap-change logging in print_heap_change().
PreGenGCValues ParallelScavengeHeap::get_pre_gc_values() const {
  const PSYoungGen* const young = young_gen();
  const MutableSpace* const eden = young->eden_space();
  const MutableSpace* const from = young->from_space();
  const PSOldGen* const old = old_gen();

  return PreGenGCValues(young->used_in_bytes(),
                        young->capacity_in_bytes(),
                        eden->used_in_bytes(),
                        eden->capacity_in_bytes(),
                        from->used_in_bytes(),
                        from->capacity_in_bytes(),
                        old->used_in_bytes(),
                        old->capacity_in_bytes());
}
784
// Log before->after usage/capacity for young-gen (with eden/from detail),
// old-gen, and metaspace, comparing current values against the pre-GC
// snapshot taken by get_pre_gc_values().
void ParallelScavengeHeap::print_heap_change(const PreGenGCValues& pre_gc_values) const {
  const PSYoungGen* const young = young_gen();
  const MutableSpace* const eden = young->eden_space();
  const MutableSpace* const from = young->from_space();
  const PSOldGen* const old = old_gen();

  log_info(gc, heap)(HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(young->name(),
                                             pre_gc_values.young_gen_used(),
                                             pre_gc_values.young_gen_capacity(),
                                             young->used_in_bytes(),
                                             young->capacity_in_bytes()),
                     HEAP_CHANGE_FORMAT_ARGS("Eden",
                                             pre_gc_values.eden_used(),
                                             pre_gc_values.eden_capacity(),
                                             eden->used_in_bytes(),
                                             eden->capacity_in_bytes()),
                     HEAP_CHANGE_FORMAT_ARGS("From",
                                             pre_gc_values.from_used(),
                                             pre_gc_values.from_capacity(),
                                             from->used_in_bytes(),
                                             from->capacity_in_bytes()));
  log_info(gc, heap)(HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(old->name(),
                                             pre_gc_values.old_gen_used(),
                                             pre_gc_values.old_gen_capacity(),
                                             old->used_in_bytes(),
                                             old->capacity_in_bytes()));
  MetaspaceUtils::print_metaspace_change(pre_gc_values.metaspace_sizes());
}
817
// Heap verification: check both generations and then the card table's
// young-ref invariants (imprecise check).
void ParallelScavengeHeap::verify(VerifyOption option /* ignored */) {
  log_debug(gc, verify)("Tenured");
  old_gen()->verify();

  log_debug(gc, verify)("Eden");
  young_gen()->verify();

  log_debug(gc, verify)("CardTable");
  card_table()->verify_all_young_refs_imprecise();
}
828
829 void ParallelScavengeHeap::trace_actual_reserved_page_size(const size_t reserved_heap_size, const ReservedSpace rs) {
830 // Check if Info level is enabled, since os::trace_page_sizes() logs on Info level.
831 if(log_is_enabled(Info, pagesize)) {
832 const size_t page_size = rs.page_size();
833 os::trace_page_sizes("Heap",
834 MinHeapSize,
835 reserved_heap_size,
836 rs.base(),
837 rs.size(),
838 page_size);
839 }
840 }
841
// Emit heap and metaspace summaries to the GC tracer (JFR events etc.)
// before/after a collection, as indicated by 'when'.
void ParallelScavengeHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const PSHeapSummary& heap_summary = create_ps_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}
849
// Typed accessors for the global barrier set / card table installed in
// initialize().
CardTableBarrierSet* ParallelScavengeHeap::barrier_set() {
  return barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
}

PSCardTable* ParallelScavengeHeap::card_table() {
  return static_cast<PSCardTable*>(barrier_set()->card_table());
}
857
858 static size_t calculate_free_from_free_ratio_flag(size_t live, uintx free_percent) {
859 assert(free_percent != 100, "precondition");
860 // We want to calculate how much free memory there can be based on the
861 // live size.
862 // percent * (free + live) = free
863 // =>
864 // free = (live * percent) / (1 - percent)
865
866 const double percent = free_percent / 100.0;
867 return live * percent / (1.0 - percent);
868 }
869
// Compute the desired old-gen capacity after a full-gc: live size plus
// enough free room for the next promotion and historical peak usage,
// clamped by the Min/MaxHeapFreeRatio flags.
size_t ParallelScavengeHeap::calculate_desired_old_gen_capacity(size_t old_gen_live_size) {
  // If min free percent is 100%, the old-gen should always be in its max capacity
  if (MinHeapFreeRatio == 100) {
    return _old_gen->max_gen_size();
  }

  // Using recorded data to calculate the new capacity of old-gen to avoid
  // excessive expansion but also keep footprint low

  size_t promoted_estimate = _size_policy->padded_average_promoted_in_bytes();
  // Should have at least this free room for the next young-gc promotion.
  size_t free_size = promoted_estimate;

  // Also keep head-room up to the historical peak old-gen usage.
  size_t largest_live_size = MAX2((size_t)_size_policy->peak_old_gen_used_estimate(), old_gen_live_size);
  free_size += largest_live_size - old_gen_live_size;

  // Respect free percent
  if (MinHeapFreeRatio != 0) {
    size_t min_free = calculate_free_from_free_ratio_flag(old_gen_live_size, MinHeapFreeRatio);
    free_size = MAX2(free_size, min_free);
  }

  if (MaxHeapFreeRatio != 100) {
    size_t max_free = calculate_free_from_free_ratio_flag(old_gen_live_size, MaxHeapFreeRatio);
    free_size = MIN2(max_free, free_size);
  }

  return old_gen_live_size + free_size;
}
899
900 void ParallelScavengeHeap::resize_old_gen_after_full_gc() {
901 size_t current_capacity = _old_gen->capacity_in_bytes();
902 size_t desired_capacity = calculate_desired_old_gen_capacity(old_gen()->used_in_bytes());
903
904 // If MinHeapFreeRatio is at its default value; shrink cautiously. Otherwise, users expect prompt shrinking.
905 if (FLAG_IS_DEFAULT(MinHeapFreeRatio)) {
906 if (desired_capacity < current_capacity) {
907 // Shrinking
908 if (total_full_collections() < AdaptiveSizePolicyReadyThreshold) {
909 // No enough data for shrinking
910 return;
911 }
912 }
913 }
914
915 _old_gen->resize(desired_capacity);
916 }
917
918 void ParallelScavengeHeap::resize_after_young_gc(bool is_survivor_overflowing) {
919 _young_gen->resize_after_young_gc(is_survivor_overflowing);
920
921 // Consider if should shrink old-gen
922 if (!is_survivor_overflowing) {
923 assert(old_gen()->capacity_in_bytes() >= old_gen()->min_gen_size(), "inv");
924
925 // Old gen min_gen_size constraint.
926 const size_t max_shrink_bytes_gen_size_constraint = old_gen()->capacity_in_bytes() - old_gen()->min_gen_size();
927
928 // Per-step delta to avoid too aggressive shrinking.
929 const size_t max_shrink_bytes_per_step_constraint = SpaceAlignment;
930
931 // Combining the above two constraints.
932 const size_t max_shrink_bytes = MIN2(max_shrink_bytes_gen_size_constraint,
933 max_shrink_bytes_per_step_constraint);
934
935 size_t shrink_bytes = _size_policy->compute_old_gen_shrink_bytes(old_gen()->free_in_bytes(), max_shrink_bytes);
936
937 assert(old_gen()->capacity_in_bytes() >= shrink_bytes, "inv");
938 assert(old_gen()->capacity_in_bytes() - shrink_bytes >= old_gen()->min_gen_size(), "inv");
939
940 if (shrink_bytes != 0) {
941 if (MinHeapFreeRatio != 0) {
942 size_t new_capacity = old_gen()->capacity_in_bytes() - shrink_bytes;
943 size_t new_free_size = old_gen()->free_in_bytes() - shrink_bytes;
944 if ((double)new_free_size / new_capacity * 100 < MinHeapFreeRatio) {
945 // Would violate MinHeapFreeRatio
946 return;
947 }
948 }
949 old_gen()->shrink(shrink_bytes);
950 }
951 }
952 }
953
954 void ParallelScavengeHeap::resize_after_full_gc() {
955 resize_old_gen_after_full_gc();
956 // We don't resize young-gen after full-gc because:
957 // 1. eden-size directly affects young-gc frequency (GCTimeRatio), and we
958 // don't have enough info to determine its desired size.
959 // 2. eden can contain live objs after a full-gc, which is unsafe for
960 // resizing. We will perform expansion on allocation if needed, in
961 // satisfy_failed_allocation().
962 }
963
964 HeapWord* ParallelScavengeHeap::allocate_loaded_archive_space(size_t size) {
965 return _old_gen->allocate(size);
966 }
967
968 void ParallelScavengeHeap::complete_loaded_archive_space(MemRegion archive_space) {
969 assert(_old_gen->object_space()->used_region().contains(archive_space),
970 "Archive space not contained in old gen");
971 _old_gen->complete_loaded_archive_space(archive_space);
972 }
973
974 void ParallelScavengeHeap::register_nmethod(nmethod* nm) {
975 ScavengableNMethods::register_nmethod(nm);
976 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
977 bs_nm->disarm(nm);
978 }
979
980 void ParallelScavengeHeap::unregister_nmethod(nmethod* nm) {
981 ScavengableNMethods::unregister_nmethod(nm);
982 }
983
984 void ParallelScavengeHeap::verify_nmethod(nmethod* nm) {
985 ScavengableNMethods::verify_nmethod(nm);
986 }
987
988 GrowableArray<GCMemoryManager*> ParallelScavengeHeap::memory_managers() {
989 GrowableArray<GCMemoryManager*> memory_managers(2);
990 memory_managers.append(_young_manager);
991 memory_managers.append(_old_manager);
992 return memory_managers;
993 }
994
995 GrowableArray<MemoryPool*> ParallelScavengeHeap::memory_pools() {
996 GrowableArray<MemoryPool*> memory_pools(3);
997 memory_pools.append(_eden_pool);
998 memory_pools.append(_survivor_pool);
999 memory_pools.append(_old_pool);
1000 return memory_pools;
1001 }
1002
1003 void ParallelScavengeHeap::pin_object(JavaThread* thread, oop obj) {
1004 GCLocker::enter(thread);
1005 }
1006
1007 void ParallelScavengeHeap::unpin_object(JavaThread* thread, oop obj) {
1008 GCLocker::exit(thread);
1009 }
1010
1011 void ParallelScavengeHeap::update_parallel_worker_threads_cpu_time() {
1012 assert(Thread::current()->is_VM_thread(),
1013 "Must be called from VM thread to avoid races");
1014 if (!UsePerfData || !os::is_thread_cpu_time_supported()) {
1015 return;
1016 }
1017
1018 // Ensure ThreadTotalCPUTimeClosure destructor is called before publishing gc
1019 // time.
1020 {
1021 ThreadTotalCPUTimeClosure tttc(CPUTimeGroups::CPUTimeType::gc_parallel_workers);
1022 // Currently parallel worker threads in GCTaskManager never terminate, so it
1023 // is safe for VMThread to read their CPU times. If upstream changes this
1024 // behavior, we should rethink if it is still safe.
1025 gc_threads_do(&tttc);
1026 }
1027
1028 CPUTimeCounters::publish_gc_total_cpu_time();
1029 }