/*
 * Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
#include "code/codeCache.hpp"
#include "compiler/oopMap.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psClosure.inline.hpp"
#include "gc/parallel/psCompactionManager.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.inline.hpp"
#include "gc/parallel/psRootType.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageParState.inline.hpp"
#include "gc/shared/oopStorageSetParState.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/taskTerminator.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/shared/workerUtils.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/threads.hpp"
#include "runtime/vmOperations.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "utilities/stack.inline.hpp"

SpanSubjectToDiscoveryClosure PSScavenge::_span_based_discoverer;
ReferenceProcessor*           PSScavenge::_ref_processor = nullptr;
PSCardTable*                  PSScavenge::_card_table = nullptr;
bool                          PSScavenge::_survivor_overflow = false;
uint                          PSScavenge::_tenuring_threshold = 0;
HeapWord*                     PSScavenge::_young_generation_boundary = nullptr;
uintptr_t                     PSScavenge::_young_generation_boundary_compressed = 0;
elapsedTimer                  PSScavenge::_accumulated_time;
STWGCTimer                    PSScavenge::_gc_timer;
ParallelScavengeTracer        PSScavenge::_gc_tracer;
CollectorCounters*            PSScavenge::_counters = nullptr;

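// Scavenge one category of strong roots on behalf of the given worker and
// then drain that worker's local promotion stacks. Root categories are
// claimed one at a time by ScavengeRootsTask below.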
static void scavenge_roots_work(ParallelRootType::Value root_type, uint worker_id) {
  assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");

  PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(worker_id);
  PSPromoteRootsClosure roots_to_old_closure(pm);

  switch (root_type) {
    case ParallelRootType::class_loader_data:
      {
        PSScavengeCLDClosure cld_closure(pm);
        ClassLoaderDataGraph::cld_do(&cld_closure);
      }
      break;

    case ParallelRootType::code_cache:
      {
        NMethodToOopClosure code_closure(&roots_to_old_closure, NMethodToOopClosure::FixRelocations);
        ScavengableNMethods::nmethods_do(&code_closure);
      }
      break;

    case ParallelRootType::sentinel:
    DEBUG_ONLY(default:) // The DEBUG_ONLY hack makes a bad value a compile
                         // error on release builds (-Wswitch) and a runtime
                         // check on debug builds.
      fatal("Bad enumeration value: %u", root_type);
      break;
  }

  // Do the real work
  pm->drain_stacks(false);
}

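// Work-stealing phase: drain the local stacks, then repeatedly steal tasks
// from other workers' queues until the terminator decides that all workers
// are done.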
static void steal_work(TaskTerminator& terminator, uint worker_id) {
  assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");

  PSPromotionManager* pm =
    PSPromotionManager::gc_thread_promotion_manager(worker_id);
  pm->drain_stacks(true);
  guarantee(pm->stacks_empty(),
            "stacks should be empty at this point");

  while (true) {
    ScannerTask task;
    if (PSPromotionManager::steal_depth(worker_id, task)) {
      pm->process_popped_location_depth(task, true);
      pm->drain_stacks(true);
    } else {
      if (terminator.offer_termination()) {
        break;
      }
    }
  }
  guarantee(pm->stacks_empty(), "stacks should be empty at this point");
}

// Define before use.
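//
// A young-gen object is considered live if it has been forwarded (copied)
// during this scavenge; objects outside the young generation are trivially
// live.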
class PSIsAliveClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) {
    return (!PSScavenge::is_obj_in_young(p)) || p->is_forwarded();
  }
};

PSIsAliveClosure PSScavenge::_is_alive_closure;

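// Keeps a discovered reference's referent alive by copying it via the
// promotion manager and pushing the new location for later draining.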
class PSKeepAliveClosure: public OopClosure {
  PSPromotionManager* _promotion_manager;

public:
  PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
    assert(_promotion_manager != nullptr, "Sanity");
  }

  template <class T> void do_oop_work(T* p) {
#ifdef ASSERT
    // Referent must be non-null and in from-space
    oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
    assert(oopDesc::is_oop(obj), "referent must be an oop");
    assert(PSScavenge::is_obj_in_young(obj), "must be in young-gen");
    assert(!PSScavenge::is_obj_in_to_space(obj), "must be in from-space");
#endif

    _promotion_manager->copy_and_push_safe_barrier</*promote_immediately=*/false>(p);
  }
  virtual void do_oop(oop* p)       { PSKeepAliveClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
};

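// Transitively evacuates everything reachable from the promotion manager's
// stacks; when run by multiple workers it finishes with work stealing.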
class PSEvacuateFollowersClosure: public VoidClosure {
private:
  PSPromotionManager* _promotion_manager;
  TaskTerminator* _terminator;
  uint _worker_id;

public:
  PSEvacuateFollowersClosure(PSPromotionManager* pm, TaskTerminator* terminator, uint worker_id)
    : _promotion_manager(pm), _terminator(terminator), _worker_id(worker_id) {}

  virtual void do_void() {
    assert(_promotion_manager != nullptr, "Sanity");
    _promotion_manager->drain_stacks(true);
    guarantee(_promotion_manager->stacks_empty(),
              "stacks should be empty at this point");

    if (_terminator != nullptr) {
      steal_work(*_terminator, _worker_id);
    }
  }
};

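// Proxy task handed to the reference processor: it wires the scavenge's
// is-alive, keep-alive and complete-gc closures into the shared reference
// processing machinery.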
class ParallelScavengeRefProcProxyTask : public RefProcProxyTask {
  TaskTerminator _terminator;

public:
  ParallelScavengeRefProcProxyTask(uint max_workers)
    : RefProcProxyTask("ParallelScavengeRefProcProxyTask", max_workers),
      _terminator(max_workers, ParCompactionManager::marking_stacks()) {}

  void work(uint worker_id) override {
    assert(worker_id < _max_workers, "sanity");
    PSPromotionManager* promotion_manager = (_tm == RefProcThreadModel::Single) ? PSPromotionManager::vm_thread_promotion_manager() : PSPromotionManager::gc_thread_promotion_manager(worker_id);
    PSIsAliveClosure is_alive;
    PSKeepAliveClosure keep_alive(promotion_manager);
    BarrierEnqueueDiscoveredFieldClosure enqueue;
    PSEvacuateFollowersClosure complete_gc(promotion_manager, (_marks_oops_alive && _tm == RefProcThreadModel::Multi) ? &_terminator : nullptr, worker_id);
    _rp_task->rp_work(worker_id, &is_alive, &keep_alive, &enqueue, &complete_gc);
  }

  void prepare_run_task_hook() override {
    _terminator.reset_for_reuse(_queue_count);
  }
};

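// Scavenges the oops of a single mutator or VM thread and then drains the
// worker's promotion stacks.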
class PSThreadRootsTaskClosure : public ThreadClosure {
  PSPromotionManager* _pm;
public:
  PSThreadRootsTaskClosure(PSPromotionManager* pm) : _pm(pm) {}
  virtual void do_thread(Thread* thread) {
    assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");

    PSScavengeRootsClosure roots_closure(_pm);

    // No need to visit nmethods, because they are handled by ScavengableNMethods.
    thread->oops_do(&roots_closure, nullptr);

    // Do the real work
    _pm->drain_stacks(false);
  }
};

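// The main root-scanning task of a scavenge. Each worker scans its share of
// the old-to-young card table, claims root categories, walks thread stacks
// and OopStorages, and finally joins the work-stealing termination protocol.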
class ScavengeRootsTask : public WorkerTask {
  ThreadsClaimTokenScope _threads_claim_token_scope; // needed for Threads::possibly_parallel_threads_do
  OopStorageSetStrongParState<false /* concurrent */, false /* is_const */> _oop_storage_strong_par_state;
  SequentialSubTasksDone _subtasks;
  PSOldGen* _old_gen;
  HeapWord* _gen_top;
  uint _active_workers;
  bool _is_old_gen_empty;
  TaskTerminator _terminator;

public:
  ScavengeRootsTask(PSOldGen* old_gen,
                    uint active_workers) :
      WorkerTask("ScavengeRootsTask"),
      _threads_claim_token_scope(),
      _subtasks(ParallelRootType::sentinel),
      _old_gen(old_gen),
      _gen_top(old_gen->object_space()->top()),
      _active_workers(active_workers),
      _is_old_gen_empty(old_gen->object_space()->is_empty()),
      _terminator(active_workers, PSPromotionManager::vm_thread_promotion_manager()->stack_array_depth()) {
    if (!_is_old_gen_empty) {
      PSCardTable* card_table = ParallelScavengeHeap::heap()->card_table();
      card_table->pre_scavenge(active_workers);
    }
  }

  virtual void work(uint worker_id) {
    assert(worker_id < _active_workers, "Sanity");
    ResourceMark rm;
    PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(worker_id);

    if (!_is_old_gen_empty) {
      // There are only old-to-young pointers if there are objects
      // in the old gen.
      {
        PSCardTable* card_table = ParallelScavengeHeap::heap()->card_table();

        // The top of the old gen changes during scavenge when objects are promoted.
        card_table->scavenge_contents_parallel(_old_gen->start_array(),
                                               _old_gen->object_space()->bottom(),
                                               _gen_top,
                                               pm,
                                               worker_id,
                                               _active_workers);

        // Do the real work
        pm->drain_stacks(false);
      }
    }

    for (uint root_type = 0; _subtasks.try_claim_task(root_type); /* empty */ ) {
      scavenge_roots_work(static_cast<ParallelRootType::Value>(root_type), worker_id);
    }

    PSThreadRootsTaskClosure thread_closure(pm);
    Threads::possibly_parallel_threads_do(_active_workers > 1 /* is_par */, &thread_closure);

    // Scavenge OopStorages
    {
      PSScavengeRootsClosure root_closure(pm);
      _oop_storage_strong_par_state.oops_do(&root_closure);

      // Do the real work
      pm->drain_stacks(false);
    }

    // When more than one worker is active, finish with steal_work():
    // PSPromotionManager::drain_stacks_depth() does not fully drain its
    // stacks and relies on steal_work() to complete the draining when
    // ParallelGCThreads > 1.

    if (_active_workers > 1) {
      steal_work(_terminator, worker_id);
    }
  }
};

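// Perform a young (minor) collection at a safepoint, in the VM thread.
// Returns true if the scavenge completed without a promotion failure.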
bool PSScavenge::invoke(bool clear_soft_refs) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  IsSTWGCActiveMark mark;

  _gc_timer.register_gc_start();

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();

  SvcGCMarker sgcm(SvcGCMarker::MINOR);
  GCIdMark gc_id_mark;
  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  assert(young_gen->to_space()->is_empty(), "precondition");

  heap->increment_total_collections();

  // Gather the feedback data for eden occupancy.
  young_gen->eden_space()->accumulate_statistics();

  heap->print_before_gc();
  heap->trace_heap_before_gc(&_gc_tracer);

  assert(!NeverTenure || _tenuring_threshold == markWord::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  // Fill in TLABs
  heap->ensure_parsability(true); // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    Universe::verify("Before GC");
  }

  {
    ResourceMark rm;

    GCTraceCPUTime tcpu(&_gc_tracer);
    GCTraceTime(Info, gc) tm("Pause Young", nullptr, gc_cause, true);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(heap->young_gc_manager(), gc_cause, "end of minor GC");

    if (log_is_enabled(Debug, gc, heap, exit)) {
      accumulated_time()->start();
    }

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif

    reference_processor()->start_discovery(clear_soft_refs);

    const PreGenGCValues pre_gc_values = heap->get_pre_gc_values();

    // Reset our survivor overflow.
    set_survivor_overflow(false);

    const uint active_workers =
      WorkerPolicy::calc_active_workers(ParallelScavengeHeap::heap()->workers().max_workers(),
                                        ParallelScavengeHeap::heap()->workers().active_workers(),
                                        Threads::number_of_non_daemon_threads());
    ParallelScavengeHeap::heap()->workers().set_active_workers(active_workers);

    PSPromotionManager::pre_scavenge();

    {
      GCTraceTime(Debug, gc, phases) tm("Scavenge", &_gc_timer);

      ScavengeRootsTask task(old_gen, active_workers);
      ParallelScavengeHeap::heap()->workers().run_task(&task);
    }

    // Process reference objects discovered during scavenge
    {
      GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);

      ReferenceProcessorStats stats;
      ReferenceProcessorPhaseTimes pt(&_gc_timer, reference_processor()->max_num_queues());

      ParallelScavengeRefProcProxyTask task(reference_processor()->max_num_queues());
      stats = reference_processor()->process_discovered_references(task, &ParallelScavengeHeap::heap()->workers(), pt);

      _gc_tracer.report_gc_reference_stats(stats);
      pt.print_all_references();
    }

    {
      GCTraceTime(Debug, gc, phases) tm("Weak Processing", &_gc_timer);
      PSAdjustWeakRootsClosure root_closure;
      WeakProcessor::weak_oops_do(&heap->workers(), &_is_alive_closure, &root_closure, 1);
    }

    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    promotion_failure_occurred = PSPromotionManager::post_scavenge(_gc_tracer);
    if (promotion_failure_occurred) {
      clean_up_failed_promotion();
      log_info(gc, promotion)("Promotion failed");
    }

    _gc_tracer.report_tenuring_threshold(tenuring_threshold());

    // This is an underestimate, since it excludes time spent on auto-resizing;
    // the most expensive part of auto-resizing is the commit/uncommit OS calls.
    size_policy->minor_collection_end(young_gen->eden_space()->capacity_in_bytes());

    if (!promotion_failure_occurred) {
      // Swap the survivor spaces.
      young_gen->eden_space()->clear(SpaceDecorator::Mangle);
      young_gen->from_space()->clear(SpaceDecorator::Mangle);
      young_gen->swap_spaces();

      size_t survived = young_gen->from_space()->used_in_bytes();
      assert(old_gen->used_in_bytes() >= pre_gc_values.old_gen_used(), "inv");
      size_t promoted = old_gen->used_in_bytes() - pre_gc_values.old_gen_used();
      size_policy->update_averages(_survivor_overflow, survived, promoted);
      size_policy->sample_old_gen_used_bytes(old_gen->used_in_bytes());

      if (UseAdaptiveSizePolicy) {
        _tenuring_threshold = size_policy->compute_tenuring_threshold(_survivor_overflow,
                                                                      _tenuring_threshold);

        log_debug(gc, age)("New threshold %u (max threshold %u)", _tenuring_threshold, MaxTenuringThreshold);

        if (young_gen->is_from_to_layout()) {
          size_policy->print_stats(_survivor_overflow);
          heap->resize_after_young_gc(_survivor_overflow);
        }

        if (UsePerfData) {
          GCPolicyCounters* counters = ParallelScavengeHeap::gc_policy_counters();
          counters->tenuring_threshold()->set_value(_tenuring_threshold);
          counters->desired_survivor_size()->set_value(young_gen->from_space()->capacity_in_bytes());
        }

        {
          // In case the counter overflows
          uint num_minor_gcs = heap->total_collections() > heap->total_full_collections()
                                 ? heap->total_collections() - heap->total_full_collections()
                                 : 1;
          size_policy->decay_supplemental_growth(num_minor_gcs);
        }
      }

      // Update the structure of the eden. With NUMA-eden, CPU hotplugging or
      // offlining can change the heap layout, so make sure eden is reshaped
      // if that is the case. update() also causes adaptive NUMA chunk resizing.
      assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
      young_gen->eden_space()->update();

      heap->resize_all_tlabs();

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");

      heap->gc_epilogue(false);
    }

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif

    size_policy->record_gc_pause_end_instant();

    if (log_is_enabled(Debug, gc, heap, exit)) {
      accumulated_time()->stop();
    }

    heap->print_heap_change(pre_gc_values);

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    Universe::verify("After GC");
  }

  heap->print_after_gc();
  heap->trace_heap_after_gc(&_gc_tracer);

  _gc_timer.register_gc_end();

  _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());

  return !promotion_failure_occurred;
}

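// A promotion failure leaves self-forwarded objects in the young generation;
// restore their preserved header marks so the heap is parsable again.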
void PSScavenge::clean_up_failed_promotion() {
  PSPromotionManager::restore_preserved_marks();

  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(ParallelScavengeHeap::heap()->reset_promotion_should_fail();)
}

// Adaptive size policy support. When the young/old generation boundary moves,
// _young_generation_boundary must be reset.
void PSScavenge::set_young_generation_boundary(HeapWord* v) {
  _young_generation_boundary = v;
  if (UseCompressedOops) {
    _young_generation_boundary_compressed = (uintptr_t)CompressedOops::encode(cast_to_oop(v));
  }
}

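// One-time setup, run after argument parsing: pick the initial tenuring
// threshold, record the young/old generation boundary, and create the
// reference processor and the collector counters.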
void PSScavenge::initialize() {
  // Arguments must have been parsed

  if (AlwaysTenure || NeverTenure) {
    assert(MaxTenuringThreshold == 0 || MaxTenuringThreshold == markWord::max_age + 1,
           "MaxTenuringThreshold should be 0 or markWord::max_age + 1, but is %d", (int) MaxTenuringThreshold);
    _tenuring_threshold = MaxTenuringThreshold;
  } else {
    // We want to smooth out our startup times for the AdaptiveSizePolicy
    _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
                                                    MaxTenuringThreshold;
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Set boundary between young_gen and old_gen
  assert(old_gen->reserved().end() == young_gen->reserved().start(),
         "old_gen must end where young_gen begins");
  set_young_generation_boundary(young_gen->reserved().start());

  // Initialize ref handling object for scavenging.
  _span_based_discoverer.set_span(young_gen->reserved());
  _ref_processor =
    new ReferenceProcessor(&_span_based_discoverer,
                           ParallelGCThreads,   // mt processing degree
                           ParallelGCThreads,   // mt discovery degree
                           false,               // concurrent_discovery
                           &_is_alive_closure); // header provides liveness info

  // Cache the cardtable
  _card_table = heap->card_table();

  _counters = new CollectorCounters("Parallel young collection pauses", 0);
}