/*
 * Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
#include "code/codeCache.hpp"
#include "compiler/oopMap.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psClosure.inline.hpp"
#include "gc/parallel/psCompactionManager.hpp"
#include "gc/parallel/psCompactionManagerNew.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.inline.hpp"
#include "gc/parallel/psRootType.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageParState.inline.hpp"
#include "gc/shared/oopStorageSetParState.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/taskTerminator.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/shared/workerUtils.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/threads.hpp"
#include "runtime/vmOperations.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "utilities/stack.inline.hpp"

SpanSubjectToDiscoveryClosure PSScavenge::_span_based_discoverer;
ReferenceProcessor*           PSScavenge::_ref_processor = nullptr;
PSCardTable*                  PSScavenge::_card_table = nullptr;
bool                          PSScavenge::_survivor_overflow = false;
uint                          PSScavenge::_tenuring_threshold = 0;
HeapWord*                     PSScavenge::_young_generation_boundary = nullptr;
uintptr_t                     PSScavenge::_young_generation_boundary_compressed = 0;
elapsedTimer                  PSScavenge::_accumulated_time;
STWGCTimer                    PSScavenge::_gc_timer;
ParallelScavengeTracer        PSScavenge::_gc_tracer;
CollectorCounters*            PSScavenge::_counters = nullptr;

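// Scavenge a single class of strong roots on behalf of one GC worker. Each
// root type is claimed by exactly one worker via the SequentialSubTasksDone
// instance in ScavengeRootsTask::work() below; discovered oops are pushed
// onto the worker's promotion-manager stack and drained before returning.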
static void scavenge_roots_work(ParallelRootType::Value root_type, uint worker_id) {
  assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");

  PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(worker_id);
  PSPromoteRootsClosure  roots_to_old_closure(pm);

  switch (root_type) {
    case ParallelRootType::class_loader_data:
      {
        PSScavengeCLDClosure cld_closure(pm);
        ClassLoaderDataGraph::cld_do(&cld_closure);
      }
      break;

    case ParallelRootType::code_cache:
      {
        NMethodToOopClosure code_closure(&roots_to_old_closure, NMethodToOopClosure::FixRelocations);
        ScavengableNMethods::nmethods_do(&code_closure);
      }
      break;

    case ParallelRootType::sentinel:
    DEBUG_ONLY(default:) // DEBUG_ONLY hack: omitting 'default' on release builds turns an unhandled enum value into a compile error (-Wswitch); debug builds get a runtime check instead
      fatal("Bad enumeration value: %u", root_type);
      break;
  }

  // Do the real work
  pm->drain_stacks(false);
}

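// Work-stealing termination protocol: after draining its own depth-first
// stack, a worker repeatedly tries to steal tasks queued by other workers.
// Only when no task can be stolen does it offer termination; the loop exits
// once all workers have agreed to terminate, which implies all stacks are
// empty.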
static void steal_work(TaskTerminator& terminator, uint worker_id) {
  assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");

  PSPromotionManager* pm =
    PSPromotionManager::gc_thread_promotion_manager(worker_id);
  pm->drain_stacks(true);
  guarantee(pm->stacks_empty(),
            "stacks should be empty at this point");

  while (true) {
    ScannerTask task;
    if (PSPromotionManager::steal_depth(worker_id, task)) {
      pm->process_popped_location_depth(task, true);
      pm->drain_stacks(true);
    } else {
      if (terminator.offer_termination()) {
        break;
      }
    }
  }
  guarantee(pm->stacks_empty(), "stacks should be empty at this point");
}

// Defined before its first use (PSScavenge::_is_alive_closure below).
// During a scavenge an object is alive iff it lies outside the young gen
// (and so is not subject to copying) or has already been forwarded.
class PSIsAliveClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) {
    return (!PSScavenge::is_obj_in_young(p)) || p->is_forwarded();
  }
};

PSIsAliveClosure PSScavenge::_is_alive_closure;

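// Keep-alive closure used during reference processing: each reachable
// referent still sitting in from-space is copied (or promoted) and pushed
// for further scanning via the promotion manager's safe barrier.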
class PSKeepAliveClosure: public OopClosure {
  PSPromotionManager* _promotion_manager;

public:
  PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
    assert(_promotion_manager != nullptr, "Sanity");
  }

  template <class T> void do_oop_work(T* p) {
#ifdef ASSERT
    // Referent must be non-null and in from-space
    oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
    assert(oopDesc::is_oop(obj), "referent must be an oop");
    assert(PSScavenge::is_obj_in_young(obj), "must be in young-gen");
    assert(!PSScavenge::is_obj_in_to_space(obj), "must be in from-space");
#endif

    _promotion_manager->copy_and_push_safe_barrier</*promote_immediately=*/false>(p);
  }
  virtual void do_oop(oop* p)       { PSKeepAliveClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
};

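// Completes the transitive closure over objects made live by reference
// processing: drains this worker's stacks and, when running multi-threaded,
// participates in work stealing until global termination.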
class PSEvacuateFollowersClosure: public VoidClosure {
 private:
  PSPromotionManager* _promotion_manager;
  TaskTerminator* _terminator;
  uint _worker_id;

 public:
  PSEvacuateFollowersClosure(PSPromotionManager* pm, TaskTerminator* terminator, uint worker_id)
    : _promotion_manager(pm), _terminator(terminator), _worker_id(worker_id) {}

  virtual void do_void() {
    assert(_promotion_manager != nullptr, "Sanity");
    _promotion_manager->drain_stacks(true);
    guarantee(_promotion_manager->stacks_empty(),
              "stacks should be empty at this point");

    if (_terminator != nullptr) {
      steal_work(*_terminator, _worker_id);
    }
  }
};

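// Proxy task handed to the ReferenceProcessor. It adapts the scavenge
// closures (is-alive, keep-alive, evacuate-followers) to the generic
// reference-processing worker interface, selecting the VM-thread or
// GC-worker promotion manager depending on the threading model.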
class ParallelScavengeRefProcProxyTask : public RefProcProxyTask {
  TaskTerminator _terminator;

public:
  ParallelScavengeRefProcProxyTask(uint max_workers)
    : RefProcProxyTask("ParallelScavengeRefProcProxyTask", max_workers),
      _terminator(max_workers, UseCompactObjectHeaders ? ParCompactionManagerNew::marking_stacks() : ParCompactionManager::marking_stacks()) {}

  void work(uint worker_id) override {
    assert(worker_id < _max_workers, "sanity");
    PSPromotionManager* promotion_manager = (_tm == RefProcThreadModel::Single) ? PSPromotionManager::vm_thread_promotion_manager() : PSPromotionManager::gc_thread_promotion_manager(worker_id);
    PSIsAliveClosure is_alive;
    PSKeepAliveClosure keep_alive(promotion_manager);
    BarrierEnqueueDiscoveredFieldClosure enqueue;
    PSEvacuateFollowersClosure complete_gc(promotion_manager, (_marks_oops_alive && _tm == RefProcThreadModel::Multi) ? &_terminator : nullptr, worker_id);
    _rp_task->rp_work(worker_id, &is_alive, &keep_alive, &enqueue, &complete_gc);
  }

  void prepare_run_task_hook() override {
    _terminator.reset_for_reuse(_queue_count);
  }
};

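// Scans the oops of a single Java/VM thread. nmethods referenced from the
// thread's stack are skipped here; they are covered by the
// ScavengableNMethods root set.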
class PSThreadRootsTaskClosure : public ThreadClosure {
  PSPromotionManager* _pm;
public:
  PSThreadRootsTaskClosure(PSPromotionManager* pm) : _pm(pm) {}
  virtual void do_thread(Thread* thread) {
    assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");

    PSScavengeRootsClosure roots_closure(_pm);

    // No need to visit nmethods, because they are handled by ScavengableNMethods.
    thread->oops_do(&roots_closure, nullptr);

    // Do the real work
    _pm->drain_stacks(false);
  }
};

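// The main parallel root-scanning task. Each worker, in order:
//  1. scans dirty cards for old-to-young pointers (if the old gen is
//     non-empty),
//  2. claims and scans the enumerated root types,
//  3. scans the thread stacks,
//  4. scans the strong OopStorage sets,
// and finally joins the work-stealing/termination phase when more than one
// worker is active.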
class ScavengeRootsTask : public WorkerTask {
  ThreadsClaimTokenScope _threads_claim_token_scope; // needed for Threads::possibly_parallel_threads_do
  OopStorageSetStrongParState<false /* concurrent */, false /* is_const */> _oop_storage_strong_par_state;
  SequentialSubTasksDone _subtasks;
  PSOldGen* _old_gen;
  HeapWord* _gen_top;
  uint _active_workers;
  bool _is_old_gen_empty;
  TaskTerminator _terminator;

public:
  ScavengeRootsTask(PSOldGen* old_gen,
                    uint active_workers) :
    WorkerTask("ScavengeRootsTask"),
    _threads_claim_token_scope(),
    _subtasks(ParallelRootType::sentinel),
    _old_gen(old_gen),
    _gen_top(old_gen->object_space()->top()),
    _active_workers(active_workers),
    _is_old_gen_empty(old_gen->object_space()->is_empty()),
    _terminator(active_workers, PSPromotionManager::vm_thread_promotion_manager()->stack_array_depth()) {
    if (!_is_old_gen_empty) {
      PSCardTable* card_table = ParallelScavengeHeap::heap()->card_table();
      card_table->pre_scavenge(active_workers);
    }
  }

  virtual void work(uint worker_id) {
    assert(worker_id < _active_workers, "Sanity");
    ResourceMark rm;
    PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(worker_id);

    if (!_is_old_gen_empty) {
      // There are only old-to-young pointers if there are objects
      // in the old gen.
      {
        PSCardTable* card_table = ParallelScavengeHeap::heap()->card_table();

        // The top of the old gen changes during scavenge when objects are promoted.
        card_table->scavenge_contents_parallel(_old_gen->start_array(),
                                               _old_gen->object_space()->bottom(),
                                               _gen_top,
                                               pm,
                                               worker_id,
                                               _active_workers);

        // Do the real work
        pm->drain_stacks(false);
      }
    }

    for (uint root_type = 0; _subtasks.try_claim_task(root_type); /* empty */ ) {
      scavenge_roots_work(static_cast<ParallelRootType::Value>(root_type), worker_id);
    }

    PSThreadRootsTaskClosure thread_closure(pm);
    Threads::possibly_parallel_threads_do(_active_workers > 1 /* is_par */, &thread_closure);

    // Scavenge OopStorages
    {
      PSScavengeRootsClosure root_closure(pm);
      _oop_storage_strong_par_state.oops_do(&root_closure);

      // Do the real work
      pm->drain_stacks(false);
    }

    // When more than one worker is active, a steal_work() phase is required:
    // PSPromotionManager::drain_stacks_depth() does not fully drain its
    // stacks if ParallelGCThreads > 1 and relies on steal_work() to complete
    // the draining.

    if (_active_workers > 1) {
      steal_work(_terminator, worker_id);
    }
  }
};

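// Entry point for a stop-the-world young collection; must run in the VM
// thread at a safepoint. Returns true iff the scavenge succeeded, i.e. no
// promotion failure occurred.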
bool PSScavenge::invoke(bool clear_soft_refs) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  IsSTWGCActiveMark mark;

  _gc_timer.register_gc_start();

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();

  SvcGCMarker sgcm(SvcGCMarker::MINOR);
  GCIdMark gc_id_mark;
  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  assert(young_gen->to_space()->is_empty(), "precondition");

  heap->increment_total_collections();

  // Gather the feedback data for eden occupancy.
  young_gen->eden_space()->accumulate_statistics();

  heap->print_before_gc();
  heap->trace_heap_before_gc(&_gc_tracer);

  assert(!NeverTenure || _tenuring_threshold == markWord::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  // Fill in TLABs
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    Universe::verify("Before GC");
  }

  {
    ResourceMark rm;

    GCTraceCPUTime tcpu(&_gc_tracer);
    GCTraceTime(Info, gc) tm("Pause Young", nullptr, gc_cause, true);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(heap->young_gc_manager(), gc_cause, "end of minor GC");

    if (log_is_enabled(Debug, gc, heap, exit)) {
      accumulated_time()->start();
    }

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif

    reference_processor()->start_discovery(clear_soft_refs);

    const PreGenGCValues pre_gc_values = heap->get_pre_gc_values();

    // Reset our survivor overflow.
    set_survivor_overflow(false);

    const uint active_workers =
      WorkerPolicy::calc_active_workers(ParallelScavengeHeap::heap()->workers().max_workers(),
                                        ParallelScavengeHeap::heap()->workers().active_workers(),
                                        Threads::number_of_non_daemon_threads());
    ParallelScavengeHeap::heap()->workers().set_active_workers(active_workers);

    PSPromotionManager::pre_scavenge();

    {
      GCTraceTime(Debug, gc, phases) tm("Scavenge", &_gc_timer);

      ScavengeRootsTask task(old_gen, active_workers);
      ParallelScavengeHeap::heap()->workers().run_task(&task);
    }

    // Process reference objects discovered during scavenge
    {
      GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);

      ReferenceProcessorStats stats;
      ReferenceProcessorPhaseTimes pt(&_gc_timer, reference_processor()->max_num_queues());

      ParallelScavengeRefProcProxyTask task(reference_processor()->max_num_queues());
      stats = reference_processor()->process_discovered_references(task, &ParallelScavengeHeap::heap()->workers(), pt);

      _gc_tracer.report_gc_reference_stats(stats);
      pt.print_all_references();
    }

    {
      GCTraceTime(Debug, gc, phases) tm("Weak Processing", &_gc_timer);
      PSAdjustWeakRootsClosure root_closure;
      WeakProcessor::weak_oops_do(&heap->workers(), &_is_alive_closure, &root_closure, 1);
    }

    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    promotion_failure_occurred = PSPromotionManager::post_scavenge(_gc_tracer);
    if (promotion_failure_occurred) {
      clean_up_failed_promotion();
      log_info(gc, promotion)("Promotion failed");
    }

    _gc_tracer.report_tenuring_threshold(tenuring_threshold());

    // This is an underestimate, since it excludes the time spent on
    // auto-resizing, whose most expensive part is the commit/uncommit OS API
    // calls.
    size_policy->minor_collection_end(young_gen->eden_space()->capacity_in_bytes());

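    // On success all live young objects now sit in to-space or the old gen:
    // clear eden and the old from-space, then swap the survivor spaces so
    // the just-populated to-space becomes the new from-space.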
    if (!promotion_failure_occurred) {
      // Swap the survivor spaces.
      young_gen->eden_space()->clear(SpaceDecorator::Mangle);
      young_gen->from_space()->clear(SpaceDecorator::Mangle);
      young_gen->swap_spaces();

      size_t survived = young_gen->from_space()->used_in_bytes();
      assert(old_gen->used_in_bytes() >= pre_gc_values.old_gen_used(), "inv");
      size_t promoted = old_gen->used_in_bytes() - pre_gc_values.old_gen_used();
      size_policy->update_averages(_survivor_overflow, survived, promoted);
      size_policy->sample_old_gen_used_bytes(old_gen->used_in_bytes());

      if (UseAdaptiveSizePolicy) {
        _tenuring_threshold = size_policy->compute_tenuring_threshold(_survivor_overflow,
                                                                      _tenuring_threshold);

        log_debug(gc, age)("New threshold %u (max threshold %u)", _tenuring_threshold, MaxTenuringThreshold);

        if (young_gen->is_from_to_layout()) {
          size_policy->print_stats(_survivor_overflow);
          heap->resize_after_young_gc(_survivor_overflow);
        }

        if (UsePerfData) {
          GCPolicyCounters* counters = ParallelScavengeHeap::gc_policy_counters();
          counters->tenuring_threshold()->set_value(_tenuring_threshold);
          counters->desired_survivor_size()->set_value(young_gen->from_space()->capacity_in_bytes());
        }

        {
          // In case the counter overflows
          uint num_minor_gcs = heap->total_collections() > heap->total_full_collections()
                                 ? heap->total_collections() - heap->total_full_collections()
                                 : 1;
          size_policy->decay_supplemental_growth(num_minor_gcs);
        }
      }

      // Update the structure of eden. With NUMA-eden, CPU hotplugging or
      // offlining can change the heap layout; make sure eden is reshaped if
      // that is the case. update() also triggers adaptive NUMA chunk
      // resizing.
      assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
      young_gen->eden_space()->update();

      heap->resize_all_tlabs();

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");

      heap->gc_epilogue(false);
    }

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif

    size_policy->record_gc_pause_end_instant();

    if (log_is_enabled(Debug, gc, heap, exit)) {
      accumulated_time()->stop();
    }

    heap->print_heap_change(pre_gc_values);

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    Universe::verify("After GC");
  }

  heap->print_after_gc();
  heap->trace_heap_after_gc(&_gc_tracer);

  _gc_timer.register_gc_end();

  _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());

  return !promotion_failure_occurred;
}

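// After a promotion failure, restore the marks that were preserved when
// objects were self-forwarded during the failed scavenge, so object headers
// are valid again before mutators resume.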
void PSScavenge::clean_up_failed_promotion() {
  PSPromotionManager::restore_preserved_marks();

  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(ParallelScavengeHeap::heap()->reset_promotion_should_fail();)
}

// Adaptive size policy support. The boundary is cached in both raw and
// compressed form so is_obj_in_young() can test membership with a single
// compare under either oop encoding.
void PSScavenge::set_young_generation_boundary(HeapWord* v) {
  _young_generation_boundary = v;
  if (UseCompressedOops) {
    _young_generation_boundary_compressed = (uintptr_t)CompressedOops::encode(cast_to_oop(v));
  }
}

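// One-time setup, called once arguments have been parsed. Note the flag
// interplay: AlwaysTenure forces a threshold of 0 (promote on first copy),
// NeverTenure forces markWord::max_age + 1 (never promote by age), and
// otherwise adaptive sizing starts from InitialTenuringThreshold.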
void PSScavenge::initialize() {
  // Arguments must have been parsed

  if (AlwaysTenure || NeverTenure) {
    assert(MaxTenuringThreshold == 0 || MaxTenuringThreshold == markWord::max_age + 1,
           "MaxTenuringThreshold should be 0 or markWord::max_age + 1, but is %d", (int) MaxTenuringThreshold);
    _tenuring_threshold = MaxTenuringThreshold;
  } else {
    // We want to smooth out our startup times for the AdaptiveSizePolicy
    _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
                                                    MaxTenuringThreshold;
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Set boundary between young_gen and old_gen
  assert(old_gen->reserved().end() == young_gen->reserved().start(),
         "young gen must be directly above old gen");
  set_young_generation_boundary(young_gen->reserved().start());

  // Initialize ref handling object for scavenging.
  _span_based_discoverer.set_span(young_gen->reserved());
  _ref_processor =
    new ReferenceProcessor(&_span_based_discoverer,
                           ParallelGCThreads,          // mt processing degree
                           ParallelGCThreads,          // mt discovery degree
                           false,                      // concurrent_discovery
                           &_is_alive_closure);        // header provides liveness info

  // Cache the cardtable
  _card_table = heap->card_table();

  _counters = new CollectorCounters("Parallel young collection pauses", 0);
}