/*
 * Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
#include "code/codeCache.hpp"
#include "compiler/oopMap.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psClosure.inline.hpp"
#include "gc/parallel/psCompactionManager.hpp"
#include "gc/parallel/psCompactionManagerNew.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.inline.hpp"
#include "gc/parallel/psRootType.hpp"
#include "gc/parallel/psScavenge.inline.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageParState.inline.hpp"
#include "gc/shared/oopStorageSetParState.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskTerminator.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/shared/workerUtils.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/threads.hpp"
#include "runtime/vmOperations.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "utilities/stack.inline.hpp"

SpanSubjectToDiscoveryClosure PSScavenge::_span_based_discoverer;
ReferenceProcessor*           PSScavenge::_ref_processor = nullptr;
PSCardTable*                  PSScavenge::_card_table = nullptr;
bool                          PSScavenge::_survivor_overflow = false;
uint                          PSScavenge::_tenuring_threshold = 0;
HeapWord*                     PSScavenge::_young_generation_boundary = nullptr;
uintptr_t                     PSScavenge::_young_generation_boundary_compressed = 0;
elapsedTimer                  PSScavenge::_accumulated_time;
STWGCTimer                    PSScavenge::_gc_timer;
ParallelScavengeTracer        PSScavenge::_gc_tracer;
CollectorCounters*            PSScavenge::_counters = nullptr;

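// Scavenge a single category of strong roots. Oops discovered while scanning
// are pushed onto the worker's promotion-manager stacks and drained before
// returning.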
static void scavenge_roots_work(ParallelRootType::Value root_type, uint worker_id) {
  assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");

  PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(worker_id);
  PSPromoteRootsClosure  roots_to_old_closure(pm);

  switch (root_type) {
    case ParallelRootType::class_loader_data:
      {
        PSScavengeCLDClosure cld_closure(pm);
        ClassLoaderDataGraph::cld_do(&cld_closure);
      }
      break;

    case ParallelRootType::code_cache:
      {
        MarkingNMethodClosure code_closure(&roots_to_old_closure, NMethodToOopClosure::FixRelocations, false /* keepalive nmethods */);
        ScavengableNMethods::nmethods_do(&code_closure);
      }
      break;

    case ParallelRootType::sentinel:
    DEBUG_ONLY(default:) // DEBUG_ONLY hack will create compile error on release builds (-Wswitch) and runtime check on debug builds
      fatal("Bad enumeration value: %u", root_type);
      break;
  }

  // Do the real work
  pm->drain_stacks(false);
}

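// Work-stealing loop: once a worker's own stacks are drained, it repeatedly
// tries to steal tasks from other workers' depth-first stacks, and only
// returns when the TaskTerminator agrees that all workers are done.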
static void steal_work(TaskTerminator& terminator, uint worker_id) {
  assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");

  PSPromotionManager* pm =
    PSPromotionManager::gc_thread_promotion_manager(worker_id);
  pm->drain_stacks(true);
  guarantee(pm->stacks_empty(),
            "stacks should be empty at this point");

  while (true) {
    ScannerTask task;
    if (PSPromotionManager::steal_depth(worker_id, task)) {
      pm->process_popped_location_depth(task, true);
      pm->drain_stacks_depth(true);
    } else {
      if (terminator.offer_termination()) {
        break;
      }
    }
  }
  guarantee(pm->stacks_empty(), "stacks should be empty at this point");
}

// An object is considered live if it is outside young-gen (old-gen objects are
// not collected by a scavenge), or if it has already been forwarded (copied)
// during this scavenge.
class PSIsAliveClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) {
    return (!PSScavenge::is_obj_in_young(p)) || p->is_forwarded();
  }
};

PSIsAliveClosure PSScavenge::_is_alive_closure;

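// Keep-alive closure used during reference processing: promotes the referent
// out of from-space via the promotion manager so that it survives the scavenge.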
class PSKeepAliveClosure: public OopClosure {
protected:
  MutableSpace* _to_space;
  PSPromotionManager* _promotion_manager;

public:
  PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    _to_space = heap->young_gen()->to_space();

    assert(_promotion_manager != nullptr, "Sanity");
  }

  template <class T> void do_oop_work(T* p) {
#ifdef ASSERT
    // Referent must be non-null and in from-space
    oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
    assert(oopDesc::is_oop(obj), "referent must be an oop");
    assert(PSScavenge::is_obj_in_young(obj), "must be in young-gen");
    assert(!PSScavenge::is_obj_in_to_space(obj), "must be in from-space");
#endif

    _promotion_manager->copy_and_push_safe_barrier</*promote_immediately=*/false>(p);
  }
  virtual void do_oop(oop* p)       { PSKeepAliveClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
};

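// Completion closure for reference processing: drains the promotion manager's
// stacks and, when running multi-threaded, joins the work-stealing loop until
// global termination.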
class PSEvacuateFollowersClosure: public VoidClosure {
 private:
  PSPromotionManager* _promotion_manager;
  TaskTerminator* _terminator;
  uint _worker_id;

 public:
  PSEvacuateFollowersClosure(PSPromotionManager* pm, TaskTerminator* terminator, uint worker_id)
    : _promotion_manager(pm), _terminator(terminator), _worker_id(worker_id) {}

  virtual void do_void() {
    assert(_promotion_manager != nullptr, "Sanity");
    _promotion_manager->drain_stacks(true);
    guarantee(_promotion_manager->stacks_empty(),
              "stacks should be empty at this point");

    if (_terminator != nullptr) {
      steal_work(*_terminator, _worker_id);
    }
  }
};

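// Proxy task handed to the reference processor. It adapts both the single-
// and multi-threaded reference-processing models to the scavenger's
// is-alive/keep-alive/evacuate-followers closures.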
class ParallelScavengeRefProcProxyTask : public RefProcProxyTask {
  TaskTerminator _terminator;

public:
  ParallelScavengeRefProcProxyTask(uint max_workers)
    : RefProcProxyTask("ParallelScavengeRefProcProxyTask", max_workers),
      _terminator(max_workers, UseCompactObjectHeaders ? ParCompactionManagerNew::marking_stacks() : ParCompactionManager::marking_stacks()) {}

  void work(uint worker_id) override {
    assert(worker_id < _max_workers, "sanity");
    PSPromotionManager* promotion_manager = (_tm == RefProcThreadModel::Single) ? PSPromotionManager::vm_thread_promotion_manager() : PSPromotionManager::gc_thread_promotion_manager(worker_id);
    PSIsAliveClosure is_alive;
    PSKeepAliveClosure keep_alive(promotion_manager);
    BarrierEnqueueDiscoveredFieldClosure enqueue;
    PSEvacuateFollowersClosure complete_gc(promotion_manager, (_marks_oops_alive && _tm == RefProcThreadModel::Multi) ? &_terminator : nullptr, worker_id);
    _rp_task->rp_work(worker_id, &is_alive, &keep_alive, &enqueue, &complete_gc);
  }

  void prepare_run_task_hook() override {
    _terminator.reset_for_reuse(_queue_count);
  }
};

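// Scans the stack and nmethod roots of one Java thread, then drains the
// worker's promotion-manager stacks.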
class PSThreadRootsTaskClosure : public ThreadClosure {
  uint _worker_id;
public:
  PSThreadRootsTaskClosure(uint worker_id) : _worker_id(worker_id) { }
  virtual void do_thread(Thread* thread) {
    assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");

    PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(_worker_id);
    PSScavengeRootsClosure roots_closure(pm);
    MarkingNMethodClosure roots_in_nmethods(&roots_closure, NMethodToOopClosure::FixRelocations, false /* keepalive nmethods */);

    thread->oops_do(&roots_closure, &roots_in_nmethods);

    // Do the real work
    pm->drain_stacks(false);
  }
};

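// The main scavenge task. Each worker scans dirty cards in old-gen (the
// old-to-young remembered set), its share of the strong-root categories,
// Java threads and OopStorages, and finally steals work until termination.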
class ScavengeRootsTask : public WorkerTask {
  StrongRootsScope _strong_roots_scope; // needed for Threads::possibly_parallel_threads_do
  OopStorageSetStrongParState<false /* concurrent */, false /* is_const */> _oop_storage_strong_par_state;
  SequentialSubTasksDone _subtasks;
  PSOldGen* _old_gen;
  HeapWord* _gen_top;
  uint _active_workers;
  bool _is_old_gen_empty;
  TaskTerminator _terminator;

public:
  ScavengeRootsTask(PSOldGen* old_gen,
                    uint active_workers) :
    WorkerTask("ScavengeRootsTask"),
    _strong_roots_scope(active_workers),
    _subtasks(ParallelRootType::sentinel),
    _old_gen(old_gen),
    _gen_top(old_gen->object_space()->top()),
    _active_workers(active_workers),
    _is_old_gen_empty(old_gen->object_space()->is_empty()),
    _terminator(active_workers, PSPromotionManager::stack_array_depth()) {
    if (!_is_old_gen_empty) {
      PSCardTable* card_table = ParallelScavengeHeap::heap()->card_table();
      card_table->pre_scavenge(active_workers);
    }
  }

  virtual void work(uint worker_id) {
    assert(worker_id < _active_workers, "Sanity");
    ResourceMark rm;

    if (!_is_old_gen_empty) {
      // There are only old-to-young pointers if there are objects
      // in the old gen.
      {
        PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(worker_id);
        PSCardTable* card_table = ParallelScavengeHeap::heap()->card_table();

        // The top of the old gen changes during scavenge when objects are promoted.
        card_table->scavenge_contents_parallel(_old_gen->start_array(),
                                               _old_gen->object_space()->bottom(),
                                               _gen_top,
                                               pm,
                                               worker_id,
                                               _active_workers);

        // Do the real work
        pm->drain_stacks(false);
      }
    }

    for (uint root_type = 0; _subtasks.try_claim_task(root_type); /* empty */ ) {
      scavenge_roots_work(static_cast<ParallelRootType::Value>(root_type), worker_id);
    }

    PSThreadRootsTaskClosure closure(worker_id);
    Threads::possibly_parallel_threads_do(true /* is_par */, &closure);

    // Scavenge OopStorages
    {
      PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(worker_id);
      PSScavengeRootsClosure closure(pm);
      _oop_storage_strong_par_state.oops_do(&closure);
      // Do the real work
      pm->drain_stacks(false);
    }

    // If active_workers can exceed 1, add a steal_work().
    // PSPromotionManager::drain_stacks_depth() does not fully drain its
    // stacks and expects a steal_work() to complete the draining if
    // ParallelGCThreads is > 1.

    if (_active_workers > 1) {
      steal_work(_terminator, worker_id);
    }
  }
};

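// Perform a stop-the-world young (minor) collection. Returns true if the
// scavenge completed without a promotion failure. Must be run by the VM
// thread at a safepoint.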
bool PSScavenge::invoke(bool clear_soft_refs) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return false;
  }

  IsSTWGCActiveMark mark;

  _gc_timer.register_gc_start();

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();

  SvcGCMarker sgcm(SvcGCMarker::MINOR);
  GCIdMark gc_id_mark;
  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  assert(young_gen->to_space()->is_empty(),
         "Attempt to scavenge with live objects in to_space");

  heap->increment_total_collections();

  if (AdaptiveSizePolicy::should_update_eden_stats(gc_cause)) {
    // Gather the feedback data for eden occupancy.
    young_gen->eden_space()->accumulate_statistics();
  }

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(&_gc_tracer);

  assert(!NeverTenure || _tenuring_threshold == markWord::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  // Fill in TLABs
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    Universe::verify("Before GC");
  }

  {
    ResourceMark rm;

    GCTraceCPUTime tcpu(&_gc_tracer);
    GCTraceTime(Info, gc) tm("Pause Young", nullptr, gc_cause, true);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(heap->young_gc_manager(), gc_cause, "end of minor GC");

    if (log_is_enabled(Debug, gc, heap, exit)) {
      accumulated_time()->start();
    }

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif

    reference_processor()->start_discovery(clear_soft_refs);

    const PreGenGCValues pre_gc_values = heap->get_pre_gc_values();

    // Reset our survivor overflow.
    set_survivor_overflow(false);

    const uint active_workers =
      WorkerPolicy::calc_active_workers(ParallelScavengeHeap::heap()->workers().max_workers(),
                                        ParallelScavengeHeap::heap()->workers().active_workers(),
                                        Threads::number_of_non_daemon_threads());
    ParallelScavengeHeap::heap()->workers().set_active_workers(active_workers);

    PSPromotionManager::pre_scavenge();

    {
      GCTraceTime(Debug, gc, phases) tm("Scavenge", &_gc_timer);

      ScavengeRootsTask task(old_gen, active_workers);
      ParallelScavengeHeap::heap()->workers().run_task(&task);
    }

    // Process reference objects discovered during scavenge
    {
      GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);

      reference_processor()->set_active_mt_degree(active_workers);
      ReferenceProcessorStats stats;
      ReferenceProcessorPhaseTimes pt(&_gc_timer, reference_processor()->max_num_queues());

      ParallelScavengeRefProcProxyTask task(reference_processor()->max_num_queues());
      stats = reference_processor()->process_discovered_references(task, pt);

      _gc_tracer.report_gc_reference_stats(stats);
      pt.print_all_references();
    }

    {
      GCTraceTime(Debug, gc, phases) tm("Weak Processing", &_gc_timer);
      PSAdjustWeakRootsClosure root_closure;
      WeakProcessor::weak_oops_do(&ParallelScavengeHeap::heap()->workers(), &_is_alive_closure, &root_closure, 1);
    }

    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    promotion_failure_occurred = PSPromotionManager::post_scavenge(_gc_tracer);
    if (promotion_failure_occurred) {
      clean_up_failed_promotion();
      log_info(gc, promotion)("Promotion failed");
    }

    _gc_tracer.report_tenuring_threshold(tenuring_threshold());

    // Let the size policy know we're done.  Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're
    // implicitly saying it's mutator time).
    size_policy->minor_collection_end(gc_cause);

    if (!promotion_failure_occurred) {
      // Swap the survivor spaces.
      young_gen->eden_space()->clear(SpaceDecorator::Mangle);
      young_gen->from_space()->clear(SpaceDecorator::Mangle);
      young_gen->swap_spaces();

      size_t survived = young_gen->from_space()->used_in_bytes();
      size_t promoted = old_gen->used_in_bytes() - pre_gc_values.old_gen_used();
      size_policy->update_averages(_survivor_overflow, survived, promoted);

      // A successful scavenge should reset the GC overhead limit count, which
      // only applies to full GCs.
      size_policy->reset_gc_overhead_limit_count();
      if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold

        log_debug(gc, ergo)("AdaptiveSizeStart:  collection: %d ", heap->total_collections());
        log_trace(gc, ergo)("old_gen_capacity: %zu young_gen_capacity: %zu",
                            old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_old_eden_size(
            size_policy->calculated_eden_size_in_bytes());
          counters->update_old_promo_size(
            size_policy->calculated_promo_size_in_bytes());
          counters->update_old_capacity(old_gen->capacity_in_bytes());
          counters->update_young_capacity(young_gen->capacity_in_bytes());
          counters->update_survived(survived);
          counters->update_promoted(promoted);
          counters->update_survivor_overflowed(_survivor_overflow);
        }

        size_t max_young_size = young_gen->max_gen_size();

        // Deciding on a free ratio in the young generation is tricky, so if
        // MinHeapFreeRatio or MaxHeapFreeRatio are in use (implying that the
        // old generation size may have been limited because of them), we
        // should then limit our young generation size using NewRatio to have
        // it follow the old generation size.
        if (MinHeapFreeRatio != 0 || MaxHeapFreeRatio != 100) {
          max_young_size = MIN2(old_gen->capacity_in_bytes() / NewRatio,
                                young_gen->max_gen_size());
        }

        size_t survivor_limit =
          size_policy->max_survivor_size(max_young_size);
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(_survivor_overflow,
                                                                 _tenuring_threshold,
                                                                 survivor_limit);

        log_debug(gc, age)("Desired survivor size %zu bytes, new threshold %u (max threshold %u)",
                           size_policy->calculated_survivor_size_in_bytes(),
                           _tenuring_threshold, MaxTenuringThreshold);

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_tenuring_threshold(_tenuring_threshold);
          counters->update_survivor_size_counters();
        }

        // This is also called at minor collections. Don't check whether the
        // size_policy is ready at this level; let the size_policy check
        // that internally.
        if (UseAdaptiveGenerationSizePolicyAtMinorCollection &&
            AdaptiveSizePolicy::should_update_eden_stats(gc_cause)) {
          // Calculate optimal free space amounts
          assert(young_gen->max_gen_size() >
                 young_gen->from_space()->capacity_in_bytes() +
                 young_gen->to_space()->capacity_in_bytes(),
                 "Sizes of space in young gen are out-of-bounds");

          size_t young_live = young_gen->used_in_bytes();
          size_t eden_live = young_gen->eden_space()->used_in_bytes();
          size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
          size_t max_old_gen_size = old_gen->max_gen_size();
          size_t max_eden_size = max_young_size -
                                 young_gen->from_space()->capacity_in_bytes() -
                                 young_gen->to_space()->capacity_in_bytes();

          // Used for diagnostics
          size_policy->clear_generation_free_space_flags();

          size_policy->compute_eden_space_size(young_live,
                                               eden_live,
                                               cur_eden,
                                               max_eden_size,
                                               false /* not full gc*/);

          size_policy->check_gc_overhead_limit(eden_live,
                                               max_old_gen_size,
                                               max_eden_size,
                                               false /* not full gc*/,
                                               gc_cause,
                                               heap->soft_ref_policy());

          size_policy->decay_supplemental_growth(false /* not full gc*/);
        }
        // Resize the young generation at every collection
        // even if new sizes have not been calculated.  This is
        // to allow resizes that may have been inhibited by the
        // relative location of the "to" and "from" spaces.

        // Resizing the old gen at young collections can cause increases
        // that don't feed back to the generation sizing policy until
        // a full collection.  Don't resize the old gen here.

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());

        log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
      }

      // Update the structure of eden. With NUMA-eden, CPU hotplugging or
      // offlining can change the heap layout, so make sure eden is reshaped
      // if that's the case. update() also performs adaptive NUMA chunk
      // resizing.
      assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
      young_gen->eden_space()->update();

      heap->gc_policy_counters()->update_counters();

      heap->resize_all_tlabs();

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif

    if (log_is_enabled(Debug, gc, heap, exit)) {
      accumulated_time()->stop();
    }

    heap->print_heap_change(pre_gc_values);

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    Universe::verify("After GC");
  }

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(&_gc_tracer);

  AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());

  _gc_timer.register_gc_end();

  _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());

  return !promotion_failure_occurred;
}

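// Clean up after a promotion failure: restore the mark words that were
// displaced by forwarding pointers, and (in non-product builds) reset the
// PromotionFailureALot machinery.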
void PSScavenge::clean_up_failed_promotion() {
  PSPromotionManager::restore_preserved_marks();

  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(ParallelScavengeHeap::heap()->reset_promotion_should_fail();)
}

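// Heuristic gate for the scavenge: skip it (so that the caller falls back to
// a full collection) if to-space is not empty, or if the padded average
// promotion volume is unlikely to fit into the free space of old-gen.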
bool PSScavenge::should_attempt_scavenge() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  if (!young_gen->to_space()->is_empty()) {
    // To-space is not empty; should run full-gc instead.
    return false;
  }

  // Test to see if the scavenge will likely fail.
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
  size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
  // Total free size after possible old gen expansion
  size_t free_in_old_gen = old_gen->max_gen_size() - old_gen->used_in_bytes();
  bool result = promotion_estimate < free_in_old_gen;

  log_trace(ergo)("%s scavenge: average_promoted %zu padded_average_promoted %zu free in old gen %zu",
                  result ? "Do" : "Skip", (size_t) policy->average_promoted_in_bytes(),
                  (size_t) policy->padded_average_promoted_in_bytes(),
                  free_in_old_gen);

  return result;
}

// Adaptive size policy support.
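// The boundary is cached in both raw and compressed form so that
// is_obj_in_young() (see psScavenge.inline.hpp) can test a narrowOop directly
// against _young_generation_boundary_compressed without decompressing it;
// this relies on young-gen lying above old-gen in the address space.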
void PSScavenge::set_young_generation_boundary(HeapWord* v) {
  _young_generation_boundary = v;
  if (UseCompressedOops) {
    _young_generation_boundary_compressed = (uintptr_t)CompressedOops::encode(cast_to_oop(v));
  }
}

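// One-time setup, performed once arguments have been parsed and the heap has
// been initialized: establishes the tenuring threshold, the young-gen
// boundary, the reference processor and the collector counters.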
void PSScavenge::initialize() {
  // Arguments must have been parsed

  if (AlwaysTenure || NeverTenure) {
    assert(MaxTenuringThreshold == 0 || MaxTenuringThreshold == markWord::max_age + 1,
           "MaxTenuringThreshold should be 0 or markWord::max_age + 1, but is %d", (int) MaxTenuringThreshold);
    _tenuring_threshold = MaxTenuringThreshold;
  } else {
    // We want to smooth out our startup times for the AdaptiveSizePolicy
    _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
                                                    MaxTenuringThreshold;
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Set boundary between young_gen and old_gen; old-gen must lie below
  // young-gen in the address space.
  assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
         "old-gen must be below young-gen");
  set_young_generation_boundary(young_gen->eden_space()->bottom());

  // Initialize ref handling object for scavenging.
  _span_based_discoverer.set_span(young_gen->reserved());
  _ref_processor =
    new ReferenceProcessor(&_span_based_discoverer,
                           ParallelGCThreads,          // mt processing degree
                           ParallelGCThreads,          // mt discovery degree
                           false,                      // concurrent_discovery
                           &_is_alive_closure);        // header provides liveness info

  // Cache the cardtable
  _card_table = heap->card_table();

  _counters = new CollectorCounters("Parallel young collection pauses", 0);
}