/*
 * Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

25 #include "classfile/classLoaderDataGraph.hpp"
26 #include "classfile/stringTable.hpp"
27 #include "code/codeCache.hpp"
28 #include "compiler/oopMap.hpp"
29 #include "gc/parallel/parallelScavengeHeap.hpp"
30 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
31 #include "gc/parallel/psClosure.inline.hpp"
32 #include "gc/parallel/psCompactionManager.hpp"
33 #include "gc/parallel/psParallelCompact.inline.hpp"
34 #include "gc/parallel/psPromotionManager.inline.hpp"
35 #include "gc/parallel/psRootType.hpp"
36 #include "gc/parallel/psScavenge.inline.hpp"
37 #include "gc/shared/gcCause.hpp"
38 #include "gc/shared/gcHeapSummary.hpp"
39 #include "gc/shared/gcId.hpp"
40 #include "gc/shared/gcLocker.hpp"
41 #include "gc/shared/gcTimer.hpp"
42 #include "gc/shared/gcTrace.hpp"
43 #include "gc/shared/gcTraceTime.inline.hpp"
44 #include "gc/shared/gcVMOperations.hpp"
45 #include "gc/shared/isGCActiveMark.hpp"
46 #include "gc/shared/oopStorage.inline.hpp"
47 #include "gc/shared/oopStorageParState.inline.hpp"
48 #include "gc/shared/oopStorageSetParState.inline.hpp"
49 #include "gc/shared/referencePolicy.hpp"
50 #include "gc/shared/referenceProcessor.hpp"
51 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
52 #include "gc/shared/scavengableNMethods.hpp"
53 #include "gc/shared/spaceDecorator.hpp"
54 #include "gc/shared/strongRootsScope.hpp"
55 #include "gc/shared/taskTerminator.hpp"
56 #include "gc/shared/weakProcessor.inline.hpp"
57 #include "gc/shared/workerPolicy.hpp"
58 #include "gc/shared/workerThread.hpp"
59 #include "gc/shared/workerUtils.hpp"
60 #include "logging/log.hpp"
61 #include "memory/iterator.hpp"
62 #include "memory/resourceArea.hpp"
63 #include "memory/universe.hpp"
64 #include "oops/access.inline.hpp"
65 #include "oops/compressedOops.inline.hpp"
66 #include "oops/oop.inline.hpp"
67 #include "runtime/handles.inline.hpp"
68 #include "runtime/threads.hpp"
69 #include "runtime/vmOperations.hpp"
70 #include "runtime/vmThread.hpp"
71 #include "services/memoryService.hpp"
72 #include "utilities/stack.inline.hpp"
73
74 SpanSubjectToDiscoveryClosure PSScavenge::_span_based_discoverer;
75 ReferenceProcessor* PSScavenge::_ref_processor = nullptr;
76 PSCardTable* PSScavenge::_card_table = nullptr;
77 bool PSScavenge::_survivor_overflow = false;
78 uint PSScavenge::_tenuring_threshold = 0;
79 HeapWord* PSScavenge::_young_generation_boundary = nullptr;
80 uintptr_t PSScavenge::_young_generation_boundary_compressed = 0;
81 elapsedTimer PSScavenge::_accumulated_time;
82 STWGCTimer PSScavenge::_gc_timer;
83 ParallelScavengeTracer PSScavenge::_gc_tracer;
84 CollectorCounters* PSScavenge::_counters = nullptr;
85
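// Scavenge one category of strong roots with the given worker's promotion
// manager, pushing any discovered young objects onto its work stacks, then
// drain those stacks before returning.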
static void scavenge_roots_work(ParallelRootType::Value root_type, uint worker_id) {
  assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");

  PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(worker_id);
  PSPromoteRootsClosure roots_to_old_closure(pm);

  switch (root_type) {
    case ParallelRootType::class_loader_data:
    {
      PSScavengeCLDClosure cld_closure(pm);
      ClassLoaderDataGraph::cld_do(&cld_closure);
    }
    break;

    case ParallelRootType::code_cache:
    {
      MarkingNMethodClosure code_closure(&roots_to_old_closure, NMethodToOopClosure::FixRelocations, false /* keepalive nmethods */);
      ScavengableNMethods::nmethods_do(&code_closure);
    }
    break;

    case ParallelRootType::sentinel:
    DEBUG_ONLY(default:) // DEBUG_ONLY hack will create compile error on release builds (-Wswitch) and runtime check on debug builds
      fatal("Bad enumeration value: %u", root_type);
      break;
  }

  // Do the real work
  pm->drain_stacks(false);
}

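// Work-stealing termination loop used by the parallel phases: drain the local
// promotion stacks, then repeatedly try to steal queued tasks from other
// workers until the TaskTerminator agrees that all workers have finished.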
static void steal_work(TaskTerminator& terminator, uint worker_id) {
  assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");

  PSPromotionManager* pm =
    PSPromotionManager::gc_thread_promotion_manager(worker_id);
  pm->drain_stacks(true);
  guarantee(pm->stacks_empty(),
            "stacks should be empty at this point");

  while (true) {
    ScannerTask task;
    if (PSPromotionManager::steal_depth(worker_id, task)) {
      pm->process_popped_location_depth(task, true);
      pm->drain_stacks_depth(true);
    } else {
      if (terminator.offer_termination()) {
        break;
      }
    }
  }
  guarantee(pm->stacks_empty(), "stacks should be empty at this point");
}

// Define before use
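// Liveness predicate used by reference processing and weak-root processing:
// during a scavenge, an object outside the young generation is treated as
// live, and a young object is live iff it has already been forwarded (copied).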
class PSIsAliveClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) {
    return (!PSScavenge::is_obj_in_young(p)) || p->is_forwarded();
  }
};

PSIsAliveClosure PSScavenge::_is_alive_closure;

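// Keep-alive closure for reference processing: copies the referent to
// to-space (or promotes it) via the worker's promotion manager and pushes it
// for further scanning.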
class PSKeepAliveClosure: public OopClosure {
protected:
  MutableSpace* _to_space;
  PSPromotionManager* _promotion_manager;

public:
  PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    _to_space = heap->young_gen()->to_space();

    assert(_promotion_manager != nullptr, "Sanity");
  }

  template <class T> void do_oop_work(T* p) {
#ifdef ASSERT
    // Referent must be non-null and in from-space
    oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
    assert(oopDesc::is_oop(obj), "referent must be an oop");
    assert(PSScavenge::is_obj_in_young(obj), "must be in young-gen");
    assert(!PSScavenge::is_obj_in_to_space(obj), "must be in from-space");
#endif

    _promotion_manager->copy_and_push_safe_barrier</*promote_immediately=*/false>(p);
  }
  virtual void do_oop(oop* p) { PSKeepAliveClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
};

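// Complete-GC closure for reference processing: transitively evacuates
// everything reachable from objects just kept alive by draining the promotion
// stacks and, when running multi-threaded, joining the work-stealing phase.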
class PSEvacuateFollowersClosure: public VoidClosure {
private:
  PSPromotionManager* _promotion_manager;
  TaskTerminator* _terminator;
  uint _worker_id;

public:
  PSEvacuateFollowersClosure(PSPromotionManager* pm, TaskTerminator* terminator, uint worker_id)
    : _promotion_manager(pm), _terminator(terminator), _worker_id(worker_id) {}

  virtual void do_void() {
    assert(_promotion_manager != nullptr, "Sanity");
    _promotion_manager->drain_stacks(true);
    guarantee(_promotion_manager->stacks_empty(),
              "stacks should be empty at this point");

    if (_terminator != nullptr) {
      steal_work(*_terminator, _worker_id);
    }
  }
};

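// Proxy task that adapts the shared reference-processing machinery to the
// scavenge: for each worker it supplies the is-alive, keep-alive, enqueue and
// complete-gc closures defined above.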
class ParallelScavengeRefProcProxyTask : public RefProcProxyTask {
  TaskTerminator _terminator;

public:
  ParallelScavengeRefProcProxyTask(uint max_workers)
    : RefProcProxyTask("ParallelScavengeRefProcProxyTask", max_workers),
      _terminator(max_workers, ParCompactionManager::marking_stacks()) {}

  void work(uint worker_id) override {
    assert(worker_id < _max_workers, "sanity");
    PSPromotionManager* promotion_manager = (_tm == RefProcThreadModel::Single) ? PSPromotionManager::vm_thread_promotion_manager() : PSPromotionManager::gc_thread_promotion_manager(worker_id);
    PSIsAliveClosure is_alive;
    PSKeepAliveClosure keep_alive(promotion_manager);
    BarrierEnqueueDiscoveredFieldClosure enqueue;
    PSEvacuateFollowersClosure complete_gc(promotion_manager, (_marks_oops_alive && _tm == RefProcThreadModel::Multi) ? &_terminator : nullptr, worker_id);
    _rp_task->rp_work(worker_id, &is_alive, &keep_alive, &enqueue, &complete_gc);
  }

  void prepare_run_task_hook() override {
    _terminator.reset_for_reuse(_queue_count);
  }
};

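// Scans one thread's stack frames (and the nmethods they reference) as strong
// roots, then drains the worker's promotion stacks.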
class PSThreadRootsTaskClosure : public ThreadClosure {
  uint _worker_id;
public:
  PSThreadRootsTaskClosure(uint worker_id) : _worker_id(worker_id) { }
  virtual void do_thread(Thread* thread) {
    assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");

    PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(_worker_id);
    PSScavengeRootsClosure roots_closure(pm);
    MarkingNMethodClosure roots_in_nmethods(&roots_closure, NMethodToOopClosure::FixRelocations, false /* keepalive nmethods */);

    thread->oops_do(&roots_closure, &roots_in_nmethods);

    // Do the real work
    pm->drain_stacks(false);
  }
};

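// Root-scanning task for the scavenge. Each worker scans its share of the
// old-to-young card table (when the old gen is non-empty), the type-specific
// strong roots, the thread stacks and the strong OopStorages, and finally
// joins the work-stealing phase if more than one worker is active.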
class ScavengeRootsTask : public WorkerTask {
  StrongRootsScope _strong_roots_scope; // needed for Threads::possibly_parallel_threads_do
  OopStorageSetStrongParState<false /* concurrent */, false /* is_const */> _oop_storage_strong_par_state;
  SequentialSubTasksDone _subtasks;
  PSOldGen* _old_gen;
  HeapWord* _gen_top;
  uint _active_workers;
  bool _is_old_gen_empty;
  TaskTerminator _terminator;

public:
  ScavengeRootsTask(PSOldGen* old_gen,
                    uint active_workers) :
      WorkerTask("ScavengeRootsTask"),
      _strong_roots_scope(active_workers),
      _subtasks(ParallelRootType::sentinel),
      _old_gen(old_gen),
      _gen_top(old_gen->object_space()->top()),
      _active_workers(active_workers),
      _is_old_gen_empty(old_gen->object_space()->is_empty()),
      _terminator(active_workers, PSPromotionManager::vm_thread_promotion_manager()->stack_array_depth()) {
    if (!_is_old_gen_empty) {
      PSCardTable* card_table = ParallelScavengeHeap::heap()->card_table();
      card_table->pre_scavenge(active_workers);
    }
  }

  virtual void work(uint worker_id) {
    assert(worker_id < _active_workers, "Sanity");
    ResourceMark rm;

    if (!_is_old_gen_empty) {
      // There are only old-to-young pointers if there are objects
      // in the old gen.
      {
        PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(worker_id);
        PSCardTable* card_table = ParallelScavengeHeap::heap()->card_table();

        // The top of the old gen changes during scavenge when objects are promoted.
        card_table->scavenge_contents_parallel(_old_gen->start_array(),
                                               _old_gen->object_space()->bottom(),
                                               _gen_top,
                                               pm,
                                               worker_id,
                                               _active_workers);

        // Do the real work
        pm->drain_stacks(false);
      }
    }

    for (uint root_type = 0; _subtasks.try_claim_task(root_type); /* empty */ ) {
      scavenge_roots_work(static_cast<ParallelRootType::Value>(root_type), worker_id);
    }

    PSThreadRootsTaskClosure closure(worker_id);
    Threads::possibly_parallel_threads_do(_active_workers > 1 /* is_par */, &closure);

    // Scavenge OopStorages
    {
      PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(worker_id);
      PSScavengeRootsClosure closure(pm);
      _oop_storage_strong_par_state.oops_do(&closure);
      // Do the real work
      pm->drain_stacks(false);
    }

    // If active_workers can exceed 1, add a steal_work().
    // PSPromotionManager::drain_stacks_depth() does not fully drain its
    // stacks and expects a steal_work() to complete the draining if
    // ParallelGCThreads is > 1.

    if (_active_workers > 1) {
      steal_work(_terminator, worker_id);
    }
  }
};

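// Perform one stop-the-world young collection. Must be called by the VM
// thread at a safepoint. Returns false if the scavenge was not attempted or
// if a promotion failure occurred.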
bool PSScavenge::invoke(bool clear_soft_refs) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return false;
  }

  IsSTWGCActiveMark mark;

  _gc_timer.register_gc_start();

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();

  SvcGCMarker sgcm(SvcGCMarker::MINOR);
  GCIdMark gc_id_mark;
  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  assert(young_gen->to_space()->is_empty(),
         "Attempt to scavenge with live objects in to_space");

  heap->increment_total_collections();

  if (AdaptiveSizePolicy::should_update_eden_stats(gc_cause)) {
    // Gather the feedback data for eden occupancy.
    young_gen->eden_space()->accumulate_statistics();
  }

  heap->print_before_gc();
  heap->trace_heap_before_gc(&_gc_tracer);

  assert(!NeverTenure || _tenuring_threshold == markWord::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  // Fill in TLABs
  heap->ensure_parsability(true); // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    Universe::verify("Before GC");
  }

  {
    ResourceMark rm;

    GCTraceCPUTime tcpu(&_gc_tracer);
    GCTraceTime(Info, gc) tm("Pause Young", nullptr, gc_cause, true);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(heap->young_gc_manager(), gc_cause, "end of minor GC");

    if (log_is_enabled(Debug, gc, heap, exit)) {
      accumulated_time()->start();
    }

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif

    reference_processor()->start_discovery(clear_soft_refs);

    const PreGenGCValues pre_gc_values = heap->get_pre_gc_values();

    // Reset our survivor overflow.
    set_survivor_overflow(false);

    const uint active_workers =
      WorkerPolicy::calc_active_workers(ParallelScavengeHeap::heap()->workers().max_workers(),
                                        ParallelScavengeHeap::heap()->workers().active_workers(),
                                        Threads::number_of_non_daemon_threads());
    ParallelScavengeHeap::heap()->workers().set_active_workers(active_workers);

    PSPromotionManager::pre_scavenge();

    {
      GCTraceTime(Debug, gc, phases) tm("Scavenge", &_gc_timer);

      ScavengeRootsTask task(old_gen, active_workers);
      ParallelScavengeHeap::heap()->workers().run_task(&task);
    }

    // Process reference objects discovered during scavenge
    {
      GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);

      ReferenceProcessorStats stats;
      ReferenceProcessorPhaseTimes pt(&_gc_timer, reference_processor()->max_num_queues());

      ParallelScavengeRefProcProxyTask task(reference_processor()->max_num_queues());
      stats = reference_processor()->process_discovered_references(task, &ParallelScavengeHeap::heap()->workers(), pt);

      _gc_tracer.report_gc_reference_stats(stats);
      pt.print_all_references();
    }

    {
      GCTraceTime(Debug, gc, phases) tm("Weak Processing", &_gc_timer);
      PSAdjustWeakRootsClosure root_closure;
      WeakProcessor::weak_oops_do(&ParallelScavengeHeap::heap()->workers(), &_is_alive_closure, &root_closure, 1);
    }

    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    promotion_failure_occurred = PSPromotionManager::post_scavenge(_gc_tracer);
    if (promotion_failure_occurred) {
      clean_up_failed_promotion();
      log_info(gc, promotion)("Promotion failed");
    }

    _gc_tracer.report_tenuring_threshold(tenuring_threshold());

    // Let the size policy know we're done. Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're
    // implicitly saying it's mutator time).
    size_policy->minor_collection_end(gc_cause);

    if (!promotion_failure_occurred) {
      // Swap the survivor spaces.
      young_gen->eden_space()->clear(SpaceDecorator::Mangle);
      young_gen->from_space()->clear(SpaceDecorator::Mangle);
      young_gen->swap_spaces();

      size_t survived = young_gen->from_space()->used_in_bytes();
      size_t promoted = old_gen->used_in_bytes() - pre_gc_values.old_gen_used();
      size_policy->update_averages(_survivor_overflow, survived, promoted);

      // A successful scavenge should reset the GC overhead limit count, which
      // is used for full GCs.
      size_policy->reset_gc_overhead_limit_count();
      if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold

        log_debug(gc, ergo)("AdaptiveSizeStart: collection: %d ", heap->total_collections());
        log_trace(gc, ergo)("old_gen_capacity: %zu young_gen_capacity: %zu",
                            old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_old_eden_size(
            size_policy->calculated_eden_size_in_bytes());
          counters->update_old_promo_size(
            size_policy->calculated_promo_size_in_bytes());
          counters->update_old_capacity(old_gen->capacity_in_bytes());
          counters->update_young_capacity(young_gen->capacity_in_bytes());
          counters->update_survived(survived);
          counters->update_promoted(promoted);
          counters->update_survivor_overflowed(_survivor_overflow);
        }

        size_t max_young_size = young_gen->max_gen_size();

        // Deciding a free ratio for the young generation is tricky, so if
        // MinHeapFreeRatio or MaxHeapFreeRatio is in use (implying that the
        // old generation size may have been limited because of them), limit
        // the young generation size using NewRatio so that it follows the old
        // generation size.
        if (MinHeapFreeRatio != 0 || MaxHeapFreeRatio != 100) {
          max_young_size = MIN2(old_gen->capacity_in_bytes() / NewRatio,
                                young_gen->max_gen_size());
        }

        size_t survivor_limit =
          size_policy->max_survivor_size(max_young_size);
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(_survivor_overflow,
                                                                 _tenuring_threshold,
                                                                 survivor_limit);

        log_debug(gc, age)("Desired survivor size %zu bytes, new threshold %u (max threshold %u)",
                           size_policy->calculated_survivor_size_in_bytes(),
                           _tenuring_threshold, MaxTenuringThreshold);

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_tenuring_threshold(_tenuring_threshold);
          counters->update_survivor_size_counters();
        }

        // Do call at minor collections?
        // Don't check if the size_policy is ready at this
        // level. Let the size_policy check that internally.
        if (UseAdaptiveGenerationSizePolicyAtMinorCollection &&
            AdaptiveSizePolicy::should_update_eden_stats(gc_cause)) {
          // Calculate optimal free space amounts
          assert(young_gen->max_gen_size() >
                 young_gen->from_space()->capacity_in_bytes() +
                 young_gen->to_space()->capacity_in_bytes(),
                 "Sizes of space in young gen are out-of-bounds");

          size_t young_live = young_gen->used_in_bytes();
          size_t eden_live = young_gen->eden_space()->used_in_bytes();
          size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
          size_t max_old_gen_size = old_gen->max_gen_size();
          size_t max_eden_size = max_young_size -
            young_gen->from_space()->capacity_in_bytes() -
            young_gen->to_space()->capacity_in_bytes();

          // Used for diagnostics
          size_policy->clear_generation_free_space_flags();

          size_policy->compute_eden_space_size(young_live,
                                               eden_live,
                                               cur_eden,
                                               max_eden_size,
                                               false /* not full gc*/);

          size_policy->check_gc_overhead_limit(eden_live,
                                               max_old_gen_size,
                                               max_eden_size,
                                               false /* not full gc*/,
                                               gc_cause,
                                               heap->soft_ref_policy());

          size_policy->decay_supplemental_growth(false /* not full gc*/);
        }
        // Resize the young generation at every collection
        // even if new sizes have not been calculated. This is
        // to allow resizes that may have been inhibited by the
        // relative location of the "to" and "from" spaces.

        // Resizing the old gen at young collections can cause increases
        // that don't feed back to the generation sizing policy until
        // a full collection. Don't resize the old gen here.

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());

        log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
      }

      // Update the structure of eden. With NUMA-eden, CPU hotplugging or
      // offlining can change the heap layout, so make sure eden is reshaped
      // if that is the case. update() also performs adaptive NUMA chunk
      // resizing.
      assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
      young_gen->eden_space()->update();

      heap->gc_policy_counters()->update_counters();

      heap->resize_all_tlabs();

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif

    if (log_is_enabled(Debug, gc, heap, exit)) {
      accumulated_time()->stop();
    }

    heap->print_heap_change(pre_gc_values);

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    Universe::verify("After GC");
  }

  heap->print_after_gc();
  heap->trace_heap_after_gc(&_gc_tracer);

  AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());

  _gc_timer.register_gc_end();

  _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());

  return !promotion_failure_occurred;
}

void PSScavenge::clean_up_failed_promotion() {
  PSPromotionManager::restore_preserved_marks();

  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(ParallelScavengeHeap::heap()->reset_promotion_should_fail();)
}

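// Heuristic check of whether a scavenge is likely to succeed: to-space must
// be empty, and the padded average promotion volume must fit in the free
// space of the old generation (after possible expansion).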
bool PSScavenge::should_attempt_scavenge() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  if (!young_gen->to_space()->is_empty()) {
    // To-space is not empty; should run full-gc instead.
    return false;
  }

  // Test to see if the scavenge will likely fail.
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
  size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
  // Total free size after possible old gen expansion
  size_t free_in_old_gen = old_gen->max_gen_size() - old_gen->used_in_bytes();
  bool result = promotion_estimate < free_in_old_gen;

  log_trace(ergo)("%s scavenge: average_promoted %zu padded_average_promoted %zu free in old gen %zu",
                  result ? "Do" : "Skip", (size_t) policy->average_promoted_in_bytes(),
                  (size_t) policy->padded_average_promoted_in_bytes(),
                  free_in_old_gen);

  return result;
}

// Adaptive size policy support.
void PSScavenge::set_young_generation_boundary(HeapWord* v) {
  _young_generation_boundary = v;
  if (UseCompressedOops) {
    _young_generation_boundary_compressed = (uintptr_t)CompressedOops::encode(cast_to_oop(v));
  }
}

void PSScavenge::initialize() {
  // Arguments must have been parsed

  if (AlwaysTenure || NeverTenure) {
    assert(MaxTenuringThreshold == 0 || MaxTenuringThreshold == markWord::max_age + 1,
           "MaxTenuringThreshold should be 0 or markWord::max_age + 1, but is %d", (int) MaxTenuringThreshold);
    _tenuring_threshold = MaxTenuringThreshold;
  } else {
    // We want to smooth out our startup times for the AdaptiveSizePolicy
    _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
                                                    MaxTenuringThreshold;
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Set boundary between young_gen and old_gen
  assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
         "old above young");
  set_young_generation_boundary(young_gen->eden_space()->bottom());

  // Initialize ref handling object for scavenging.
  _span_based_discoverer.set_span(young_gen->reserved());
  _ref_processor =
    new ReferenceProcessor(&_span_based_discoverer,
                           ParallelGCThreads,   // mt processing degree
                           ParallelGCThreads,   // mt discovery degree
                           false,               // concurrent_discovery
                           &_is_alive_closure); // header provides liveness info

  // Cache the cardtable
  _card_table = heap->card_table();

  _counters = new CollectorCounters("Parallel young collection pauses", 0);
}