1 /*
2 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27
28 #include "gc/shared/barrierSetNMethod.hpp"
29 #include "gc/shared/collectorCounters.hpp"
30 #include "gc/shared/continuationGCSupport.inline.hpp"
31 #include "gc/shenandoah/shenandoahBreakpoint.hpp"
32 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
33 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
34 #include "gc/shenandoah/shenandoahFreeSet.hpp"
35 #include "gc/shenandoah/shenandoahLock.hpp"
36 #include "gc/shenandoah/shenandoahMark.inline.hpp"
37 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
38 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
39 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
40 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
41 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
42 #include "gc/shenandoah/shenandoahStackWatermark.hpp"
43 #include "gc/shenandoah/shenandoahUtils.hpp"
44 #include "gc/shenandoah/shenandoahVerifier.hpp"
45 #include "gc/shenandoah/shenandoahVMOperations.hpp"
46 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
47 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
48 #include "memory/allocation.hpp"
49 #include "prims/jvmtiTagMap.hpp"
50 #include "runtime/vmThread.hpp"
51 #include "utilities/events.hpp"
52
53 // Breakpoint support
54 class ShenandoahBreakpointGCScope : public StackObj {
69 }
70 };
71
72 class ShenandoahBreakpointMarkScope : public StackObj {
73 private:
74 const GCCause::Cause _cause;
75 public:
76 ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
77 if (_cause == GCCause::_wb_breakpoint) {
78 ShenandoahBreakpoint::at_after_marking_started();
79 }
80 }
81
82 ~ShenandoahBreakpointMarkScope() {
83 if (_cause == GCCause::_wb_breakpoint) {
84 ShenandoahBreakpoint::at_before_marking_completed();
85 }
86 }
87 };
88
89 ShenandoahConcurrentGC::ShenandoahConcurrentGC() :
90 _mark(),
91 _degen_point(ShenandoahDegenPoint::_degenerated_unset),
92 _abbreviated(false) {
93 }
94
95 ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
96 return _degen_point;
97 }
98
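// Cancellation: delegate to the concurrent marker to stop and clean up in-flight marking work.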
99 void ShenandoahConcurrentGC::cancel() {
100 ShenandoahConcurrentMark::cancel();
101 }
102
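// Drives one complete concurrent cycle. Returns true when the cycle finishes normally;
// returns false when the GC is cancelled, with _degen_point recording the phase from
// which a degenerated (STW) cycle should resume.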
103 bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
104 ShenandoahHeap* const heap = ShenandoahHeap::heap();
105 ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);
106
107 // Reset for upcoming marking
108 entry_reset();
109
110 // Start initial mark under STW
111 vmop_entry_init_mark();
112
113 {
114 ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);
115 // Concurrent mark roots
116 entry_mark_roots();
117 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_outside_cycle)) {
118 return false;
119 }
120
121 // Continue concurrent mark
122 entry_mark();
123 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) {
124 return false;
125 }
126 }
127
128 // Complete marking under STW, and start evacuation
129 vmop_entry_final_mark();
130
131 // If the GC was cancelled before final mark, nothing happens on the safepoint. We are still
132 // in the marking phase and must resume the degenerated cycle from there. If the GC was cancelled
133 // after final mark, then we've entered the evacuation phase and must resume the degenerated cycle
134 // from that phase.
135 if (heap->is_concurrent_mark_in_progress()) {
136 bool cancelled = check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark);
137 assert(cancelled, "GC must have been cancelled between concurrent and final mark");
138 return false;
139 }
140
141 // Concurrent stack processing
142 if (heap->is_evacuation_in_progress()) {
143 entry_thread_roots();
144 }
145
146 // Process weak roots that might still point to regions that would be broken by cleanup
147 if (heap->is_concurrent_weak_root_in_progress()) {
148 entry_weak_refs();
149 entry_weak_roots();
150 }
151
152 // Final mark might have reclaimed some immediate garbage; kick off cleanup to reclaim
153 // the space. This would be the last action if there is nothing to evacuate.
154 entry_cleanup_early();
155
156 heap->free_set()->log_status_under_lock();
157
158 // Perform concurrent class unloading
159 if (heap->unload_classes() &&
160 heap->is_concurrent_weak_root_in_progress()) {
161 entry_class_unloading();
162 }
163
164 // Processing strong roots
165 // This may be skipped if there is nothing to update/evacuate.
166 // If so, strong_root_in_progress would be unset.
167 if (heap->is_concurrent_strong_root_in_progress()) {
168 entry_strong_roots();
169 }
170
171 // Continue the cycle with evacuation and optional update-refs.
172 // This may be skipped if there is nothing to evacuate.
173 // If so, evac_in_progress would be unset by collection set preparation code.
174 if (heap->is_evacuation_in_progress()) {
175 // Concurrently evacuate
176 entry_evacuate();
177 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) {
178 return false;
179 }
180
181 // Perform update-refs phase.
182 vmop_entry_init_updaterefs();
183 entry_updaterefs();
184 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) {
185 return false;
186 }
187
188 // Concurrent update thread roots
189 entry_update_thread_roots();
190 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) {
191 return false;
192 }
193
194 vmop_entry_final_updaterefs();
195
196 // Update-refs has freed up the collection set; kick cleanup to reclaim the space.
197 entry_cleanup_complete();
198 } else {
199 vmop_entry_final_roots();
200 _abbreviated = true;
201 }
202
203 return true;
204 }
205
206 void ShenandoahConcurrentGC::vmop_entry_init_mark() {
207 ShenandoahHeap* const heap = ShenandoahHeap::heap();
208 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
209 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);
210
211 heap->try_inject_alloc_failure();
212 VM_ShenandoahInitMark op(this);
213 VMThread::execute(&op); // jump to entry_init_mark() under safepoint
214 }
215
216 void ShenandoahConcurrentGC::vmop_entry_final_mark() {
217 ShenandoahHeap* const heap = ShenandoahHeap::heap();
218 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
219 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);
220
221 heap->try_inject_alloc_failure();
222 VM_ShenandoahFinalMarkStartEvac op(this);
292 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
293 EventMark em("%s", msg);
294
295 ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
296 ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
297 "final reference update");
298
299 op_final_updaterefs();
300 }
301
302 void ShenandoahConcurrentGC::entry_final_roots() {
303 static const char* msg = "Pause Final Roots";
304 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots);
305 EventMark em("%s", msg);
306
307 op_final_roots();
308 }
309
310 void ShenandoahConcurrentGC::entry_reset() {
311 ShenandoahHeap* const heap = ShenandoahHeap::heap();
312 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
313 static const char* msg = "Concurrent reset";
314 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
315 EventMark em("%s", msg);
316
317 ShenandoahWorkerScope scope(heap->workers(),
318 ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
319 "concurrent reset");
320
321 heap->try_inject_alloc_failure();
322 op_reset();
323 }
324
325 void ShenandoahConcurrentGC::entry_mark_roots() {
326 ShenandoahHeap* const heap = ShenandoahHeap::heap();
327 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
328 const char* msg = "Concurrent marking roots";
329 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
330 EventMark em("%s", msg);
331
332 ShenandoahWorkerScope scope(heap->workers(),
333 ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
334 "concurrent marking roots");
335
336 heap->try_inject_alloc_failure();
337 op_mark_roots();
338 }
339
340 void ShenandoahConcurrentGC::entry_mark() {
341 ShenandoahHeap* const heap = ShenandoahHeap::heap();
342 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
483 op_updaterefs();
484 }
485
486 void ShenandoahConcurrentGC::entry_cleanup_complete() {
487 ShenandoahHeap* const heap = ShenandoahHeap::heap();
488 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
489 static const char* msg = "Concurrent cleanup";
490 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
491 EventMark em("%s", msg);
492
493 // This phase does not use workers, so no setup is needed
494 heap->try_inject_alloc_failure();
495 op_cleanup_complete();
496 }
497
498 void ShenandoahConcurrentGC::op_reset() {
499 ShenandoahHeap* const heap = ShenandoahHeap::heap();
500 if (ShenandoahPacing) {
501 heap->pacer()->setup_for_reset();
502 }
503
504 heap->prepare_gc();
505 }
506
507 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
508 private:
509 ShenandoahMarkingContext* const _ctx;
510 public:
511 ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
512
513 void heap_region_do(ShenandoahHeapRegion* r) {
514 assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
515 if (r->is_active()) {
516 // Check if the region needs its TAMS updated. We have already updated it during
517 // concurrent reset, so it is very likely we do not need another write here.
518 if (_ctx->top_at_mark_start(r) != r->top()) {
519 _ctx->capture_top_at_mark_start(r);
520 }
521 } else {
522 assert(_ctx->top_at_mark_start(r) == r->top(),
523 "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
524 }
525 }
526
527 bool is_thread_safe() { return true; }
528 };
529
530 void ShenandoahConcurrentGC::start_mark() {
531 _mark.start_mark();
532 }
533
534 void ShenandoahConcurrentGC::op_init_mark() {
535 ShenandoahHeap* const heap = ShenandoahHeap::heap();
536 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
537 assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
538
539 assert(heap->marking_context()->is_bitmap_clear(), "need clear marking bitmap");
540 assert(!heap->marking_context()->is_complete(), "should not be complete");
541 assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
542
543 if (ShenandoahVerify) {
544 heap->verifier()->verify_before_concmark();
545 }
546
547 if (VerifyBeforeGC) {
548 Universe::verify();
549 }
550
551 heap->set_concurrent_mark_in_progress(true);
552
553 start_mark();
554
555 {
556 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
557 ShenandoahInitMarkUpdateRegionStateClosure cl;
558 heap->parallel_heap_region_iterate(&cl);
559 }
560
561 // Weak reference processing
562 ShenandoahReferenceProcessor* rp = heap->ref_processor();
563 rp->reset_thread_locals();
564 rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());
565
566 // Make the above changes visible to worker threads
567 OrderAccess::fence();
568
569 // Arm nmethods for concurrent mark
570 ShenandoahCodeRoots::arm_nmethods_for_mark();
571
572 ShenandoahStackWatermark::change_epoch_id();
573 if (ShenandoahPacing) {
574 heap->pacer()->setup_for_mark();
575 }
576 }
577
578 void ShenandoahConcurrentGC::op_mark_roots() {
579 _mark.mark_concurrent_roots();
580 }
581
582 void ShenandoahConcurrentGC::op_mark() {
583 _mark.concurrent_mark();
584 }
585
586 void ShenandoahConcurrentGC::op_final_mark() {
587 ShenandoahHeap* const heap = ShenandoahHeap::heap();
588 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
589 assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
590
591 if (ShenandoahVerify) {
592 heap->verifier()->verify_roots_no_forwarded();
593 }
594
595 if (!heap->cancelled_gc()) {
596 _mark.finish_mark();
597 assert(!heap->cancelled_gc(), "STW mark cannot OOM");
598
599 // Notify JVMTI that the tagmap table will need cleaning.
600 JvmtiTagMap::set_needs_cleaning();
601
602 heap->prepare_regions_and_collection_set(true /*concurrent*/);
603
604 // Has to be done after cset selection
605 heap->prepare_concurrent_roots();
606
607 if (!heap->collection_set()->is_empty()) {
608 if (ShenandoahVerify) {
609 heap->verifier()->verify_before_evacuation();
610 }
611
612 heap->set_evacuation_in_progress(true);
613 // From here on, we need to update references.
614 heap->set_has_forwarded_objects(true);
615
616 // Verify before arming for concurrent processing.
617 // Otherwise, verification can trigger stack processing.
618 if (ShenandoahVerify) {
619 heap->verifier()->verify_during_evacuation();
620 }
621
622 // Arm nmethods/stack for concurrent processing
623 ShenandoahCodeRoots::arm_nmethods_for_evac();
624 ShenandoahStackWatermark::change_epoch_id();
625
626 if (ShenandoahPacing) {
627 heap->pacer()->setup_for_evac();
628 }
629 } else {
630 if (ShenandoahVerify) {
631 heap->verifier()->verify_after_concmark();
632 }
633
634 if (VerifyAfterGC) {
635 Universe::verify();
636 }
637 }
638 }
639 }
640
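// Applied to each Java thread: finishes lazy stack-watermark processing, running the
// supplied oop closure over the thread's stack roots.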
641 class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
642 private:
643 OopClosure* const _oops;
644
645 public:
646 ShenandoahConcurrentEvacThreadClosure(OopClosure* oops);
647 void do_thread(Thread* thread);
648 };
649
650 ShenandoahConcurrentEvacThreadClosure::ShenandoahConcurrentEvacThreadClosure(OopClosure* oops) :
651 _oops(oops) {
652 }
653
654 void ShenandoahConcurrentEvacThreadClosure::do_thread(Thread* thread) {
655 JavaThread* const jt = JavaThread::cast(thread);
656 StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc);
657 }
658
659 class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask {
660 private:
661 ShenandoahJavaThreadsIterator _java_threads;
662
663 public:
664 ShenandoahConcurrentEvacUpdateThreadTask(uint n_workers) :
665 WorkerTask("Shenandoah Evacuate/Update Concurrent Thread Roots"),
666 _java_threads(ShenandoahPhaseTimings::conc_thread_roots, n_workers) {
667 }
668
669 void work(uint worker_id) {
670 // ShenandoahEvacOOMScope has to be set up by ShenandoahContextEvacuateUpdateRootsClosure.
671 // Otherwise, we may deadlock with the watermark lock.
672 ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
673 ShenandoahConcurrentEvacThreadClosure thr_cl(&oops_cl);
674 _java_threads.threads_do(&thr_cl, worker_id);
675 }
676 };
677
678 void ShenandoahConcurrentGC::op_thread_roots() {
679 ShenandoahHeap* const heap = ShenandoahHeap::heap();
680 assert(heap->is_evacuation_in_progress(), "Checked by caller");
681 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
682 ShenandoahConcurrentEvacUpdateThreadTask task(heap->workers()->active_workers());
683 heap->workers()->run_task(&task);
684 }
685
686 void ShenandoahConcurrentGC::op_weak_refs() {
687 ShenandoahHeap* const heap = ShenandoahHeap::heap();
688 assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
689 // Concurrent weak refs processing
690 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
691 if (heap->gc_cause() == GCCause::_wb_breakpoint) {
692 ShenandoahBreakpoint::at_after_reference_processing_started();
693 }
694 heap->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
695 }
696
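// Visits weak OopStorage roots: clears references to objects that died during marking,
// and evacuates/updates references that still point into the collection set.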
697 class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
698 private:
699 ShenandoahHeap* const _heap;
700 ShenandoahMarkingContext* const _mark_context;
701 bool _evac_in_progress;
702 Thread* const _thread;
703
704 public:
705 ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
706 void do_oop(oop* p);
707 void do_oop(narrowOop* p);
708 };
709
710 ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
711 _heap(ShenandoahHeap::heap()),
712 _mark_context(ShenandoahHeap::heap()->marking_context()),
713 _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
714 _thread(Thread::current()) {
715 }
716
717 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
718 const oop obj = RawAccess<>::oop_load(p);
719 if (!CompressedOops::is_null(obj)) {
720 if (!_mark_context->is_marked(obj)) {
721 shenandoah_assert_correct(p, obj);
722 ShenandoahHeap::atomic_clear_oop(p, obj);
723 } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
724 oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
725 if (resolved == obj) {
726 resolved = _heap->evacuate_object(obj, _thread);
727 }
728 shenandoah_assert_not_in_cset_except(p, resolved, _heap->cancelled_gc());
729 ShenandoahHeap::atomic_update_oop(resolved, p, obj);
730 }
731 }
732 }
733
734 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
735 ShouldNotReachHere();
736 }
737
738 class ShenandoahIsCLDAliveClosure : public CLDClosure {
739 public:
740 void do_cld(ClassLoaderData* cld) {
741 cld->is_alive();
742 }
815 }
816 }
817 };
818
819 void ShenandoahConcurrentGC::op_weak_roots() {
820 ShenandoahHeap* const heap = ShenandoahHeap::heap();
821 assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
822 // Concurrent weak root processing
823 {
824 ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_work);
825 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots_work);
826 ShenandoahConcurrentWeakRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_weak_roots_work);
827 heap->workers()->run_task(&task);
828 }
829
830 // Perform handshake to flush out dead oops
831 {
832 ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_rendezvous);
833 heap->rendezvous_threads();
834 }
835 }
836
837 void ShenandoahConcurrentGC::op_class_unloading() {
838 ShenandoahHeap* const heap = ShenandoahHeap::heap();
839 assert (heap->is_concurrent_weak_root_in_progress() &&
840 heap->unload_classes(),
841 "Checked by caller");
842 heap->do_class_unloading();
843 }
844
845 class ShenandoahEvacUpdateCodeCacheClosure : public NMethodClosure {
846 private:
847 BarrierSetNMethod* const _bs;
848 ShenandoahEvacuateUpdateMetadataClosure _cl;
849
850 public:
851 ShenandoahEvacUpdateCodeCacheClosure() :
852 _bs(BarrierSet::barrier_set()->barrier_set_nmethod()),
853 _cl() {
854 }
922 assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
923 ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
924 heap->workers()->run_task(&task);
925 heap->set_concurrent_strong_root_in_progress(false);
926 }
927
928 void ShenandoahConcurrentGC::op_cleanup_early() {
929 ShenandoahHeap::heap()->free_set()->recycle_trash();
930 }
931
932 void ShenandoahConcurrentGC::op_evacuate() {
933 ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
934 }
935
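// Evacuation is complete; transition the cycle into the update-refs phase, where all
// heap references are redirected to the to-space copies.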
936 void ShenandoahConcurrentGC::op_init_updaterefs() {
937 ShenandoahHeap* const heap = ShenandoahHeap::heap();
938 heap->set_evacuation_in_progress(false);
939 heap->set_concurrent_weak_root_in_progress(false);
940 heap->prepare_update_heap_references(true /*concurrent*/);
941 heap->set_update_refs_in_progress(true);
942
943 if (ShenandoahPacing) {
944 heap->pacer()->setup_for_updaterefs();
945 }
946 }
947
948 void ShenandoahConcurrentGC::op_updaterefs() {
949 ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
950 }
951
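// Handshake closure that updates references held in a Java thread's stack and thread-local roots.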
952 class ShenandoahUpdateThreadClosure : public HandshakeClosure {
953 private:
954 ShenandoahUpdateRefsClosure _cl;
955 public:
956 ShenandoahUpdateThreadClosure();
957 void do_thread(Thread* thread);
958 };
959
960 ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
961 HandshakeClosure("Shenandoah Update Thread Roots") {
962 }
967 ResourceMark rm;
968 jt->oops_do(&_cl, nullptr);
969 }
970 }
971
972 void ShenandoahConcurrentGC::op_update_thread_roots() {
973 ShenandoahUpdateThreadClosure cl;
974 Handshake::execute(&cl);
975 }
976
977 void ShenandoahConcurrentGC::op_final_updaterefs() {
978 ShenandoahHeap* const heap = ShenandoahHeap::heap();
979 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
980 assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");
981
982 heap->finish_concurrent_roots();
983
984 // Clear cancelled GC, if set. On the cancellation path, the block before would have
985 // handled everything.
986 if (heap->cancelled_gc()) {
987 heap->clear_cancelled_gc();
988 }
989
990 // Has to be done before the cset is cleared
991 if (ShenandoahVerify) {
992 heap->verifier()->verify_roots_in_to_space();
993 }
994
995 heap->update_heap_region_states(true /*concurrent*/);
996
997 heap->set_update_refs_in_progress(false);
998 heap->set_has_forwarded_objects(false);
999
1000 if (ShenandoahVerify) {
1001 heap->verifier()->verify_after_updaterefs();
1002 }
1003
1004 if (VerifyAfterGC) {
1005 Universe::verify();
1006 }
1007
1008 heap->rebuild_free_set(true /*concurrent*/);
1009 }
1010
1011 void ShenandoahConcurrentGC::op_final_roots() {
1012 ShenandoahHeap::heap()->set_concurrent_weak_root_in_progress(false);
1013 }
1014
1015 void ShenandoahConcurrentGC::op_cleanup_complete() {
1016 ShenandoahHeap::heap()->free_set()->recycle_trash();
1017 }
1018
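// If the GC has been cancelled, record the phase from which a degenerated cycle should
// continue and tell the caller to abort the concurrent cycle.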
1019 bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
1020 if (ShenandoahHeap::heap()->cancelled_gc()) {
1021 _degen_point = point;
1022 return true;
1023 }
1024 return false;
1025 }
1026
1027 const char* ShenandoahConcurrentGC::init_mark_event_message() const {
1028 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1029 assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1030 if (heap->unload_classes()) {
1031 return "Pause Init Mark (unload classes)";
1032 } else {
1033 return "Pause Init Mark";
1034 }
1035 }
1036
1037 const char* ShenandoahConcurrentGC::final_mark_event_message() const {
1038 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1039 assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1040 if (heap->unload_classes()) {
1041 return "Pause Final Mark (unload classes)";
1042 } else {
1043 return "Pause Final Mark";
1044 }
1045 }
1046
1047 const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
1048 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1049 assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1050 if (heap->unload_classes()) {
1051 return "Concurrent marking (unload classes)";
1052 } else {
1053 return "Concurrent marking";
1054 }
1055 }
1 /*
2 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
4 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
5 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 *
7 * This code is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 only, as
9 * published by the Free Software Foundation.
10 *
11 * This code is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * version 2 for more details (a copy is included in the LICENSE file that
15 * accompanied this code).
16 *
17 * You should have received a copy of the GNU General Public License version
18 * 2 along with this work; if not, write to the Free Software Foundation,
19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 *
21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
22 * or visit www.oracle.com if you need additional information or have any
23 * questions.
24 *
25 */
26
27 #include "precompiled.hpp"
28
29 #include "gc/shared/barrierSetNMethod.hpp"
30 #include "gc/shared/collectorCounters.hpp"
31 #include "gc/shared/continuationGCSupport.inline.hpp"
32 #include "gc/shenandoah/shenandoahBreakpoint.hpp"
33 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
34 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
35 #include "gc/shenandoah/shenandoahFreeSet.hpp"
36 #include "gc/shenandoah/shenandoahGeneration.hpp"
37 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
38 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
39 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
40 #include "gc/shenandoah/shenandoahLock.hpp"
41 #include "gc/shenandoah/shenandoahMark.inline.hpp"
42 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
43 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
44 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
45 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
46 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
47 #include "gc/shenandoah/shenandoahStackWatermark.hpp"
48 #include "gc/shenandoah/shenandoahUtils.hpp"
49 #include "gc/shenandoah/shenandoahVerifier.hpp"
50 #include "gc/shenandoah/shenandoahVMOperations.hpp"
51 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
52 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
53 #include "memory/allocation.hpp"
54 #include "prims/jvmtiTagMap.hpp"
55 #include "runtime/vmThread.hpp"
56 #include "utilities/events.hpp"
57
58 // Breakpoint support
59 class ShenandoahBreakpointGCScope : public StackObj {
74 }
75 };
76
77 class ShenandoahBreakpointMarkScope : public StackObj {
78 private:
79 const GCCause::Cause _cause;
80 public:
81 ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
82 if (_cause == GCCause::_wb_breakpoint) {
83 ShenandoahBreakpoint::at_after_marking_started();
84 }
85 }
86
87 ~ShenandoahBreakpointMarkScope() {
88 if (_cause == GCCause::_wb_breakpoint) {
89 ShenandoahBreakpoint::at_before_marking_completed();
90 }
91 }
92 };
93
94 ShenandoahConcurrentGC::ShenandoahConcurrentGC(ShenandoahGeneration* generation, bool do_old_gc_bootstrap) :
95 _mark(generation),
96 _degen_point(ShenandoahDegenPoint::_degenerated_unset),
97 _abbreviated(false),
98 _do_old_gc_bootstrap(do_old_gc_bootstrap),
99 _generation(generation) {
100 }
101
102 ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
103 return _degen_point;
104 }
105
106 bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
107 ShenandoahHeap* const heap = ShenandoahHeap::heap();
108
109 ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);
110
111 // Reset for upcoming marking
112 entry_reset();
113
114 // Start initial mark under STW
115 vmop_entry_init_mark();
116
117 {
118 ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);
119
120 // Reset task queue stats here, rather than in mark_concurrent_roots,
121 // because remembered set scan will `push` oops into the queues and
122 // resetting after this happens will lose those counts.
123 TASKQUEUE_STATS_ONLY(_mark.task_queues()->reset_taskqueue_stats());
124
125 // Concurrent remembered set scanning
126 entry_scan_remembered_set();
127 // TODO: When RS scanning yields, we will need a check_cancellation_and_abort() degeneration point here.
128
129 // Concurrent mark roots
130 entry_mark_roots();
131 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_roots)) {
132 return false;
133 }
134
135 // Continue concurrent mark
136 entry_mark();
137 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) {
138 return false;
139 }
140 }
141
142 // Complete marking under STW, and start evacuation
143 vmop_entry_final_mark();
144
145 // If the GC was cancelled before final mark, nothing happens on the safepoint. We are still
146 // in the marking phase and must resume the degenerated cycle from there. If the GC was cancelled
147 // after final mark, then we've entered the evacuation phase and must resume the degenerated cycle
148 // from that phase.
149 if (_generation->is_concurrent_mark_in_progress()) {
150 bool cancelled = check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark);
151 assert(cancelled, "GC must have been cancelled between concurrent and final mark");
152 return false;
153 }
154
155 // Concurrent stack processing
156 if (heap->is_evacuation_in_progress()) {
157 entry_thread_roots();
158 }
159
160 // Process weak roots that might still point to regions that would be broken by cleanup
161 if (heap->is_concurrent_weak_root_in_progress()) {
162 entry_weak_refs();
163 entry_weak_roots();
164 }
165
166 // Final mark might have reclaimed some immediate garbage; kick off cleanup to reclaim
167 // the space. This would be the last action if there is nothing to evacuate. Note that
168 // we will not age young-gen objects if we skip evacuation.
169 entry_cleanup_early();
170
171 heap->free_set()->log_status_under_lock();
172
173 // Perform concurrent class unloading
174 if (heap->unload_classes() &&
175 heap->is_concurrent_weak_root_in_progress()) {
176 entry_class_unloading();
177 }
178
179 // Processing strong roots
180 // This may be skipped if there is nothing to update/evacuate.
181 // If so, strong_root_in_progress would be unset.
182 if (heap->is_concurrent_strong_root_in_progress()) {
183 entry_strong_roots();
184 }
185
186 // Continue the cycle with evacuation and optional update-refs.
187 // This may be skipped if there is nothing to evacuate.
188 // If so, evac_in_progress would be unset by collection set preparation code.
189 if (heap->is_evacuation_in_progress()) {
190 // Concurrently evacuate
191 entry_evacuate();
192 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) {
193 return false;
194 }
195 }
196
197 if (heap->has_forwarded_objects()) {
198 // Perform update-refs phase.
199 vmop_entry_init_updaterefs();
200 entry_updaterefs();
201 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) {
202 return false;
203 }
204
205 // Concurrent update thread roots
206 entry_update_thread_roots();
207 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) {
208 return false;
209 }
210
211 vmop_entry_final_updaterefs();
212
213 // Update-refs has freed up the collection set; kick cleanup to reclaim the space.
214 entry_cleanup_complete();
215 } else {
216 // We chose not to evacuate because we found sufficient immediate garbage. Note that we
217 // do not check for cancellation here because, at this point, the cycle is effectively
218 // complete. If the cycle has been cancelled here, the control thread will detect it
219 // on its next iteration and run a degenerated young cycle.
220 vmop_entry_final_roots();
221 _abbreviated = true;
222 }
223
224 // We defer generation resizing actions until after cset regions have been recycled. We do this even following an
225 // abbreviated cycle.
226 if (heap->mode()->is_generational()) {
227 ShenandoahGenerationalHeap::heap()->complete_concurrent_cycle();
228 }
229 return true;
230 }
231
232 void ShenandoahConcurrentGC::vmop_entry_init_mark() {
233 ShenandoahHeap* const heap = ShenandoahHeap::heap();
234 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
235 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);
236
237 heap->try_inject_alloc_failure();
238 VM_ShenandoahInitMark op(this);
239 VMThread::execute(&op); // jump to entry_init_mark() under safepoint
240 }
241
242 void ShenandoahConcurrentGC::vmop_entry_final_mark() {
243 ShenandoahHeap* const heap = ShenandoahHeap::heap();
244 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
245 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);
246
247 heap->try_inject_alloc_failure();
248 VM_ShenandoahFinalMarkStartEvac op(this);
318 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
319 EventMark em("%s", msg);
320
321 ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
322 ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
323 "final reference update");
324
325 op_final_updaterefs();
326 }
327
328 void ShenandoahConcurrentGC::entry_final_roots() {
329 static const char* msg = "Pause Final Roots";
330 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots);
331 EventMark em("%s", msg);
332
333 op_final_roots();
334 }
335
336 void ShenandoahConcurrentGC::entry_reset() {
337 ShenandoahHeap* const heap = ShenandoahHeap::heap();
338 heap->try_inject_alloc_failure();
339
340 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
341 {
342 static const char* msg = "Concurrent reset";
343 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
344 EventMark em("%s", msg);
345
346 ShenandoahWorkerScope scope(heap->workers(),
347 ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
348 msg);
349 op_reset();
350 }
351
352 if (_do_old_gc_bootstrap) {
353 static const char* msg = "Concurrent reset (OLD)";
354 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset_old);
355 ShenandoahWorkerScope scope(heap->workers(),
356 ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
357 msg);
358 EventMark em("%s", msg);
359
360 heap->old_generation()->prepare_gc();
361 }
362 }
363
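// Only young collections scan the remembered set: it yields the old-to-young pointers
// that serve as additional marking roots for the young generation.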
364 void ShenandoahConcurrentGC::entry_scan_remembered_set() {
365 if (_generation->is_young()) {
366 ShenandoahHeap* const heap = ShenandoahHeap::heap();
367 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
368 const char* msg = "Concurrent remembered set scanning";
369 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::init_scan_rset);
370 EventMark em("%s", msg);
371
372 ShenandoahWorkerScope scope(heap->workers(),
373 ShenandoahWorkerPolicy::calc_workers_for_rs_scanning(),
374 msg);
375
376 heap->try_inject_alloc_failure();
377 _generation->scan_remembered_set(true /* is_concurrent */);
378 }
379 }
380
381 void ShenandoahConcurrentGC::entry_mark_roots() {
382 ShenandoahHeap* const heap = ShenandoahHeap::heap();
383 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
384 const char* msg = "Concurrent marking roots";
385 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
386 EventMark em("%s", msg);
387
388 ShenandoahWorkerScope scope(heap->workers(),
389 ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
390 "concurrent marking roots");
391
392 heap->try_inject_alloc_failure();
393 op_mark_roots();
394 }
395
396 void ShenandoahConcurrentGC::entry_mark() {
397 ShenandoahHeap* const heap = ShenandoahHeap::heap();
398 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
539 op_updaterefs();
540 }
541
542 void ShenandoahConcurrentGC::entry_cleanup_complete() {
543 ShenandoahHeap* const heap = ShenandoahHeap::heap();
544 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
545 static const char* msg = "Concurrent cleanup";
546 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
547 EventMark em("%s", msg);
548
549 // This phase does not use workers, so no setup is needed
550 heap->try_inject_alloc_failure();
551 op_cleanup_complete();
552 }
553
554 void ShenandoahConcurrentGC::op_reset() {
555 ShenandoahHeap* const heap = ShenandoahHeap::heap();
556 if (ShenandoahPacing) {
557 heap->pacer()->setup_for_reset();
558 }
559 _generation->prepare_gc();
560 }
561
562 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
563 private:
564 ShenandoahMarkingContext* const _ctx;
565 public:
566 ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
567
568 void heap_region_do(ShenandoahHeapRegion* r) {
569 assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
570 if (r->is_active()) {
571 // Check if the region needs its TAMS updated. We have already updated it during
572 // concurrent reset, so it is very likely we do not need another write here. Since most
573 // regions are not "active", this path is relatively rare.
574 if (_ctx->top_at_mark_start(r) != r->top()) {
575 _ctx->capture_top_at_mark_start(r);
576 }
577 } else {
578 assert(_ctx->top_at_mark_start(r) == r->top(),
579 "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
580 }
581 }
582
583 bool is_thread_safe() { return true; }
584 };
585
586 void ShenandoahConcurrentGC::start_mark() {
587 _mark.start_mark();
588 }
589
590 void ShenandoahConcurrentGC::op_init_mark() {
591 ShenandoahHeap* const heap = ShenandoahHeap::heap();
592 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
593 assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
594
595 assert(_generation->is_bitmap_clear(), "need clear marking bitmap");
596 assert(!_generation->is_mark_complete(), "should not be complete");
597 assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
598
600 if (heap->mode()->is_generational()) {
601 if (_generation->is_young() || (_generation->is_global() && ShenandoahVerify)) {
602 // The current implementation of swap_remembered_set() copies the write-card-table
603 // to the read-card-table. The remembered sets are also swapped for GLOBAL collections
604 // so that the verifier works with the correct copy of the card table when verifying.
605 // TODO: This path should not really depend on ShenandoahVerify.
606 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_swap_rset);
607 _generation->swap_remembered_set();
608 }
609
610 if (_generation->is_global()) {
611 heap->old_generation()->cancel_gc();
612 } else if (heap->is_concurrent_old_mark_in_progress()) {
613 // Purge the SATB buffers, transferring any valid, old pointers to the
614 // old generation mark queue. Any pointers in a young region will be
615 // abandoned.
616 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_transfer_satb);
617 heap->old_generation()->transfer_pointers_from_satb();
618 }
619 }
620
621 if (ShenandoahVerify) {
622 heap->verifier()->verify_before_concmark();
623 }
624
625 if (VerifyBeforeGC) {
626 Universe::verify();
627 }
628
629 _generation->set_concurrent_mark_in_progress(true);
630
631 start_mark();
632
633 if (_do_old_gc_bootstrap) {
634 shenandoah_assert_generational();
635 // Update region state for both young and old regions
636 // TODO: We should be able to pull this out of the safepoint for the bootstrap
637 // cycle. The top of an old region will only move when a GC cycle evacuates
638 // objects into it. When we start an old cycle, we know that nothing can touch
639 // the top of old regions.
640 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
641 ShenandoahInitMarkUpdateRegionStateClosure cl;
642 heap->parallel_heap_region_iterate(&cl);
643 heap->old_generation()->ref_processor()->reset_thread_locals();
644 } else {
645 // Update region state for only young regions
646 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
647 ShenandoahInitMarkUpdateRegionStateClosure cl;
648 _generation->parallel_heap_region_iterate(&cl);
649 }
650
651 // Weak reference processing
652 ShenandoahReferenceProcessor* rp = _generation->ref_processor();
653 rp->reset_thread_locals();
654 rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());
655
656 // Make the above changes visible to worker threads
657 OrderAccess::fence();
658
659 // Arm nmethods for concurrent mark
660 ShenandoahCodeRoots::arm_nmethods_for_mark();
661
662 ShenandoahStackWatermark::change_epoch_id();
663 if (ShenandoahPacing) {
664 heap->pacer()->setup_for_mark();
665 }
666 }
667
668 void ShenandoahConcurrentGC::op_mark_roots() {
669 _mark.mark_concurrent_roots();
670 }
671
672 void ShenandoahConcurrentGC::op_mark() {
673 _mark.concurrent_mark();
674 }
675
676 void ShenandoahConcurrentGC::op_final_mark() {
677 ShenandoahHeap* const heap = ShenandoahHeap::heap();
678 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
679 assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
680
681 if (ShenandoahVerify) {
682 heap->verifier()->verify_roots_no_forwarded();
683 }
684
685 if (!heap->cancelled_gc()) {
686 _mark.finish_mark();
687 assert(!heap->cancelled_gc(), "STW mark cannot OOM");
688
689 // Notify JVMTI that the tagmap table will need cleaning.
690 JvmtiTagMap::set_needs_cleaning();
691
692 // The collection set is chosen by prepare_regions_and_collection_set().
693 //
694 // TODO: Under severe memory overload conditions that can be checked here, we may want to limit
695 // the inclusion of old-gen candidates within the collection set. This would allow us to prioritize efforts on
696 // evacuating young-gen. This remediation is most appropriate when old-gen availability is very high (so there
697 // are negligible negative impacts from delaying completion of old-gen evacuation) and when young-gen collections
698 // are "under duress" (as signalled by very low availability of memory within young-gen, indicating that young-gen
699 // collections are not triggering frequently enough).
700 _generation->prepare_regions_and_collection_set(true /*concurrent*/);
701
702 // Upon return from prepare_regions_and_collection_set(), certain parameters have been established to govern the
703 // evacuation efforts that are about to begin. In particular:
704 //
705 // heap->get_promoted_reserve() represents the amount of memory within old-gen's available memory that has
706 // been set aside to hold objects promoted from young-gen memory. This represents an estimated percentage
707 // of the live young-gen memory within the collection set. If there is more data ready to be promoted than
708 // can fit within this reserve, the promotion of some objects will be deferred until a subsequent evacuation
709 // pass.
710 //
711 // heap->get_old_evac_reserve() represents the amount of memory within old-gen's available memory that has been
712 // set aside to hold objects evacuated from the old-gen collection set.
713 //
714 // heap->get_young_evac_reserve() represents the amount of memory within young-gen's available memory that has
715 // been set aside to hold objects evacuated from the young-gen collection set. Conservatively, this value
716 // equals the entire amount of live young-gen memory within the collection set, even though some of this memory
717 // will likely be promoted.
718
719 // Has to be done after cset selection
720 heap->prepare_concurrent_roots();
721
722 if (!heap->collection_set()->is_empty() || has_in_place_promotions(heap)) {
723 // Even if the collection set is empty, we need to do evacuation if there are regions to be promoted in place.
724 // Concurrent evacuation takes responsibility for registering objects and setting the remembered set cards to dirty.
725
726 LogTarget(Debug, gc, cset) lt;
727 if (lt.is_enabled()) {
728 ResourceMark rm;
729 LogStream ls(lt);
730 heap->collection_set()->print_on(&ls);
731 }
732
733 if (ShenandoahVerify) {
734 heap->verifier()->verify_before_evacuation();
735 }
736
737 // TODO: Do we need to set this if we are only promoting regions in place? We don't need the barriers on for that.
738 heap->set_evacuation_in_progress(true);
739
740 // Verify before arming for concurrent processing.
741 // Otherwise, verification can trigger stack processing.
742 if (ShenandoahVerify) {
743 heap->verifier()->verify_during_evacuation();
744 }
745
746 // Generational mode may promote objects in place during the evacuation phase.
747 // If that is the only reason we are evacuating, we don't need to update references
748 // and there will be no forwarded objects on the heap.
749 heap->set_has_forwarded_objects(!heap->collection_set()->is_empty());
750
751 // Arm nmethods/stack for concurrent processing
752 if (!heap->collection_set()->is_empty()) {
753 // Iff objects will be evacuated, arm the nmethod barriers. These will be disarmed
754 // under the same condition (established in prepare_concurrent_roots) after strong
755 // root evacuation has completed (see op_strong_roots).
756 ShenandoahCodeRoots::arm_nmethods_for_evac();
757 ShenandoahStackWatermark::change_epoch_id();
758 }
759
760 if (ShenandoahPacing) {
761 heap->pacer()->setup_for_evac();
762 }
763 } else {
764 if (ShenandoahVerify) {
765 heap->verifier()->verify_after_concmark();
766 }
767
768 if (VerifyAfterGC) {
769 Universe::verify();
770 }
771 }
772 }
773 }
774
775 bool ShenandoahConcurrentGC::has_in_place_promotions(ShenandoahHeap* heap) {
776 return heap->mode()->is_generational() && heap->old_generation()->has_in_place_promotions();
777 }
778
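// Templated on GENERATIONAL so the per-thread work avoids a runtime mode check; in
// generational mode, PLAB promotions are re-enabled after each thread's stack has been processed.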
779 template<bool GENERATIONAL>
780 class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
781 private:
782 OopClosure* const _oops;
783 public:
784 explicit ShenandoahConcurrentEvacThreadClosure(OopClosure* oops) : _oops(oops) {}
785
786 void do_thread(Thread* thread) override {
787 JavaThread* const jt = JavaThread::cast(thread);
788 StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc);
789 if (GENERATIONAL) {
790 ShenandoahThreadLocalData::enable_plab_promotions(thread);
791 }
792 }
793 };
794
795 template<bool GENERATIONAL>
796 class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask {
797 private:
798 ShenandoahJavaThreadsIterator _java_threads;
799
800 public:
801 explicit ShenandoahConcurrentEvacUpdateThreadTask(uint n_workers) :
802 WorkerTask("Shenandoah Evacuate/Update Concurrent Thread Roots"),
803 _java_threads(ShenandoahPhaseTimings::conc_thread_roots, n_workers) {
804 }
805
806 void work(uint worker_id) override {
807 if (GENERATIONAL) {
808 Thread* worker_thread = Thread::current();
809 ShenandoahThreadLocalData::enable_plab_promotions(worker_thread);
810 }
811
812 // ShenandoahEvacOOMScope has to be set up by ShenandoahContextEvacuateUpdateRootsClosure.
813 // Otherwise, we may deadlock with the watermark lock.
814 ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
815 ShenandoahConcurrentEvacThreadClosure<GENERATIONAL> thr_cl(&oops_cl);
816 _java_threads.threads_do(&thr_cl, worker_id);
817 }
818 };
819
820 void ShenandoahConcurrentGC::op_thread_roots() {
821 ShenandoahHeap* const heap = ShenandoahHeap::heap();
822 assert(heap->is_evacuation_in_progress(), "Checked by caller");
823 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
824 if (heap->mode()->is_generational()) {
825 ShenandoahConcurrentEvacUpdateThreadTask<true> task(heap->workers()->active_workers());
826 heap->workers()->run_task(&task);
827 } else {
828 ShenandoahConcurrentEvacUpdateThreadTask<false> task(heap->workers()->active_workers());
829 heap->workers()->run_task(&task);
830 }
831 }
832
833 void ShenandoahConcurrentGC::op_weak_refs() {
834 ShenandoahHeap* const heap = ShenandoahHeap::heap();
835 assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
836 // Concurrent weak refs processing
837 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
838 if (heap->gc_cause() == GCCause::_wb_breakpoint) {
839 ShenandoahBreakpoint::at_after_reference_processing_started();
840 }
841 _generation->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
842 }
843
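// Generational variant of weak OopStorage root cleanup: dead references are cleared only
// for objects in the active generation; collection-set references are evacuated/updated as usual.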
844 class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
845 private:
846 ShenandoahHeap* const _heap;
847 ShenandoahMarkingContext* const _mark_context;
848 bool _evac_in_progress;
849 Thread* const _thread;
850
851 public:
852 ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
853 void do_oop(oop* p);
854 void do_oop(narrowOop* p);
855 };
856
857 ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
858 _heap(ShenandoahHeap::heap()),
859 _mark_context(ShenandoahHeap::heap()->marking_context()),
860 _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
861 _thread(Thread::current()) {
862 }
863
864 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
865 const oop obj = RawAccess<>::oop_load(p);
866 if (!CompressedOops::is_null(obj)) {
867 if (!_mark_context->is_marked(obj)) {
868 shenandoah_assert_generations_reconciled();
869 if (_heap->is_in_active_generation(obj)) {
870 // TODO: This worries me. Here we are asserting that an unmarked from-space object is 'correct'.
871 // Normally, I would call this a bogus assert, but there seems to be a legitimate use-case for
872 // accessing from-space objects during class unloading. However, the from-space object may have
873 // been "filled". We've made no effort to prevent old generation classes being unloaded by young
874 // gen (and vice-versa).
875 shenandoah_assert_correct(p, obj);
876 ShenandoahHeap::atomic_clear_oop(p, obj);
877 }
878 } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
879 oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
880 if (resolved == obj) {
881 resolved = _heap->evacuate_object(obj, _thread);
882 }
883 shenandoah_assert_not_in_cset_except(p, resolved, _heap->cancelled_gc());
884 ShenandoahHeap::atomic_update_oop(resolved, p, obj);
885 }
886 }
887 }
888
889 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
890 ShouldNotReachHere();
891 }
892
893 class ShenandoahIsCLDAliveClosure : public CLDClosure {
894 public:
895 void do_cld(ClassLoaderData* cld) {
896 cld->is_alive();
897 }
970 }
971 }
972 };
973
974 void ShenandoahConcurrentGC::op_weak_roots() {
975 ShenandoahHeap* const heap = ShenandoahHeap::heap();
976 assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
977 // Concurrent weak root processing
978 {
979 ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_work);
980 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots_work);
981 ShenandoahConcurrentWeakRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_weak_roots_work);
982 heap->workers()->run_task(&task);
983 }
984
985 // Perform handshake to flush out dead oops
986 {
987 ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_rendezvous);
988 heap->rendezvous_threads();
989 }
990 // We can only toggle the concurrent_weak_root_in_progress flag
991 // at a safepoint, so that mutators see a consistent
992 // value. The flag will be cleared at the next safepoint.
993 }
994
995 void ShenandoahConcurrentGC::op_class_unloading() {
996 ShenandoahHeap* const heap = ShenandoahHeap::heap();
997 assert (heap->is_concurrent_weak_root_in_progress() &&
998 heap->unload_classes(),
999 "Checked by caller");
1000 heap->do_class_unloading();
1001 }
1002
1003 class ShenandoahEvacUpdateCodeCacheClosure : public NMethodClosure {
1004 private:
1005 BarrierSetNMethod* const _bs;
1006 ShenandoahEvacuateUpdateMetadataClosure _cl;
1007
1008 public:
1009 ShenandoahEvacUpdateCodeCacheClosure() :
1010 _bs(BarrierSet::barrier_set()->barrier_set_nmethod()),
1011 _cl() {
1012 }
1080 assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
1081 ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
1082 heap->workers()->run_task(&task);
1083 heap->set_concurrent_strong_root_in_progress(false);
1084 }
1085
1086 void ShenandoahConcurrentGC::op_cleanup_early() {
1087 ShenandoahHeap::heap()->free_set()->recycle_trash();
1088 }
1089
1090 void ShenandoahConcurrentGC::op_evacuate() {
1091 ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
1092 }
1093
1094 void ShenandoahConcurrentGC::op_init_updaterefs() {
1095 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1096 heap->set_evacuation_in_progress(false);
1097 heap->set_concurrent_weak_root_in_progress(false);
1098 heap->prepare_update_heap_references(true /*concurrent*/);
1099 heap->set_update_refs_in_progress(true);
1100 if (ShenandoahVerify) {
1101 heap->verifier()->verify_before_updaterefs();
1102 }
1103 if (ShenandoahPacing) {
1104 heap->pacer()->setup_for_updaterefs();
1105 }
1106 }
1107
1108 void ShenandoahConcurrentGC::op_updaterefs() {
1109 ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
1110 }
1111
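// Handshake closure that updates references held in a Java thread's stack and thread-local roots.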
1112 class ShenandoahUpdateThreadClosure : public HandshakeClosure {
1113 private:
1114 ShenandoahUpdateRefsClosure _cl;
1115 public:
1116 ShenandoahUpdateThreadClosure();
1117 void do_thread(Thread* thread);
1118 };
1119
1120 ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
1121 HandshakeClosure("Shenandoah Update Thread Roots") {
1122 }
1127 ResourceMark rm;
1128 jt->oops_do(&_cl, nullptr);
1129 }
1130 }
1131
1132 void ShenandoahConcurrentGC::op_update_thread_roots() {
1133 ShenandoahUpdateThreadClosure cl;
1134 Handshake::execute(&cl);
1135 }
1136
1137 void ShenandoahConcurrentGC::op_final_updaterefs() {
1138 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1139 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
1140 assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");
1141
1142 heap->finish_concurrent_roots();
1143
1144 // Clear cancelled GC, if set. On the cancellation path, the block before would have
1145 // handled everything.
1146 if (heap->cancelled_gc()) {
1147 heap->clear_cancelled_gc(true /* clear oom handler */);
1148 }
1149
1150 // Has to be done before the cset is cleared
1151 if (ShenandoahVerify) {
1152 heap->verifier()->verify_roots_in_to_space();
1153 }
1154
1155 // If we are running in generational mode and this is an aging cycle, this will also age active
1156 // regions that haven't been used for allocation.
1157 heap->update_heap_region_states(true /*concurrent*/);
1158
1159 heap->set_update_refs_in_progress(false);
1160 heap->set_has_forwarded_objects(false);
1161
1162 if (heap->mode()->is_generational() && heap->is_concurrent_old_mark_in_progress()) {
1163 // When the SATB barrier is left on to support concurrent old gen mark, it may pick up writes to
1164 // objects in the collection set. After those objects are evacuated, the pointers in the
1165 // SATB are no longer safe. Once we have finished update references, we are guaranteed that
1166 // no more writes to the collection set are possible.
1167 //
1168 // This will transfer any old pointers in _active_ regions from the SATB to the old gen
1169 // mark queues. All other pointers will be discarded. This would also discard any pointers
1170 // in old regions that were included in a mixed evacuation. We aren't using the SATB filter
1171 // methods here because we cannot control when they execute. If the SATB filter runs _after_
1172 // a region has been recycled, we will not be able to detect the bad pointer.
1173 //
1174 // We are not concerned about skipping this step in abbreviated cycles because regions
1175 // with no live objects cannot have been written to and so cannot have entries in the SATB
1176 // buffers.
1177 heap->old_generation()->transfer_pointers_from_satb();
1178
1179 // Aging_cycle is only relevant during the evacuation cycle for individual objects, and during final mark for
1180 // entire regions. Both of these operations occur before final update-refs.
1181 ShenandoahGenerationalHeap::heap()->set_aging_cycle(false);
1182 }
1183
1184 if (ShenandoahVerify) {
1185 heap->verifier()->verify_after_updaterefs();
1186 }
1187
1188 if (VerifyAfterGC) {
1189 Universe::verify();
1190 }
1191
1192 heap->rebuild_free_set(true /*concurrent*/);
1193 }
1194
1195 void ShenandoahConcurrentGC::op_final_roots() {
1197 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1198 heap->set_concurrent_weak_root_in_progress(false);
1199 heap->set_evacuation_in_progress(false);
1200
1201 if (heap->mode()->is_generational()) {
1202 // If the cycle was abbreviated because there was enough immediate garbage, this could be
1203 // the last GC safepoint before concurrent marking of old resumes. We must be sure
1204 // that old mark threads don't see any pointers to garbage in the SATB buffers.
1205 if (heap->is_concurrent_old_mark_in_progress()) {
1206 heap->old_generation()->transfer_pointers_from_satb();
1207 }
1208
1209 if (!_generation->is_old()) {
1210 ShenandoahGenerationalHeap::heap()->update_region_ages(_generation->complete_marking_context());
1211 }
1212 }
1213 }
1214
1215 void ShenandoahConcurrentGC::op_cleanup_complete() {
1216 ShenandoahHeap::heap()->free_set()->recycle_trash();
1217 }
1218
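// If the GC has been cancelled, record the phase from which a degenerated cycle should
// continue and tell the caller to abort the concurrent cycle.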
1219 bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
1220 if (ShenandoahHeap::heap()->cancelled_gc()) {
1221 _degen_point = point;
1222 return true;
1223 }
1224 return false;
1225 }
1226
1227 const char* ShenandoahConcurrentGC::init_mark_event_message() const {
1228 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1229 assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1230 if (heap->unload_classes()) {
1231 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Init Mark", " (unload classes)");
1232 } else {
1233 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Init Mark", "");
1234 }
1235 }
1236
1237 const char* ShenandoahConcurrentGC::final_mark_event_message() const {
1238 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1239 assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
1240 "Should not have forwarded objects during final mark, unless old gen concurrent mark is running");
1241
1242 if (heap->unload_classes()) {
1243 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Mark", " (unload classes)");
1244 } else {
1245 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Mark", "");
1246 }
1247 }
1248
1249 const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
1250 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1251 assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
1252 "Should not have forwarded objects concurrent mark, unless old gen concurrent mark is running");
1253 if (heap->unload_classes()) {
1254 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent marking", " (unload classes)");
1255 } else {
1256 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent marking", "");
1257 }
1258 }