1 /*
2 * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26
27 #include "gc/shared/barrierSetNMethod.hpp"
28 #include "gc/shared/collectorCounters.hpp"
29 #include "gc/shared/continuationGCSupport.inline.hpp"
30 #include "gc/shenandoah/shenandoahBreakpoint.hpp"
31 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
32 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
33 #include "gc/shenandoah/shenandoahFreeSet.hpp"
34 #include "gc/shenandoah/shenandoahLock.hpp"
35 #include "gc/shenandoah/shenandoahMark.inline.hpp"
36 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
37 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
38 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
39 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
40 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
41 #include "gc/shenandoah/shenandoahStackWatermark.hpp"
42 #include "gc/shenandoah/shenandoahUtils.hpp"
43 #include "gc/shenandoah/shenandoahVerifier.hpp"
44 #include "gc/shenandoah/shenandoahVMOperations.hpp"
45 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
46 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
47 #include "memory/allocation.hpp"
48 #include "prims/jvmtiTagMap.hpp"
49 #include "runtime/vmThread.hpp"
50 #include "utilities/events.hpp"
51
52 // Breakpoint support
53 class ShenandoahBreakpointGCScope : public StackObj {
68 }
69 };
70
71 class ShenandoahBreakpointMarkScope : public StackObj {
72 private:
73 const GCCause::Cause _cause;
74 public:
75 ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
76 if (_cause == GCCause::_wb_breakpoint) {
77 ShenandoahBreakpoint::at_after_marking_started();
78 }
79 }
80
81 ~ShenandoahBreakpointMarkScope() {
82 if (_cause == GCCause::_wb_breakpoint) {
83 ShenandoahBreakpoint::at_before_marking_completed();
84 }
85 }
86 };
87
// Constructs a concurrent GC driver: marking state is default-initialized and
// the degeneration point stays unset until a cancellation is observed.
ShenandoahConcurrentGC::ShenandoahConcurrentGC() :
  _mark(),
  _degen_point(ShenandoahDegenPoint::_degenerated_unset) {
}
92
// Returns the phase at which this cycle was cancelled, or _degenerated_unset
// if no cancellation was recorded.
ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
  return _degen_point;
}
96
// Requests cancellation of the concurrent marking machinery.
void ShenandoahConcurrentGC::cancel() {
  ShenandoahConcurrentMark::cancel();
}
100
// Drives one complete concurrent collection cycle. Returns true when the
// cycle ran to completion; returns false on cancellation, in which case
// _degen_point records the phase from which a degenerated (STW) cycle
// should resume.
bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);

  // Reset for upcoming marking
  entry_reset();

  // Start initial mark under STW
  vmop_entry_init_mark();

  {
    ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);
    // Concurrent mark roots
    entry_mark_roots();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_outside_cycle)) return false;

    // Continue concurrent mark
    entry_mark();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) return false;
  }

  // Complete marking under STW, and start evacuation
  vmop_entry_final_mark();

  // Concurrent stack processing
  if (heap->is_evacuation_in_progress()) {
    entry_thread_roots();
  }

  // Process weak roots that might still point to regions that would be broken by cleanup
  if (heap->is_concurrent_weak_root_in_progress()) {
    entry_weak_refs();
    entry_weak_roots();
  }

  // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
  // the space. This would be the last action if there is nothing to evacuate.
  entry_cleanup_early();

  {
    // Report free-set status under the heap lock, between cleanup and the
    // evacuation decision.
    ShenandoahHeapLocker locker(heap->lock());
    heap->free_set()->log_status();
  }

  // Perform concurrent class unloading
  if (heap->unload_classes() &&
      heap->is_concurrent_weak_root_in_progress()) {
    entry_class_unloading();
  }

  // Processing strong roots
  // This may be skipped if there is nothing to update/evacuate.
  // If so, strong_root_in_progress would be unset.
  if (heap->is_concurrent_strong_root_in_progress()) {
    entry_strong_roots();
  }

  // Continue the cycle with evacuation and optional update-refs.
  // This may be skipped if there is nothing to evacuate.
  // If so, evac_in_progress would be unset by collection set preparation code.
  if (heap->is_evacuation_in_progress()) {
    // Concurrently evacuate
    entry_evacuate();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) return false;

    // Perform update-refs phase.
    vmop_entry_init_updaterefs();
    entry_updaterefs();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;

    // Concurrent update thread roots
    entry_update_thread_roots();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;

    vmop_entry_final_updaterefs();

    // Update references freed up collection set, kick the cleanup to reclaim the space.
    entry_cleanup_complete();
  } else {
    // Nothing was evacuated: finish the cycle with a short final-roots pause.
    vmop_entry_final_roots();
  }

  return true;
}
185
186 void ShenandoahConcurrentGC::vmop_entry_init_mark() {
187 ShenandoahHeap* const heap = ShenandoahHeap::heap();
188 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
189 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);
190
191 heap->try_inject_alloc_failure();
192 VM_ShenandoahInitMark op(this);
193 VMThread::execute(&op); // jump to entry_init_mark() under safepoint
194 }
195
196 void ShenandoahConcurrentGC::vmop_entry_final_mark() {
197 ShenandoahHeap* const heap = ShenandoahHeap::heap();
198 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
199 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);
200
201 heap->try_inject_alloc_failure();
202 VM_ShenandoahFinalMarkStartEvac op(this);
272 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
273 EventMark em("%s", msg);
274
275 ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
276 ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
277 "final reference update");
278
279 op_final_updaterefs();
280 }
281
282 void ShenandoahConcurrentGC::entry_final_roots() {
283 static const char* msg = "Pause Final Roots";
284 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots);
285 EventMark em("%s", msg);
286
287 op_final_roots();
288 }
289
290 void ShenandoahConcurrentGC::entry_reset() {
291 ShenandoahHeap* const heap = ShenandoahHeap::heap();
292 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
293 static const char* msg = "Concurrent reset";
294 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
295 EventMark em("%s", msg);
296
297 ShenandoahWorkerScope scope(heap->workers(),
298 ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
299 "concurrent reset");
300
301 heap->try_inject_alloc_failure();
302 op_reset();
303 }
304
305 void ShenandoahConcurrentGC::entry_mark_roots() {
306 ShenandoahHeap* const heap = ShenandoahHeap::heap();
307 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
308 const char* msg = "Concurrent marking roots";
309 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
310 EventMark em("%s", msg);
311
312 ShenandoahWorkerScope scope(heap->workers(),
313 ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
314 "concurrent marking roots");
315
316 heap->try_inject_alloc_failure();
317 op_mark_roots();
318 }
319
320 void ShenandoahConcurrentGC::entry_mark() {
321 ShenandoahHeap* const heap = ShenandoahHeap::heap();
322 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
463 op_updaterefs();
464 }
465
466 void ShenandoahConcurrentGC::entry_cleanup_complete() {
467 ShenandoahHeap* const heap = ShenandoahHeap::heap();
468 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
469 static const char* msg = "Concurrent cleanup";
470 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
471 EventMark em("%s", msg);
472
473 // This phase does not use workers, no need for setup
474 heap->try_inject_alloc_failure();
475 op_cleanup_complete();
476 }
477
478 void ShenandoahConcurrentGC::op_reset() {
479 ShenandoahHeap* const heap = ShenandoahHeap::heap();
480 if (ShenandoahPacing) {
481 heap->pacer()->setup_for_reset();
482 }
483
484 heap->prepare_gc();
485 }
486
487 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
488 private:
489 ShenandoahMarkingContext* const _ctx;
490 public:
491 ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
492
493 void heap_region_do(ShenandoahHeapRegion* r) {
494 assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
495 if (r->is_active()) {
496 // Check if region needs updating its TAMS. We have updated it already during concurrent
497 // reset, so it is very likely we don't need to do another write here.
498 if (_ctx->top_at_mark_start(r) != r->top()) {
499 _ctx->capture_top_at_mark_start(r);
500 }
501 } else {
502 assert(_ctx->top_at_mark_start(r) == r->top(),
503 "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
504 }
505 }
506
507 bool is_thread_safe() { return true; }
508 };
509
// Initializes concurrent-marking bookkeeping; called from the init-mark safepoint.
void ShenandoahConcurrentGC::start_mark() {
  _mark.start_mark();
}
513
// Init-mark safepoint work (runs on the VM thread): verify preconditions,
// snapshot TAMS for all regions, reset reference processing, arm nmethods and
// stack watermarks, and flip the heap into concurrent-mark mode.
void ShenandoahConcurrentGC::op_init_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");

  assert(heap->marking_context()->is_bitmap_clear(), "need clear marking bitmap");
  assert(!heap->marking_context()->is_complete(), "should not be complete");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");

  if (ShenandoahVerify) {
    heap->verifier()->verify_before_concmark();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  heap->set_concurrent_mark_in_progress(true);

  start_mark();

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
    ShenandoahInitMarkUpdateRegionStateClosure cl;
    heap->parallel_heap_region_iterate(&cl);
  }

  // Weak reference processing
  ShenandoahReferenceProcessor* rp = heap->ref_processor();
  rp->reset_thread_locals();
  rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());

  // Make above changes visible to worker threads
  OrderAccess::fence();

  // Arm nmethods for concurrent mark
  ShenandoahCodeRoots::arm_nmethods_for_mark();

  ShenandoahStackWatermark::change_epoch_id();
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_mark();
  }
}
557
// Delegates concurrent root marking to the marking machinery.
void ShenandoahConcurrentGC::op_mark_roots() {
  _mark.mark_concurrent_roots();
}
561
// Delegates the main concurrent marking loop to the marking machinery.
void ShenandoahConcurrentGC::op_mark() {
  _mark.concurrent_mark();
}
565
// Final-mark safepoint work: finish STW marking, select the collection set,
// and, if there is anything to evacuate, flip the heap into evacuation mode
// and arm nmethods/stack watermarks for concurrent processing. If the GC was
// already cancelled, does nothing so a degenerated cycle can take over.
void ShenandoahConcurrentGC::op_final_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");

  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_no_forwarded();
  }

  if (!heap->cancelled_gc()) {
    _mark.finish_mark();
    assert(!heap->cancelled_gc(), "STW mark cannot OOM");

    // Notify JVMTI that the tagmap table will need cleaning.
    JvmtiTagMap::set_needs_cleaning();

    heap->prepare_regions_and_collection_set(true /*concurrent*/);

    // Has to be done after cset selection
    heap->prepare_concurrent_roots();

    if (!heap->collection_set()->is_empty()) {
      if (ShenandoahVerify) {
        heap->verifier()->verify_before_evacuation();
      }

      heap->set_evacuation_in_progress(true);
      // From here on, we need to update references.
      heap->set_has_forwarded_objects(true);

      // Verify before arming for concurrent processing.
      // Otherwise, verification can trigger stack processing.
      if (ShenandoahVerify) {
        heap->verifier()->verify_during_evacuation();
      }

      // Arm nmethods/stack for concurrent processing
      ShenandoahCodeRoots::arm_nmethods_for_evac();
      ShenandoahStackWatermark::change_epoch_id();

      if (ShenandoahPacing) {
        heap->pacer()->setup_for_evac();
      }
    } else {
      // Empty collection set: no evacuation; just verify the post-mark state.
      if (ShenandoahVerify) {
        heap->verifier()->verify_after_concmark();
      }

      if (VerifyAfterGC) {
        Universe::verify();
      }
    }
  }
}
620
// Thread closure that finishes lazy stack-watermark processing for a Java
// thread, applying the given oop closure to the remaining stack roots.
class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
private:
  OopClosure* const _oops;

public:
  ShenandoahConcurrentEvacThreadClosure(OopClosure* oops);
  void do_thread(Thread* thread);
};
629
// Stores the oop closure to be applied to each thread's stack roots.
ShenandoahConcurrentEvacThreadClosure::ShenandoahConcurrentEvacThreadClosure(OopClosure* oops) :
  _oops(oops) {
}
633
634 void ShenandoahConcurrentEvacThreadClosure::do_thread(Thread* thread) {
635 JavaThread* const jt = JavaThread::cast(thread);
636 StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc);
637 }
638
639 class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask {
640 private:
641 ShenandoahJavaThreadsIterator _java_threads;
642
643 public:
644 ShenandoahConcurrentEvacUpdateThreadTask(uint n_workers) :
645 WorkerTask("Shenandoah Evacuate/Update Concurrent Thread Roots"),
646 _java_threads(ShenandoahPhaseTimings::conc_thread_roots, n_workers) {
647 }
648
649 void work(uint worker_id) {
650 // ShenandoahEvacOOMScope has to be setup by ShenandoahContextEvacuateUpdateRootsClosure.
651 // Otherwise, may deadlock with watermark lock
652 ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
653 ShenandoahConcurrentEvacThreadClosure thr_cl(&oops_cl);
654 _java_threads.threads_do(&thr_cl, worker_id);
655 }
656 };
657
658 void ShenandoahConcurrentGC::op_thread_roots() {
659 ShenandoahHeap* const heap = ShenandoahHeap::heap();
660 assert(heap->is_evacuation_in_progress(), "Checked by caller");
661 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
662 ShenandoahConcurrentEvacUpdateThreadTask task(heap->workers()->active_workers());
663 heap->workers()->run_task(&task);
664 }
665
666 void ShenandoahConcurrentGC::op_weak_refs() {
667 ShenandoahHeap* const heap = ShenandoahHeap::heap();
668 assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
669 // Concurrent weak refs processing
670 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
671 if (heap->gc_cause() == GCCause::_wb_breakpoint) {
672 ShenandoahBreakpoint::at_after_reference_processing_started();
673 }
674 heap->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
675 }
676
// Closure over OopStorage weak roots: clears slots whose referent died during
// marking, and evacuates/updates slots pointing into the collection set.
class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _mark_context;
  bool _evac_in_progress;   // sampled once at construction
  Thread* const _thread;    // the thread that constructed (and runs) this closure

public:
  ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
  void do_oop(oop* p);
  void do_oop(narrowOop* p);
};
689
// Snapshots heap state once, so per-slot processing does not re-query the
// global phase flags.
ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
  _heap(ShenandoahHeap::heap()),
  _mark_context(ShenandoahHeap::heap()->marking_context()),
  _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
  _thread(Thread::current()) {
}
696
697 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
698 const oop obj = RawAccess<>::oop_load(p);
699 if (!CompressedOops::is_null(obj)) {
700 if (!_mark_context->is_marked(obj)) {
701 shenandoah_assert_correct(p, obj);
702 ShenandoahHeap::atomic_clear_oop(p, obj);
703 } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
704 oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
705 if (resolved == obj) {
706 resolved = _heap->evacuate_object(obj, _thread);
707 }
708 ShenandoahHeap::atomic_update_oop(resolved, p, obj);
709 assert(_heap->cancelled_gc() ||
710 _mark_context->is_marked(resolved) && !_heap->in_collection_set(resolved),
711 "Sanity");
712 }
713 }
714 }
715
// OopStorage roots are full-width oops; a compressed slot must never appear here.
void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
  ShouldNotReachHere();
}
719
// CLD closure that queries liveness of each class loader data.
// NOTE(review): the result of is_alive() is discarded — presumably the call is
// made for its side effects during concurrent class unloading; confirm against
// ClassLoaderData::is_alive() before relying on this.
class ShenandoahIsCLDAliveClosure : public CLDClosure {
public:
  void do_cld(ClassLoaderData* cld) {
    cld->is_alive();
  }
};
726
727 class ShenandoahIsNMethodAliveClosure: public NMethodClosure {
728 public:
729 void do_nmethod(nmethod* n) {
730 n->is_unloading();
731 }
908 assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
909 ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
910 heap->workers()->run_task(&task);
911 heap->set_concurrent_strong_root_in_progress(false);
912 }
913
// Recycles immediately-reclaimable (trash) regions back into the free set.
void ShenandoahConcurrentGC::op_cleanup_early() {
  ShenandoahHeap::heap()->free_set()->recycle_trash();
}
917
// Concurrently copies live objects out of the collection set.
void ShenandoahConcurrentGC::op_evacuate() {
  ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
}
921
// Init-update-refs safepoint work: evacuation is complete, so drop the
// evacuation/weak-root flags and set up the concurrent reference-update walk.
void ShenandoahConcurrentGC::op_init_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->set_evacuation_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);
  heap->prepare_update_heap_references(true /*concurrent*/);
  heap->set_update_refs_in_progress(true);

  if (ShenandoahPacing) {
    heap->pacer()->setup_for_updaterefs();
  }
}
933
// Concurrently walks the heap, updating references to evacuated objects.
void ShenandoahConcurrentGC::op_updaterefs() {
  ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
}
937
// Handshake closure that updates references on a Java thread's stack
// after evacuation.
class ShenandoahUpdateThreadClosure : public HandshakeClosure {
private:
  ShenandoahUpdateRefsClosure _cl;
public:
  ShenandoahUpdateThreadClosure();
  void do_thread(Thread* thread);
};
945
// Names the handshake operation for logging and diagnostics.
ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
  HandshakeClosure("Shenandoah Update Thread Roots") {
}
953 ResourceMark rm;
954 jt->oops_do(&_cl, nullptr);
955 }
956 }
957
958 void ShenandoahConcurrentGC::op_update_thread_roots() {
959 ShenandoahUpdateThreadClosure cl;
960 Handshake::execute(&cl);
961 }
962
// Final-update-refs safepoint work: finish concurrent root updating, clear a
// late cancellation (the cycle is effectively done), verify, flip the phase
// flags back, and rebuild the free set from the freed collection set.
void ShenandoahConcurrentGC::op_final_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
  assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");

  heap->finish_concurrent_roots();

  // Clear cancelled GC, if set. On cancellation path, the block before would handle
  // everything.
  if (heap->cancelled_gc()) {
    heap->clear_cancelled_gc();
  }

  // Has to be done before cset is clear
  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_in_to_space();
  }

  heap->update_heap_region_states(true /*concurrent*/);

  heap->set_update_refs_in_progress(false);
  heap->set_has_forwarded_objects(false);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_updaterefs();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  heap->rebuild_free_set(true /*concurrent*/);
}
996
// Final-roots work for the no-evacuation path: weak root processing is done.
void ShenandoahConcurrentGC::op_final_roots() {
  ShenandoahHeap::heap()->set_concurrent_weak_root_in_progress(false);
}
1000
// Recycles collection-set regions (now trash) after update-refs completed.
void ShenandoahConcurrentGC::op_cleanup_complete() {
  ShenandoahHeap::heap()->free_set()->recycle_trash();
}
1004
1005 bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
1006 if (ShenandoahHeap::heap()->cancelled_gc()) {
1007 _degen_point = point;
1008 return true;
1009 }
1010 return false;
1011 }
1012
1013 const char* ShenandoahConcurrentGC::init_mark_event_message() const {
1014 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1015 assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1016 if (heap->unload_classes()) {
1017 return "Pause Init Mark (unload classes)";
1018 } else {
1019 return "Pause Init Mark";
1020 }
1021 }
1022
1023 const char* ShenandoahConcurrentGC::final_mark_event_message() const {
1024 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1025 assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1026 if (heap->unload_classes()) {
1027 return "Pause Final Mark (unload classes)";
1028 } else {
1029 return "Pause Final Mark";
1030 }
1031 }
1032
1033 const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
1034 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1035 assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1036 if (heap->unload_classes()) {
1037 return "Concurrent marking (unload classes)";
1038 } else {
1039 return "Concurrent marking";
1040 }
1041 }
|
1 /*
2 * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
3 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27
28 #include "gc/shared/barrierSetNMethod.hpp"
29 #include "gc/shared/collectorCounters.hpp"
30 #include "gc/shared/continuationGCSupport.inline.hpp"
31 #include "gc/shenandoah/shenandoahBreakpoint.hpp"
32 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
33 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
34 #include "gc/shenandoah/shenandoahFreeSet.hpp"
35 #include "gc/shenandoah/shenandoahGeneration.hpp"
36 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
37 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
38 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
39 #include "gc/shenandoah/shenandoahLock.hpp"
40 #include "gc/shenandoah/shenandoahMark.inline.hpp"
41 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
42 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
43 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
44 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
45 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
46 #include "gc/shenandoah/shenandoahStackWatermark.hpp"
47 #include "gc/shenandoah/shenandoahUtils.hpp"
48 #include "gc/shenandoah/shenandoahVerifier.hpp"
49 #include "gc/shenandoah/shenandoahVMOperations.hpp"
50 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
51 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
52 #include "memory/allocation.hpp"
53 #include "prims/jvmtiTagMap.hpp"
54 #include "runtime/vmThread.hpp"
55 #include "utilities/events.hpp"
56
57 // Breakpoint support
58 class ShenandoahBreakpointGCScope : public StackObj {
73 }
74 };
75
// Notifies whitebox-test breakpoints at the start and end of concurrent
// marking; only cycles requested with the _wb_breakpoint cause participate.
class ShenandoahBreakpointMarkScope : public StackObj {
private:
  const GCCause::Cause _cause;
public:
  ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
    if (_cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::at_after_marking_started();
    }
  }

  ~ShenandoahBreakpointMarkScope() {
    if (_cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::at_before_marking_completed();
    }
  }
};
92
// Constructs a concurrent cycle for the given generation. When
// do_old_gc_bootstrap is set, this cycle also bootstraps concurrent
// old-generation collection.
ShenandoahConcurrentGC::ShenandoahConcurrentGC(ShenandoahGeneration* generation, bool do_old_gc_bootstrap) :
  _mark(generation),
  _degen_point(ShenandoahDegenPoint::_degenerated_unset),
  _abbreviated(false),
  _do_old_gc_bootstrap(do_old_gc_bootstrap),
  _generation(generation) {
}
100
// Returns the phase at which this cycle was cancelled, or _degenerated_unset
// if no cancellation was recorded.
ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
  return _degen_point;
}
104
// Drives one complete concurrent cycle for _generation. Returns true when the
// cycle ran to completion (possibly abbreviated: no evacuation/update-refs);
// returns false on cancellation, with _degen_point recording the phase from
// which a degenerated (STW) cycle should resume.
bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);

  // Reset for upcoming marking
  entry_reset();

  // Start initial mark under STW
  vmop_entry_init_mark();

  {
    ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);

    // Reset task queue stats here, rather than in mark_concurrent_roots,
    // because remembered set scan will `push` oops into the queues and
    // resetting after this happens will lose those counts.
    TASKQUEUE_STATS_ONLY(_mark.task_queues()->reset_taskqueue_stats());

    // Concurrent remembered set scanning
    entry_scan_remembered_set();
    // TODO: When RS scanning yields, we will need a check_cancellation_and_abort() degeneration point here.

    // Concurrent mark roots
    entry_mark_roots();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_roots)) {
      return false;
    }

    // Continue concurrent mark
    entry_mark();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) {
      return false;
    }
  }

  // Complete marking under STW, and start evacuation
  vmop_entry_final_mark();

  // If GC was cancelled before final mark, then the safepoint operation will do nothing
  // and the concurrent mark will still be in progress. In this case it is safe to resume
  // the degenerated cycle from the marking phase. On the other hand, if the GC is cancelled
  // after final mark (but before this check), then the final mark safepoint operation
  // will have finished the mark (setting concurrent mark in progress to false). Final mark
  // will also have setup state (in concurrent stack processing) that will not be safe to
  // resume from the marking phase in the degenerated cycle. That is, if the cancellation
  // occurred after final mark, we must resume the degenerated cycle after the marking phase.
  if (_generation->is_concurrent_mark_in_progress() && check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) {
    assert(!heap->is_concurrent_weak_root_in_progress(), "Weak roots should not be in progress when concurrent mark is in progress");
    return false;
  }

  // Concurrent stack processing
  if (heap->is_evacuation_in_progress()) {
    entry_thread_roots();
  }

  // Process weak roots that might still point to regions that would be broken by cleanup
  if (heap->is_concurrent_weak_root_in_progress()) {
    entry_weak_refs();
    entry_weak_roots();
  }

  // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
  // the space. This would be the last action if there is nothing to evacuate. Note that
  // we will not age young-gen objects in the case that we skip evacuation.
  entry_cleanup_early();

  {
    // TODO: Not sure there is value in logging free-set status right here. Note that whenever the free set is rebuilt,
    // it logs the newly rebuilt status.
    ShenandoahHeapLocker locker(heap->lock());
    heap->free_set()->log_status();
  }

  // Perform concurrent class unloading
  if (heap->unload_classes() &&
      heap->is_concurrent_weak_root_in_progress()) {
    entry_class_unloading();
  }

  // Processing strong roots
  // This may be skipped if there is nothing to update/evacuate.
  // If so, strong_root_in_progress would be unset.
  if (heap->is_concurrent_strong_root_in_progress()) {
    entry_strong_roots();
  }

  // Continue the cycle with evacuation and optional update-refs.
  // This may be skipped if there is nothing to evacuate.
  // If so, evac_in_progress would be unset by collection set preparation code.
  if (heap->is_evacuation_in_progress()) {
    // Concurrently evacuate
    entry_evacuate();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) {
      return false;
    }
  }

  if (heap->has_forwarded_objects()) {
    // Perform update-refs phase.
    vmop_entry_init_updaterefs();
    entry_updaterefs();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) {
      return false;
    }

    // Concurrent update thread roots
    entry_update_thread_roots();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) {
      return false;
    }

    vmop_entry_final_updaterefs();

    // Update references freed up collection set, kick the cleanup to reclaim the space.
    entry_cleanup_complete();
  } else {
    // We chose not to evacuate because we found sufficient immediate garbage. Note that we
    // do not check for cancellation here because, at this point, the cycle is effectively
    // complete. If the cycle has been cancelled here, the control thread will detect it
    // on its next iteration and run a degenerated young cycle.
    vmop_entry_final_roots();
    _abbreviated = true;
  }

  // We defer generation resizing actions until after cset regions have been recycled. We do this even following an
  // abbreviated cycle.
  if (heap->mode()->is_generational()) {
    if (!heap->old_generation()->is_parseable()) {
      // Class unloading may render the card offsets unusable, so we must rebuild them before
      // the next remembered set scan. We _could_ let the control thread do this sometime after
      // the global cycle has completed and before the next young collection, but under memory
      // pressure the control thread may not have the time (that is, because it's running back
      // to back GCs). In that scenario, we would have to make the old regions parsable before
      // we could start a young collection. This could delay the start of the young cycle and
      // throw off the heuristics.
      entry_global_coalesce_and_fill();
    }

    ShenandoahGenerationalHeap::TransferResult result;
    {
      // Rebalance generation sizes under the heap lock, then report the result.
      ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
      ShenandoahHeapLocker locker(gen_heap->lock());

      result = gen_heap->balance_generations();
      gen_heap->reset_generation_reserves();
    }

    LogTarget(Info, gc, ergo) lt;
    if (lt.is_enabled()) {
      LogStream ls(lt);
      result.print_on("Concurrent GC", &ls);
    }
  }
  return true;
}
262
263 void ShenandoahConcurrentGC::vmop_entry_init_mark() {
264 ShenandoahHeap* const heap = ShenandoahHeap::heap();
265 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
266 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);
267
268 heap->try_inject_alloc_failure();
269 VM_ShenandoahInitMark op(this);
270 VMThread::execute(&op); // jump to entry_init_mark() under safepoint
271 }
272
273 void ShenandoahConcurrentGC::vmop_entry_final_mark() {
274 ShenandoahHeap* const heap = ShenandoahHeap::heap();
275 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
276 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);
277
278 heap->try_inject_alloc_failure();
279 VM_ShenandoahFinalMarkStartEvac op(this);
349 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
350 EventMark em("%s", msg);
351
352 ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
353 ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
354 "final reference update");
355
356 op_final_updaterefs();
357 }
358
359 void ShenandoahConcurrentGC::entry_final_roots() {
360 static const char* msg = "Pause Final Roots";
361 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots);
362 EventMark em("%s", msg);
363
364 op_final_roots();
365 }
366
367 void ShenandoahConcurrentGC::entry_reset() {
368 ShenandoahHeap* const heap = ShenandoahHeap::heap();
369 heap->try_inject_alloc_failure();
370
371 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
372 {
373 static const char* msg = "Concurrent reset";
374 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
375 EventMark em("%s", msg);
376
377 ShenandoahWorkerScope scope(heap->workers(),
378 ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
379 msg);
380 op_reset();
381 }
382
383 if (_do_old_gc_bootstrap) {
384 static const char* msg = "Concurrent reset (OLD)";
385 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset_old);
386 ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
387 ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
388 msg);
389 EventMark em("%s", msg);
390
391 heap->old_generation()->prepare_gc();
392 }
393 }
394
395 void ShenandoahConcurrentGC::entry_scan_remembered_set() {
396 if (_generation->is_young()) {
397 ShenandoahHeap* const heap = ShenandoahHeap::heap();
398 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
399 const char* msg = "Concurrent remembered set scanning";
400 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::init_scan_rset);
401 EventMark em("%s", msg);
402
403 ShenandoahWorkerScope scope(heap->workers(),
404 ShenandoahWorkerPolicy::calc_workers_for_rs_scanning(),
405 msg);
406
407 heap->try_inject_alloc_failure();
408 _generation->scan_remembered_set(true /* is_concurrent */);
409 }
410 }
411
412 void ShenandoahConcurrentGC::entry_mark_roots() {
413 ShenandoahHeap* const heap = ShenandoahHeap::heap();
414 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
415 const char* msg = "Concurrent marking roots";
416 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
417 EventMark em("%s", msg);
418
419 ShenandoahWorkerScope scope(heap->workers(),
420 ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
421 "concurrent marking roots");
422
423 heap->try_inject_alloc_failure();
424 op_mark_roots();
425 }
426
427 void ShenandoahConcurrentGC::entry_mark() {
428 ShenandoahHeap* const heap = ShenandoahHeap::heap();
429 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
570 op_updaterefs();
571 }
572
573 void ShenandoahConcurrentGC::entry_cleanup_complete() {
574 ShenandoahHeap* const heap = ShenandoahHeap::heap();
575 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
576 static const char* msg = "Concurrent cleanup";
577 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
578 EventMark em("%s", msg);
579
580 // This phase does not use workers, no need for setup
581 heap->try_inject_alloc_failure();
582 op_cleanup_complete();
583 }
584
585 void ShenandoahConcurrentGC::op_reset() {
586 ShenandoahHeap* const heap = ShenandoahHeap::heap();
587 if (ShenandoahPacing) {
588 heap->pacer()->setup_for_reset();
589 }
590 _generation->prepare_gc();
591 }
592
593 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
594 private:
595 ShenandoahMarkingContext* const _ctx;
596 public:
597 ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
598
599 void heap_region_do(ShenandoahHeapRegion* r) {
600 assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
601 if (r->is_active()) {
602 // Check if region needs updating its TAMS. We have updated it already during concurrent
603 // reset, so it is very likely we don't need to do another write here. Since most regions
604 // are not "active", this path is relatively rare.
605 if (_ctx->top_at_mark_start(r) != r->top()) {
606 _ctx->capture_top_at_mark_start(r);
607 }
608 } else {
609 assert(_ctx->top_at_mark_start(r) == r->top(),
610 "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
611 }
612 }
613
614 bool is_thread_safe() { return true; }
615 };
616
// Delegate to the concurrent marker to note that marking is starting.
void ShenandoahConcurrentGC::start_mark() {
  _mark.start_mark();
}
620
// Init-mark safepoint work: set up marking state for the generation before
// concurrent marking starts. Runs in the VM thread at a Shenandoah safepoint
// (see vmop_entry_init_mark()). Statement order matters: state is published
// to workers with a fence before nmethods are armed.
void ShenandoahConcurrentGC::op_init_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");

  assert(_generation->is_bitmap_clear(), "need clear marking bitmap");
  assert(!_generation->is_mark_complete(), "should not be complete");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");


  if (heap->mode()->is_generational()) {
    if (_generation->is_young() || (_generation->is_global() && ShenandoahVerify)) {
      // The current implementation of swap_remembered_set() copies the write-card-table
      // to the read-card-table. The remembered sets are also swapped for GLOBAL collections
      // so that the verifier works with the correct copy of the card table when verifying.
      // TODO: This path should not really depend on ShenandoahVerify.
      ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_swap_rset);
      _generation->swap_remembered_set();
    }

    if (_generation->is_global()) {
      // A global collection covers old regions, so cancel any in-progress old GC.
      heap->cancel_old_gc();
    } else if (heap->is_concurrent_old_mark_in_progress()) {
      // Purge the SATB buffers, transferring any valid, old pointers to the
      // old generation mark queue. Any pointers in a young region will be
      // abandoned.
      ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_transfer_satb);
      heap->transfer_old_pointers_from_satb();
    }
  }

  if (ShenandoahVerify) {
    heap->verifier()->verify_before_concmark();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  _generation->set_concurrent_mark_in_progress(true);

  start_mark();

  if (_do_old_gc_bootstrap) {
    // Update region state for both young and old regions
    // TODO: We should be able to pull this out of the safepoint for the bootstrap
    // cycle. The top of an old region will only move when a GC cycle evacuates
    // objects into it. When we start an old cycle, we know that nothing can touch
    // the top of old regions.
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
    ShenandoahInitMarkUpdateRegionStateClosure cl;
    heap->parallel_heap_region_iterate(&cl);
    heap->old_generation()->ref_processor()->reset_thread_locals();
  } else {
    // Update region state for only young regions
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
    ShenandoahInitMarkUpdateRegionStateClosure cl;
    _generation->parallel_heap_region_iterate(&cl);
  }

  // Weak reference processing: reset per-thread state and capture whether all
  // soft references must be cleared this cycle.
  ShenandoahReferenceProcessor* rp = _generation->ref_processor();
  rp->reset_thread_locals();
  rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());

  // Make above changes visible to worker threads
  OrderAccess::fence();

  // Arm nmethods for concurrent mark
  ShenandoahCodeRoots::arm_nmethods_for_mark();

  ShenandoahStackWatermark::change_epoch_id();
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_mark();
  }
}
697
// Delegate concurrent root marking to the marker.
void ShenandoahConcurrentGC::op_mark_roots() {
  _mark.mark_concurrent_roots();
}
701
// Delegate the main concurrent marking loop to the marker.
void ShenandoahConcurrentGC::op_mark() {
  _mark.concurrent_mark();
}
705
// Final-mark safepoint work: finish marking, choose the collection set, and
// set up (or skip) the evacuation phase. Runs in the VM thread at a safepoint.
// If the GC was cancelled during concurrent mark, this does nothing beyond
// optional root verification.
void ShenandoahConcurrentGC::op_final_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");

  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_no_forwarded();
  }

  if (!heap->cancelled_gc()) {
    _mark.finish_mark();
    assert(!heap->cancelled_gc(), "STW mark cannot OOM");

    // Notify JVMTI that the tagmap table will need cleaning.
    JvmtiTagMap::set_needs_cleaning();

    // The collection set is chosen by prepare_regions_and_collection_set().
    //
    // TODO: Under severe memory overload conditions that can be checked here, we may want to limit
    // the inclusion of old-gen candidates within the collection set. This would allow us to prioritize efforts on
    // evacuating young-gen. This remediation is most appropriate when old-gen availability is very high (so there
    // are negligible negative impacts from delaying completion of old-gen evacuation) and when young-gen collections
    // are "under duress" (as signalled by very low availability of memory within young-gen, indicating that young-gen
    // collections are not triggering frequently enough).
    _generation->prepare_regions_and_collection_set(true /*concurrent*/);

    // Upon return from prepare_regions_and_collection_set(), certain parameters have been established to govern the
    // evacuation efforts that are about to begin. In particular:
    //
    // heap->get_promoted_reserve() represents the amount of memory within old-gen's available memory that has
    //   been set aside to hold objects promoted from young-gen memory. This represents an estimated percentage
    //   of the live young-gen memory within the collection set. If there is more data ready to be promoted than
    //   can fit within this reserve, the promotion of some objects will be deferred until a subsequent evacuation
    //   pass.
    //
    // heap->get_old_evac_reserve() represents the amount of memory within old-gen's available memory that has been
    //   set aside to hold objects evacuated from the old-gen collection set.
    //
    // heap->get_young_evac_reserve() represents the amount of memory within young-gen's available memory that has
    //   been set aside to hold objects evacuated from the young-gen collection set. Conservatively, this value
    //   equals the entire amount of live young-gen memory within the collection set, even though some of this memory
    //   will likely be promoted.

    // Has to be done after cset selection
    heap->prepare_concurrent_roots();

    if (heap->mode()->is_generational()) {
      if (!heap->collection_set()->is_empty() || heap->old_generation()->has_in_place_promotions()) {
        // Even if the collection set is empty, we need to do evacuation if there are regions to be promoted in place.
        // Concurrent evacuation takes responsibility for registering objects and setting the remembered set cards to dirty.

        LogTarget(Debug, gc, cset) lt;
        if (lt.is_enabled()) {
          ResourceMark rm;
          LogStream ls(lt);
          heap->collection_set()->print_on(&ls);
        }

        if (ShenandoahVerify) {
          heap->verifier()->verify_before_evacuation();
        }

        heap->set_evacuation_in_progress(true);

        // Verify before arming for concurrent processing.
        // Otherwise, verification can trigger stack processing.
        if (ShenandoahVerify) {
          heap->verifier()->verify_during_evacuation();
        }

        // Generational mode may promote objects in place during the evacuation phase.
        // If that is the only reason we are evacuating, we don't need to update references
        // and there will be no forwarded objects on the heap.
        heap->set_has_forwarded_objects(!heap->collection_set()->is_empty());

        // Arm nmethods/stack for concurrent processing
        if (!heap->collection_set()->is_empty()) {
          // Iff objects will be evacuated, arm the nmethod barriers. These will be disarmed
          // under the same condition (established in prepare_concurrent_roots) after strong
          // root evacuation has completed (see op_strong_roots).
          ShenandoahCodeRoots::arm_nmethods_for_evac();
          ShenandoahStackWatermark::change_epoch_id();
        }

        if (ShenandoahPacing) {
          heap->pacer()->setup_for_evac();
        }
      } else {
        // Nothing to evacuate and nothing to promote in place: cycle ends here.
        if (ShenandoahVerify) {
          heap->verifier()->verify_after_concmark();
        }

        if (VerifyAfterGC) {
          Universe::verify();
        }
      }
    } else {
      // Not is_generational()
      if (!heap->collection_set()->is_empty()) {
        LogTarget(Debug, gc, ergo) lt;
        if (lt.is_enabled()) {
          ResourceMark rm;
          LogStream ls(lt);
          heap->collection_set()->print_on(&ls);
        }

        if (ShenandoahVerify) {
          heap->verifier()->verify_before_evacuation();
        }

        heap->set_evacuation_in_progress(true);

        // Verify before arming for concurrent processing.
        // Otherwise, verification can trigger stack processing.
        if (ShenandoahVerify) {
          heap->verifier()->verify_during_evacuation();
        }

        // From here on, we need to update references.
        heap->set_has_forwarded_objects(true);

        // Arm nmethods/stack for concurrent processing
        ShenandoahCodeRoots::arm_nmethods_for_evac();
        ShenandoahStackWatermark::change_epoch_id();

        if (ShenandoahPacing) {
          heap->pacer()->setup_for_evac();
        }
      } else {
        // Empty collection set: no evacuation, the cycle ends after marking.
        if (ShenandoahVerify) {
          heap->verifier()->verify_after_concmark();
        }

        if (VerifyAfterGC) {
          Universe::verify();
        }
      }
    }
  }
}
846
// Thread closure used during concurrent evacuation: finishes stack-watermark
// processing for a Java thread, applying the given oop closure to its roots.
class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
private:
  OopClosure* const _oops;

public:
  ShenandoahConcurrentEvacThreadClosure(OopClosure* oops);
  void do_thread(Thread* thread);
};
855
// Remember the oop closure to apply to each thread's roots.
ShenandoahConcurrentEvacThreadClosure::ShenandoahConcurrentEvacThreadClosure(OopClosure* oops) :
  _oops(oops) {
}
859
860 void ShenandoahConcurrentEvacThreadClosure::do_thread(Thread* thread) {
861 JavaThread* const jt = JavaThread::cast(thread);
862 StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc);
863 ShenandoahThreadLocalData::enable_plab_promotions(thread);
864 }
865
866 class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask {
867 private:
868 ShenandoahJavaThreadsIterator _java_threads;
869
870 public:
871 ShenandoahConcurrentEvacUpdateThreadTask(uint n_workers) :
872 WorkerTask("Shenandoah Evacuate/Update Concurrent Thread Roots"),
873 _java_threads(ShenandoahPhaseTimings::conc_thread_roots, n_workers) {
874 }
875
876 void work(uint worker_id) {
877 Thread* worker_thread = Thread::current();
878 ShenandoahThreadLocalData::enable_plab_promotions(worker_thread);
879
880 // ShenandoahEvacOOMScope has to be setup by ShenandoahContextEvacuateUpdateRootsClosure.
881 // Otherwise, may deadlock with watermark lock
882 ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
883 ShenandoahConcurrentEvacThreadClosure thr_cl(&oops_cl);
884 _java_threads.threads_do(&thr_cl, worker_id);
885 }
886 };
887
888 void ShenandoahConcurrentGC::op_thread_roots() {
889 ShenandoahHeap* const heap = ShenandoahHeap::heap();
890 assert(heap->is_evacuation_in_progress(), "Checked by caller");
891 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
892 ShenandoahConcurrentEvacUpdateThreadTask task(heap->workers()->active_workers());
893 heap->workers()->run_task(&task);
894 }
895
896 void ShenandoahConcurrentGC::op_weak_refs() {
897 ShenandoahHeap* const heap = ShenandoahHeap::heap();
898 assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
899 // Concurrent weak refs processing
900 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
901 if (heap->gc_cause() == GCCause::_wb_breakpoint) {
902 ShenandoahBreakpoint::at_after_reference_processing_started();
903 }
904 _generation->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
905 }
906
// Closure over OopStorage root slots: clears slots whose referent is dead
// (unmarked), and, while evacuation is in progress, forwards slots that point
// into the collection set (see do_oop(oop*)).
class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _mark_context;
  bool _evac_in_progress;   // snapshot of evacuation-in-progress taken at construction
  Thread* const _thread;    // constructing thread, passed to evacuate_object()

public:
  ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
  void do_oop(oop* p);
  void do_oop(narrowOop* p);
};
919
920 ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
921 _heap(ShenandoahHeap::heap()),
922 _mark_context(ShenandoahHeap::heap()->marking_context()),
923 _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
924 _thread(Thread::current()) {
925 }
926
// Process one root slot: clear it if the referent is unmarked (dead), or, while
// evacuation is in progress, replace a cset pointer with the to-space copy
// (evacuating on demand). Updates are atomic because mutators and other GC
// workers may touch the same slot concurrently.
void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
  const oop obj = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(obj)) {
    if (!_mark_context->is_marked(obj)) {
      if (_heap->is_in_active_generation(obj)) {
        // TODO: This worries me. Here we are asserting that an unmarked from-space object is 'correct'.
        // Normally, I would call this a bogus assert, but there seems to be a legitimate use-case for
        // accessing from-space objects during class unloading. However, the from-space object may have
        // been "filled". We've made no effort to prevent old generation classes being unloaded by young
        // gen (and vice-versa).
        shenandoah_assert_correct(p, obj);
        // Unmarked referent in the active generation: the root is dead, clear it.
        ShenandoahHeap::atomic_clear_oop(p, obj);
      }
    } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
      oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      if (resolved == obj) {
        // Not yet forwarded: evacuate the object ourselves.
        resolved = _heap->evacuate_object(obj, _thread);
      }
      shenandoah_assert_not_in_cset_except(p, resolved, _heap->cancelled_gc());
      // CAS-style update: only stores 'resolved' if the slot still holds 'obj'.
      ShenandoahHeap::atomic_update_oop(resolved, p, obj);
    }
  }
}
950
// This closure is only applied to full-width oop root slots; narrow oops
// are not expected here.
void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
  ShouldNotReachHere();
}
954
// Invokes is_alive() on every visited class loader data. The result is
// discarded; NOTE(review): presumably is_alive() computes/caches liveness
// state consumed by later unloading steps — confirm against its callers.
class ShenandoahIsCLDAliveClosure : public CLDClosure {
public:
  void do_cld(ClassLoaderData* cld) {
    cld->is_alive();
  }
};
961
962 class ShenandoahIsNMethodAliveClosure: public NMethodClosure {
963 public:
964 void do_nmethod(nmethod* n) {
965 n->is_unloading();
966 }
1143 assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
1144 ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
1145 heap->workers()->run_task(&task);
1146 heap->set_concurrent_strong_root_in_progress(false);
1147 }
1148
1149 void ShenandoahConcurrentGC::op_cleanup_early() {
1150 ShenandoahHeap::heap()->free_set()->recycle_trash();
1151 }
1152
1153 void ShenandoahConcurrentGC::op_evacuate() {
1154 ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
1155 }
1156
// Transition from evacuation into the update-references phase: flip the phase
// flags, prepare the heap for reference updating, and configure verifier and
// pacer. The order of the flag updates is deliberate.
void ShenandoahConcurrentGC::op_init_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->set_evacuation_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);
  heap->prepare_update_heap_references(true /*concurrent*/);
  heap->set_update_refs_in_progress(true);
  if (ShenandoahVerify) {
    heap->verifier()->verify_before_updaterefs();
  }
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_updaterefs();
  }
}
1170
1171 void ShenandoahConcurrentGC::op_updaterefs() {
1172 ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
1173 }
1174
// Handshake closure that applies the update-refs closure to a handshaken
// Java thread's oops.
class ShenandoahUpdateThreadClosure : public HandshakeClosure {
private:
  ShenandoahUpdateRefsClosure _cl;
public:
  ShenandoahUpdateThreadClosure();
  void do_thread(Thread* thread);
};
1182
// Name the handshake operation for logging/diagnostics.
ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
  HandshakeClosure("Shenandoah Update Thread Roots") {
}
1190 ResourceMark rm;
1191 jt->oops_do(&_cl, nullptr);
1192 }
1193 }
1194
1195 void ShenandoahConcurrentGC::op_update_thread_roots() {
1196 ShenandoahUpdateThreadClosure cl;
1197 Handshake::execute(&cl);
1198 }
1199
// Final update-refs safepoint work: finish concurrent root updating, drain old
// SATB state if needed, update region states, drop the phase flags, verify,
// and rebuild the free set. Runs at a Shenandoah safepoint.
void ShenandoahConcurrentGC::op_final_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
  assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");

  heap->finish_concurrent_roots();

  // Clear cancelled GC, if set. On cancellation path, the block before would handle
  // everything.
  if (heap->cancelled_gc()) {
    heap->clear_cancelled_gc(true /* clear oom handler */);
  }

  // Has to be done before cset is clear
  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_in_to_space();
  }

  if (heap->mode()->is_generational() && heap->is_concurrent_old_mark_in_progress()) {
    // When the SATB barrier is left on to support concurrent old gen mark, it may pick up writes to
    // objects in the collection set. After those objects are evacuated, the pointers in the
    // SATB are no longer safe. Once we have finished update references, we are guaranteed that
    // no more writes to the collection set are possible.
    //
    // This will transfer any old pointers in _active_ regions from the SATB to the old gen
    // mark queues. All other pointers will be discarded. This would also discard any pointers
    // in old regions that were included in a mixed evacuation. We aren't using the SATB filter
    // methods here because we cannot control when they execute. If the SATB filter runs _after_
    // a region has been recycled, we will not be able to detect the bad pointer.
    //
    // We are not concerned about skipping this step in abbreviated cycles because regions
    // with no live objects cannot have been written to and so cannot have entries in the SATB
    // buffers.
    heap->transfer_old_pointers_from_satb();
  }

  heap->update_heap_region_states(true /*concurrent*/);

  heap->set_update_refs_in_progress(false);
  heap->set_has_forwarded_objects(false);

  // Aging_cycle is only relevant during evacuation cycle for individual objects and during final mark for
  // entire regions. Both of these relevant operations occur before final update refs.
  heap->set_aging_cycle(false);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_updaterefs();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  heap->rebuild_free_set(true /*concurrent*/);
}
1255
// Final-roots safepoint work for abbreviated cycles (no evacuation/update-refs
// happened): drop the remaining phase flags and, in generational mode, drain
// old SATB state and adjust region ages.
void ShenandoahConcurrentGC::op_final_roots() {

  ShenandoahHeap *heap = ShenandoahHeap::heap();
  heap->set_concurrent_weak_root_in_progress(false);
  heap->set_evacuation_in_progress(false);

  if (heap->mode()->is_generational()) {
    // If the cycle was shortened for having enough immediate garbage, this could be
    // the last GC safepoint before concurrent marking of old resumes. We must be sure
    // that old mark threads don't see any pointers to garbage in the SATB buffers.
    if (heap->is_concurrent_old_mark_in_progress()) {
      heap->transfer_old_pointers_from_satb();
    }

    ShenandoahMarkingContext *ctx = heap->complete_marking_context();
    for (size_t i = 0; i < heap->num_regions(); i++) {
      ShenandoahHeapRegion *r = heap->get_region(i);
      if (r->is_active() && r->is_young()) {
        HeapWord* tams = ctx->top_at_mark_start(r);
        HeapWord* top = r->top();
        if (top > tams) {
          // Allocations happened in this region since marking started: treat it
          // as freshly used and restart its age.
          r->reset_age();
        } else if (heap->is_aging_cycle()) {
          // Untouched region in an aging cycle: age it toward promotion.
          r->increment_age();
        }
      }
    }
  }
}
1285
1286 void ShenandoahConcurrentGC::entry_global_coalesce_and_fill() {
1287 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1288
1289 const char* msg = "Coalescing and filling old regions in global collect";
1290 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_coalesce_and_fill);
1291
1292 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
1293 EventMark em("%s", msg);
1294 ShenandoahWorkerScope scope(heap->workers(),
1295 ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
1296 "concurrent coalesce and fill");
1297
1298 op_global_coalesce_and_fill();
1299 }
1300
// Coalesce and fill old regions so they are parsable again for the next
// remembered-set scan (class unloading can leave unparsable dead space; see
// the caller in the generational branch of the cycle).
// NOTE(review): the bool argument presumably means "concurrent" — confirm
// against ShenandoahGenerationalHeap::coalesce_and_fill_old_regions().
void ShenandoahConcurrentGC::op_global_coalesce_and_fill() {
  ShenandoahGenerationalHeap::heap()->coalesce_and_fill_old_regions(true);
}
1304
1305 void ShenandoahConcurrentGC::op_cleanup_complete() {
1306 ShenandoahHeap::heap()->free_set()->recycle_trash();
1307 }
1308
1309 bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
1310 if (ShenandoahHeap::heap()->cancelled_gc()) {
1311 _degen_point = point;
1312 return true;
1313 }
1314 return false;
1315 }
1316
1317 const char* ShenandoahConcurrentGC::init_mark_event_message() const {
1318 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1319 assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1320 if (heap->unload_classes()) {
1321 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Init Mark", " (unload classes)");
1322 } else {
1323 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Init Mark", "");
1324 }
1325 }
1326
1327 const char* ShenandoahConcurrentGC::final_mark_event_message() const {
1328 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1329 assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
1330 "Should not have forwarded objects during final mark, unless old gen concurrent mark is running");
1331
1332 if (heap->unload_classes()) {
1333 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Mark", " (unload classes)");
1334 } else {
1335 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Mark", "");
1336 }
1337 }
1338
1339 const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
1340 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1341 assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
1342 "Should not have forwarded objects concurrent mark, unless old gen concurrent mark is running");
1343 if (heap->unload_classes()) {
1344 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent marking", " (unload classes)");
1345 } else {
1346 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent marking", "");
1347 }
1348 }
|