1 /*
2 * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26
27 #include "gc/shared/barrierSetNMethod.hpp"
28 #include "gc/shared/collectorCounters.hpp"
29 #include "gc/shared/continuationGCSupport.inline.hpp"
30 #include "gc/shenandoah/shenandoahBreakpoint.hpp"
31 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
32 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
33 #include "gc/shenandoah/shenandoahFreeSet.hpp"
34 #include "gc/shenandoah/shenandoahLock.hpp"
35 #include "gc/shenandoah/shenandoahMark.inline.hpp"
36 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
37 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
38 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
39 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
40 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
41 #include "gc/shenandoah/shenandoahStackWatermark.hpp"
42 #include "gc/shenandoah/shenandoahUtils.hpp"
43 #include "gc/shenandoah/shenandoahVerifier.hpp"
44 #include "gc/shenandoah/shenandoahVMOperations.hpp"
45 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
46 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
47 #include "memory/allocation.hpp"
48 #include "prims/jvmtiTagMap.hpp"
49 #include "runtime/vmThread.hpp"
50 #include "utilities/events.hpp"
51
52 // Breakpoint support
53 class ShenandoahBreakpointGCScope : public StackObj {
68 }
69 };
70
71 class ShenandoahBreakpointMarkScope : public StackObj {
72 private:
73 const GCCause::Cause _cause;
74 public:
75 ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
76 if (_cause == GCCause::_wb_breakpoint) {
77 ShenandoahBreakpoint::at_after_marking_started();
78 }
79 }
80
81 ~ShenandoahBreakpointMarkScope() {
82 if (_cause == GCCause::_wb_breakpoint) {
83 ShenandoahBreakpoint::at_before_marking_completed();
84 }
85 }
86 };
87
// Construct a concurrent GC cycle driver. The degeneration point starts out
// unset; check_cancellation_and_abort() records the phase at which the cycle
// was cancelled so a degenerated GC knows where to resume.
ShenandoahConcurrentGC::ShenandoahConcurrentGC() :
  _mark(),
  _degen_point(ShenandoahDegenPoint::_degenerated_unset) {
}
92
// Returns the phase at which this cycle was cancelled (or _degenerated_unset
// if collect() has not recorded a cancellation).
ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
  return _degen_point;
}
96
// Cancel an in-flight concurrent cycle by cancelling concurrent marking.
void ShenandoahConcurrentGC::cancel() {
  ShenandoahConcurrentMark::cancel();
}
100
// Drive one complete concurrent collection cycle: reset, init-mark (STW),
// concurrent mark, final-mark (STW), concurrent root/weak processing,
// evacuation and update-refs. Returns true when the cycle completed; returns
// false on cancellation, with _degen_point recording the phase from which a
// degenerated (STW) cycle must resume. The order of the phases below is
// load-bearing; do not reorder.
bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);

  // Reset for upcoming marking
  entry_reset();

  // Start initial mark under STW
  vmop_entry_init_mark();

  {
    ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);
    // Concurrent mark roots
    entry_mark_roots();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_outside_cycle)) return false;

    // Continue concurrent mark
    entry_mark();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) return false;
  }

  // Complete marking under STW, and start evacuation
  vmop_entry_final_mark();

  // Concurrent stack processing
  if (heap->is_evacuation_in_progress()) {
    entry_thread_roots();
  }

  // Process weak roots that might still point to regions that would be broken by cleanup
  if (heap->is_concurrent_weak_root_in_progress()) {
    entry_weak_refs();
    entry_weak_roots();
  }

  // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
  // the space. This would be the last action if there is nothing to evacuate.
  entry_cleanup_early();

  {
    // Log free-set status under the heap lock for a consistent snapshot.
    ShenandoahHeapLocker locker(heap->lock());
    heap->free_set()->log_status();
  }

  // Perform concurrent class unloading
  if (heap->unload_classes() &&
      heap->is_concurrent_weak_root_in_progress()) {
    entry_class_unloading();
  }

  // Processing strong roots
  // This may be skipped if there is nothing to update/evacuate.
  // If so, strong_root_in_progress would be unset.
  if (heap->is_concurrent_strong_root_in_progress()) {
    entry_strong_roots();
  }

  // Continue the cycle with evacuation and optional update-refs.
  // This may be skipped if there is nothing to evacuate.
  // If so, evac_in_progress would be unset by collection set preparation code.
  if (heap->is_evacuation_in_progress()) {
    // Concurrently evacuate
    entry_evacuate();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) return false;

    // Perform update-refs phase.
    vmop_entry_init_updaterefs();
    entry_updaterefs();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;

    // Concurrent update thread roots
    entry_update_thread_roots();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;

    vmop_entry_final_updaterefs();

    // Update references freed up collection set, kick the cleanup to reclaim the space.
    entry_cleanup_complete();
  } else {
    // Nothing was evacuated; finish the cycle at a final-roots safepoint.
    vmop_entry_final_roots();
  }

  return true;
}
185
// Schedule the init-mark safepoint operation. Gross timing here includes the
// time to reach the safepoint; net timing is taken inside the VM operation.
void ShenandoahConcurrentGC::vmop_entry_init_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahInitMark op(this);
  VMThread::execute(&op); // jump to entry_init_mark() under safepoint
}
195
196 void ShenandoahConcurrentGC::vmop_entry_final_mark() {
197 ShenandoahHeap* const heap = ShenandoahHeap::heap();
198 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
199 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);
200
201 heap->try_inject_alloc_failure();
202 VM_ShenandoahFinalMarkStartEvac op(this);
272 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
273 EventMark em("%s", msg);
274
275 ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
276 ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
277 "final reference update");
278
279 op_final_updaterefs();
280 }
281
// Pause-phase wrapper for final root processing (taken instead of update-refs
// when nothing was evacuated): sets up timing/event reporting, then runs
// op_final_roots().
void ShenandoahConcurrentGC::entry_final_roots() {
  static const char* msg = "Pause Final Roots";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots);
  EventMark em("%s", msg);

  op_final_roots();
}
289
// Timed entry wrapper for the concurrent reset phase: sets up counters, phase
// timing, event reporting and the worker scope, then runs op_reset().
void ShenandoahConcurrentGC::entry_reset() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent reset";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
                              "concurrent reset");

  heap->try_inject_alloc_failure();
  op_reset();
}
304
305 void ShenandoahConcurrentGC::entry_mark_roots() {
306 ShenandoahHeap* const heap = ShenandoahHeap::heap();
307 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
308 const char* msg = "Concurrent marking roots";
309 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
310 EventMark em("%s", msg);
311
312 ShenandoahWorkerScope scope(heap->workers(),
313 ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
314 "concurrent marking roots");
315
316 heap->try_inject_alloc_failure();
317 op_mark_roots();
318 }
319
320 void ShenandoahConcurrentGC::entry_mark() {
321 ShenandoahHeap* const heap = ShenandoahHeap::heap();
322 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
463 op_updaterefs();
464 }
465
// Timed entry wrapper for the final concurrent cleanup phase (after
// update-refs); runs op_cleanup_complete() on the calling thread.
void ShenandoahConcurrentGC::entry_cleanup_complete() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent cleanup";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
  EventMark em("%s", msg);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  op_cleanup_complete();
}
477
// Reset heap state in preparation for marking (bitmaps etc. via
// prepare_gc()), optionally priming the pacer for the reset phase.
void ShenandoahConcurrentGC::op_reset() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_reset();
  }

  heap->prepare_gc();
}
486
// Region closure run at init-mark: captures top-at-mark-start (TAMS) for
// active regions so marking can distinguish objects allocated after the
// mark started. Safe to run by parallel workers.
class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
public:
  ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
    if (r->is_active()) {
      // Check if region needs updating its TAMS. We have updated it already during concurrent
      // reset, so it is very likely we don't need to do another write here.
      if (_ctx->top_at_mark_start(r) != r->top()) {
        _ctx->capture_top_at_mark_start(r);
      }
    } else {
      assert(_ctx->top_at_mark_start(r) == r->top(),
             "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
    }
  }

  bool is_thread_safe() { return true; }
};
509
// Kick off the concurrent marker's internal state (delegates to the mark).
void ShenandoahConcurrentGC::start_mark() {
  _mark.start_mark();
}
513
// Body of the init-mark safepoint: verifies preconditions, flips the
// concurrent-mark flag, captures per-region TAMS, primes reference
// processing, arms nmethods/stack watermarks and the pacer. Executed by the
// VM thread at a Shenandoah safepoint only.
void ShenandoahConcurrentGC::op_init_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");

  assert(heap->marking_context()->is_bitmap_clear(), "need clear marking bitmap");
  assert(!heap->marking_context()->is_complete(), "should not be complete");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");

  if (ShenandoahVerify) {
    heap->verifier()->verify_before_concmark();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  heap->set_concurrent_mark_in_progress(true);

  start_mark();

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
    ShenandoahInitMarkUpdateRegionStateClosure cl;
    heap->parallel_heap_region_iterate(&cl);
  }

  // Weak reference processing
  ShenandoahReferenceProcessor* rp = heap->ref_processor();
  rp->reset_thread_locals();
  rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());

  // Make above changes visible to worker threads
  OrderAccess::fence();

  // Arm nmethods for concurrent mark
  ShenandoahCodeRoots::arm_nmethods_for_mark();

  ShenandoahStackWatermark::change_epoch_id();
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_mark();
  }
}
557
// Concurrent root marking: delegates to the concurrent marker.
void ShenandoahConcurrentGC::op_mark_roots() {
  _mark.mark_concurrent_roots();
}
561
// Main concurrent marking loop: delegates to the concurrent marker.
void ShenandoahConcurrentGC::op_mark() {
  _mark.concurrent_mark();
}
565
// Body of the final-mark safepoint: finish marking (unless the GC was
// cancelled), select the collection set, prepare concurrent roots, and — if
// there is anything to evacuate — flip into evacuation mode and arm
// nmethods/stack watermarks. Executed at a Shenandoah safepoint.
void ShenandoahConcurrentGC::op_final_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");

  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_no_forwarded();
  }

  if (!heap->cancelled_gc()) {
    _mark.finish_mark();
    assert(!heap->cancelled_gc(), "STW mark cannot OOM");

    // Notify JVMTI that the tagmap table will need cleaning.
    JvmtiTagMap::set_needs_cleaning();

    heap->prepare_regions_and_collection_set(true /*concurrent*/);

    // Has to be done after cset selection
    heap->prepare_concurrent_roots();

    if (!heap->collection_set()->is_empty()) {
      if (ShenandoahVerify) {
        heap->verifier()->verify_before_evacuation();
      }

      heap->set_evacuation_in_progress(true);
      // From here on, we need to update references.
      heap->set_has_forwarded_objects(true);

      // Verify before arming for concurrent processing.
      // Otherwise, verification can trigger stack processing.
      if (ShenandoahVerify) {
        heap->verifier()->verify_during_evacuation();
      }

      // Arm nmethods/stack for concurrent processing
      ShenandoahCodeRoots::arm_nmethods_for_evac();
      ShenandoahStackWatermark::change_epoch_id();

      if (ShenandoahPacing) {
        heap->pacer()->setup_for_evac();
      }
    } else {
      // Empty collection set: no evacuation; just verify post-mark state.
      if (ShenandoahVerify) {
        heap->verifier()->verify_after_concmark();
      }

      if (VerifyAfterGC) {
        Universe::verify();
      }
    }
  }
}
620
// Thread closure that finishes stack-watermark processing for a Java thread,
// applying the given oop closure to its stack roots.
class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
private:
  // Oop closure applied to each thread's stack frames (not owned).
  OopClosure* const _oops;

public:
  ShenandoahConcurrentEvacThreadClosure(OopClosure* oops);
  void do_thread(Thread* thread);
};
629
// Store the (borrowed) oop closure to apply to each visited thread.
ShenandoahConcurrentEvacThreadClosure::ShenandoahConcurrentEvacThreadClosure(OopClosure* oops) :
  _oops(oops) {
}
633
// Complete any pending stack watermark processing for this Java thread,
// running _oops over the remaining unprocessed frames.
void ShenandoahConcurrentEvacThreadClosure::do_thread(Thread* thread) {
  JavaThread* const jt = JavaThread::cast(thread);
  StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc);
}
638
// Worker task that evacuates/updates thread stack roots concurrently,
// striping Java threads across the given number of workers.
class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask {
private:
  ShenandoahJavaThreadsIterator _java_threads;

public:
  ShenandoahConcurrentEvacUpdateThreadTask(uint n_workers) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Thread Roots"),
    _java_threads(ShenandoahPhaseTimings::conc_thread_roots, n_workers) {
  }

  void work(uint worker_id) {
    // ShenandoahEvacOOMScope has to be setup by ShenandoahContextEvacuateUpdateRootsClosure.
    // Otherwise, may deadlock with watermark lock
    ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
    ShenandoahConcurrentEvacThreadClosure thr_cl(&oops_cl);
    _java_threads.threads_do(&thr_cl, worker_id);
  }
};
657
// Concurrently process thread stack roots (evacuate/update) with the full
// set of active workers. Only runs while evacuation is in progress.
void ShenandoahConcurrentGC::op_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_evacuation_in_progress(), "Checked by caller");
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
  ShenandoahConcurrentEvacUpdateThreadTask task(heap->workers()->active_workers());
  heap->workers()->run_task(&task);
}
665
// Concurrent java.lang.ref reference processing; also notifies the whitebox
// breakpoint hook when this GC was breakpoint-induced.
void ShenandoahConcurrentGC::op_weak_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
  // Concurrent weak refs processing
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
  if (heap->gc_cause() == GCCause::_wb_breakpoint) {
    ShenandoahBreakpoint::at_after_reference_processing_started();
  }
  heap->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
}
676
// Oop closure for OopStorage-backed weak roots: clears slots whose referents
// died during marking, and evacuates/updates slots pointing into the
// collection set. Only the uncompressed (oop*) variant is expected; root
// storage does not hold narrow oops here.
class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _mark_context;
  // Snapshot of evacuation state, taken once at construction.
  bool _evac_in_progress;
  // Calling thread, passed to evacuate_object() for allocation.
  Thread* const _thread;

public:
  ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
  void do_oop(oop* p);
  void do_oop(narrowOop* p);
};
689
// Capture heap, marking context, evacuation state and the current thread
// once, so per-oop work stays cheap.
ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
  _heap(ShenandoahHeap::heap()),
  _mark_context(ShenandoahHeap::heap()->marking_context()),
  _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
  _thread(Thread::current()) {
}
696
697 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
698 const oop obj = RawAccess<>::oop_load(p);
699 if (!CompressedOops::is_null(obj)) {
700 if (!_mark_context->is_marked(obj)) {
701 shenandoah_assert_correct(p, obj);
702 ShenandoahHeap::atomic_clear_oop(p, obj);
703 } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
704 oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
705 if (resolved == obj) {
706 resolved = _heap->evacuate_object(obj, _thread);
707 }
708 ShenandoahHeap::atomic_update_oop(resolved, p, obj);
709 assert(_heap->cancelled_gc() ||
710 _mark_context->is_marked(resolved) && !_heap->in_collection_set(resolved),
711 "Sanity");
712 }
713 }
714 }
715
// Narrow-oop roots are not expected from OopStorage here.
void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
  ShouldNotReachHere();
}
719
// CLD closure that queries is_alive() on each class loader data; the result
// is discarded — presumably the call is made for its internal side effect of
// computing/caching liveness during concurrent unloading (NOTE(review):
// confirm against ClassLoaderData::is_alive()).
class ShenandoahIsCLDAliveClosure : public CLDClosure {
public:
  void do_cld(ClassLoaderData* cld) {
    cld->is_alive();
  }
};
726
727 class ShenandoahIsNMethodAliveClosure: public NMethodClosure {
728 public:
729 void do_nmethod(nmethod* n) {
730 n->is_unloading();
731 }
799 }
800 }
801 };
802
// Concurrently evacuate/update/cleanup weak roots, then handshake with all
// Java threads so dead oops are flushed before regions are reclaimed.
void ShenandoahConcurrentGC::op_weak_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
  // Concurrent weak root processing
  {
    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_work);
    ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots_work);
    ShenandoahConcurrentWeakRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_weak_roots_work);
    heap->workers()->run_task(&task);
  }

  // Perform handshake to flush out dead oops
  {
    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_rendezvous);
    heap->rendezvous_threads();
  }
}
820
// Concurrent class unloading; only valid while weak root processing is in
// progress and class unloading was requested for this cycle.
void ShenandoahConcurrentGC::op_class_unloading() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert (heap->is_concurrent_weak_root_in_progress() &&
          heap->unload_classes(),
          "Checked by caller");
  heap->do_class_unloading();
}
828
829 class ShenandoahEvacUpdateCodeCacheClosure : public NMethodClosure {
830 private:
831 BarrierSetNMethod* const _bs;
832 ShenandoahEvacuateUpdateMetadataClosure _cl;
833
834 public:
835 ShenandoahEvacUpdateCodeCacheClosure() :
836 _bs(BarrierSet::barrier_set()->barrier_set_nmethod()),
837 _cl() {
838 }
908 assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
909 ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
910 heap->workers()->run_task(&task);
911 heap->set_concurrent_strong_root_in_progress(false);
912 }
913
// Reclaim immediate-garbage (trash) regions found at final mark.
void ShenandoahConcurrentGC::op_cleanup_early() {
  ShenandoahHeap::heap()->free_set()->recycle_trash();
}
917
// Concurrently evacuate live objects out of the collection set.
void ShenandoahConcurrentGC::op_evacuate() {
  ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
}
921
// Body of the init-update-refs safepoint: evacuation is done, so flip from
// evacuation to update-refs mode and prime the pacer for it.
void ShenandoahConcurrentGC::op_init_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->set_evacuation_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);
  heap->prepare_update_heap_references(true /*concurrent*/);
  heap->set_update_refs_in_progress(true);

  if (ShenandoahPacing) {
    heap->pacer()->setup_for_updaterefs();
  }
}
933
// Concurrently update heap references to point at evacuated copies.
void ShenandoahConcurrentGC::op_updaterefs() {
  ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
}
937
// Handshake closure that updates references held in a Java thread's oops
// (stack and thread-local roots) during the update-refs phase.
class ShenandoahUpdateThreadClosure : public HandshakeClosure {
private:
  ShenandoahUpdateRefsClosure _cl;
public:
  ShenandoahUpdateThreadClosure();
  void do_thread(Thread* thread);
};
945
// Name the handshake operation for logging/diagnostics.
ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
  HandshakeClosure("Shenandoah Update Thread Roots") {
}
953 ResourceMark rm;
954 jt->oops_do(&_cl, nullptr);
955 }
956 }
957
// Update thread roots via a global handshake with all Java threads.
void ShenandoahConcurrentGC::op_update_thread_roots() {
  ShenandoahUpdateThreadClosure cl;
  Handshake::execute(&cl);
}
962
// Body of the final-update-refs safepoint: finish concurrent root updating,
// clear any stale cancellation, update region states, drop the
// update-refs/forwarded flags, verify, and rebuild the free set.
void ShenandoahConcurrentGC::op_final_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
  assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");

  heap->finish_concurrent_roots();

  // Clear cancelled GC, if set. On cancellation path, the block before would handle
  // everything.
  if (heap->cancelled_gc()) {
    heap->clear_cancelled_gc();
  }

  // Has to be done before cset is clear
  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_in_to_space();
  }

  heap->update_heap_region_states(true /*concurrent*/);

  heap->set_update_refs_in_progress(false);
  heap->set_has_forwarded_objects(false);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_updaterefs();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  heap->rebuild_free_set(true /*concurrent*/);
}
996
// Body of the final-roots safepoint (no-evacuation path): just drop the
// concurrent weak root flag.
void ShenandoahConcurrentGC::op_final_roots() {
  ShenandoahHeap::heap()->set_concurrent_weak_root_in_progress(false);
}
1000
// Reclaim collection-set regions freed by the update-refs phase.
void ShenandoahConcurrentGC::op_cleanup_complete() {
  ShenandoahHeap::heap()->free_set()->recycle_trash();
}
1004
1005 bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
1006 if (ShenandoahHeap::heap()->cancelled_gc()) {
1007 _degen_point = point;
1008 return true;
1009 }
1010 return false;
1011 }
1012
1013 const char* ShenandoahConcurrentGC::init_mark_event_message() const {
1014 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1015 assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1016 if (heap->unload_classes()) {
1017 return "Pause Init Mark (unload classes)";
1018 } else {
1019 return "Pause Init Mark";
1020 }
1021 }
1022
1023 const char* ShenandoahConcurrentGC::final_mark_event_message() const {
1024 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1025 assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1026 if (heap->unload_classes()) {
1027 return "Pause Final Mark (unload classes)";
1028 } else {
1029 return "Pause Final Mark";
1030 }
1031 }
1032
1033 const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
1034 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1035 assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1036 if (heap->unload_classes()) {
1037 return "Concurrent marking (unload classes)";
1038 } else {
1039 return "Concurrent marking";
1040 }
1041 }
|
1 /*
2 * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
3 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27
28 #include "gc/shared/barrierSetNMethod.hpp"
29 #include "gc/shared/collectorCounters.hpp"
30 #include "gc/shared/continuationGCSupport.inline.hpp"
31 #include "gc/shenandoah/shenandoahBreakpoint.hpp"
32 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
33 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
34 #include "gc/shenandoah/shenandoahFreeSet.hpp"
35 #include "gc/shenandoah/shenandoahGeneration.hpp"
36 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
37 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
38 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
39 #include "gc/shenandoah/shenandoahLock.hpp"
40 #include "gc/shenandoah/shenandoahMark.inline.hpp"
41 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
42 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
43 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
44 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
45 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
46 #include "gc/shenandoah/shenandoahStackWatermark.hpp"
47 #include "gc/shenandoah/shenandoahUtils.hpp"
48 #include "gc/shenandoah/shenandoahVerifier.hpp"
49 #include "gc/shenandoah/shenandoahVMOperations.hpp"
50 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
51 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
52 #include "memory/allocation.hpp"
53 #include "prims/jvmtiTagMap.hpp"
54 #include "runtime/vmThread.hpp"
55 #include "utilities/events.hpp"
56
57 // Breakpoint support
58 class ShenandoahBreakpointGCScope : public StackObj {
73 }
74 };
75
76 class ShenandoahBreakpointMarkScope : public StackObj {
77 private:
78 const GCCause::Cause _cause;
79 public:
80 ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
81 if (_cause == GCCause::_wb_breakpoint) {
82 ShenandoahBreakpoint::at_after_marking_started();
83 }
84 }
85
86 ~ShenandoahBreakpointMarkScope() {
87 if (_cause == GCCause::_wb_breakpoint) {
88 ShenandoahBreakpoint::at_before_marking_completed();
89 }
90 }
91 };
92
// Construct a concurrent cycle driver for the given generation.
// do_old_gc_bootstrap indicates this young cycle bootstraps an old-gen mark.
// _abbreviated is set later if the cycle finishes without evacuation.
ShenandoahConcurrentGC::ShenandoahConcurrentGC(ShenandoahGeneration* generation, bool do_old_gc_bootstrap) :
  _mark(generation),
  _degen_point(ShenandoahDegenPoint::_degenerated_unset),
  _abbreviated(false),
  _do_old_gc_bootstrap(do_old_gc_bootstrap),
  _generation(generation) {
}
100
// Returns the phase at which this cycle was cancelled (or _degenerated_unset
// if collect() has not recorded a cancellation).
ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
  return _degen_point;
}
104
// Drive one complete (possibly generational) concurrent collection cycle.
// Returns true when the cycle finished; returns false on cancellation, with
// _degen_point recording where a degenerated cycle must resume. Phase order
// is load-bearing; do not reorder.
bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);

  // Reset for upcoming marking
  entry_reset();

  // Start initial mark under STW
  vmop_entry_init_mark();

  {
    ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);

    // Reset task queue stats here, rather than in mark_concurrent_roots,
    // because remembered set scan will `push` oops into the queues and
    // resetting after this happens will lose those counts.
    TASKQUEUE_STATS_ONLY(_mark.task_queues()->reset_taskqueue_stats());

    // Concurrent remembered set scanning
    entry_scan_remembered_set();
    // TODO: When RS scanning yields, we will need a check_cancellation_and_abort() degeneration point here.

    // Concurrent mark roots
    entry_mark_roots();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_roots)) {
      return false;
    }

    // Continue concurrent mark
    entry_mark();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) {
      return false;
    }
  }

  // Complete marking under STW, and start evacuation
  vmop_entry_final_mark();

  // If the GC was cancelled just before final mark (but after the preceding cancellation check),
  // then the safepoint operation will do nothing and the concurrent mark will still be in progress.
  // In this case it is safe (and necessary) to resume the degenerated cycle from the marking phase.
  //
  // On the other hand, if the GC is cancelled after final mark (but before this check), then the
  // final mark safepoint operation will have finished the mark (setting concurrent mark in progress
  // to false). In this case (final mark has completed), we need control to fall past the next
  // cancellation check and resume the degenerated cycle from the evacuation phase.
  if (_generation->is_concurrent_mark_in_progress()) {
    // If the concurrent mark is still in progress after the final mark safepoint, then the GC has
    // been cancelled. The degenerated cycle must resume from the marking phase. Without this check,
    // the non-generational mode may fall all the way to the end of this collect routine without
    // having done anything (besides mark most of the heap). Without having collected anything, we
    // can expect an 'out of cycle' degenerated GC which will again mark the entire heap. This is
    // not optimal.
    // For the generational mode, we cannot allow this. The generational mode relies on marking
    // (including the final mark) to rebuild portions of the card table. If the generational mode does
    // not complete marking after it has swapped the card tables, the root set on subsequent GCs will
    // be incomplete, heap corruption may follow.
    bool cancelled = check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark);
    assert(cancelled, "GC must have been cancelled between concurrent and final mark");
    return false;
  }

  // Concurrent stack processing
  if (heap->is_evacuation_in_progress()) {
    entry_thread_roots();
  }

  // Process weak roots that might still point to regions that would be broken by cleanup
  if (heap->is_concurrent_weak_root_in_progress()) {
    entry_weak_refs();
    entry_weak_roots();
  }

  // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
  // the space. This would be the last action if there is nothing to evacuate. Note that
  // we will not age young-gen objects in the case that we skip evacuation.
  entry_cleanup_early();

  {
    // TODO: Not sure there is value in logging free-set status right here. Note that whenever the free set is rebuilt,
    // it logs the newly rebuilt status.
    ShenandoahHeapLocker locker(heap->lock());
    heap->free_set()->log_status();
  }

  // Perform concurrent class unloading
  if (heap->unload_classes() &&
      heap->is_concurrent_weak_root_in_progress()) {
    entry_class_unloading();
  }

  // Processing strong roots
  // This may be skipped if there is nothing to update/evacuate.
  // If so, strong_root_in_progress would be unset.
  if (heap->is_concurrent_strong_root_in_progress()) {
    entry_strong_roots();
  }

  // Continue the cycle with evacuation and optional update-refs.
  // This may be skipped if there is nothing to evacuate.
  // If so, evac_in_progress would be unset by collection set preparation code.
  if (heap->is_evacuation_in_progress()) {
    // Concurrently evacuate
    entry_evacuate();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) {
      return false;
    }
  }

  if (heap->has_forwarded_objects()) {
    // Perform update-refs phase.
    vmop_entry_init_updaterefs();
    entry_updaterefs();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) {
      return false;
    }

    // Concurrent update thread roots
    entry_update_thread_roots();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) {
      return false;
    }

    vmop_entry_final_updaterefs();

    // Update references freed up collection set, kick the cleanup to reclaim the space.
    entry_cleanup_complete();
  } else {
    // We chose not to evacuate because we found sufficient immediate garbage. Note that we
    // do not check for cancellation here because, at this point, the cycle is effectively
    // complete. If the cycle has been cancelled here, the control thread will detect it
    // on its next iteration and run a degenerated young cycle.
    vmop_entry_final_roots();
    _abbreviated = true;
  }

  // We defer generation resizing actions until after cset regions have been recycled. We do this even following an
  // abbreviated cycle.
  if (heap->mode()->is_generational()) {
    ShenandoahGenerationalHeap::heap()->complete_concurrent_cycle();
  }
  return true;
}
249
// Schedules the Init Mark safepoint operation. The VM thread will run
// VM_ShenandoahInitMark, which calls back into entry_init_mark()/op_init_mark()
// while all Java threads are stopped.
void ShenandoahConcurrentGC::vmop_entry_init_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  // Init mark is a pause; account it against the stop-the-world collection counters.
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  // "Gross" timing includes the time to reach the safepoint, not just the work inside it.
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);

  // Test hook: may simulate an allocation failure to exercise degenerated-GC paths.
  heap->try_inject_alloc_failure();
  VM_ShenandoahInitMark op(this);
  VMThread::execute(&op); // jump to entry_init_mark() under safepoint
}
259
260 void ShenandoahConcurrentGC::vmop_entry_final_mark() {
261 ShenandoahHeap* const heap = ShenandoahHeap::heap();
262 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
263 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);
264
265 heap->try_inject_alloc_failure();
266 VM_ShenandoahFinalMarkStartEvac op(this);
336 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
337 EventMark em("%s", msg);
338
339 ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
340 ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
341 "final reference update");
342
343 op_final_updaterefs();
344 }
345
// Sets up the "Pause Final Roots" phase timing and JFR event scaffolding,
// then delegates the actual work to op_final_roots().
void ShenandoahConcurrentGC::entry_final_roots() {
  static const char* msg = "Pause Final Roots";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots);
  EventMark em("%s", msg);

  op_final_roots();
}
353
// Concurrent reset: prepares the generation (and, for a bootstrap cycle, the old
// generation) for a new marking cycle. Runs concurrently with mutators.
void ShenandoahConcurrentGC::entry_reset() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  // Test hook: may simulate an allocation failure to exercise degenerated-GC paths.
  heap->try_inject_alloc_failure();

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  {
    // Scoped so the phase/event trackers close before the optional old-gen reset below.
    static const char* msg = "Concurrent reset";
    ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
    EventMark em("%s", msg);

    ShenandoahWorkerScope scope(heap->workers(),
                                ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
                                msg);
    op_reset();
  }

  // A bootstrap cycle additionally prepares the old generation, tracked as a
  // separate timing phase (conc_reset_old).
  if (_do_old_gc_bootstrap) {
    static const char* msg = "Concurrent reset (OLD)";
    ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset_old);
    ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                                ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
                                msg);
    EventMark em("%s", msg);

    heap->old_generation()->prepare_gc();
  }
}
381
// Concurrent remembered-set scan. Only young collections scan the remembered
// set (old-to-young pointers); for other generations this is a no-op.
void ShenandoahConcurrentGC::entry_scan_remembered_set() {
  if (_generation->is_young()) {
    ShenandoahHeap* const heap = ShenandoahHeap::heap();
    TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
    const char* msg = "Concurrent remembered set scanning";
    ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::init_scan_rset);
    EventMark em("%s", msg);

    ShenandoahWorkerScope scope(heap->workers(),
                                ShenandoahWorkerPolicy::calc_workers_for_rs_scanning(),
                                msg);

    // Test hook: may simulate an allocation failure to exercise degenerated-GC paths.
    heap->try_inject_alloc_failure();
    _generation->scan_remembered_set(true /* is_concurrent */);
  }
}
398
// Sets up timing/event scaffolding and worker threads for concurrent root
// marking, then delegates to op_mark_roots().
void ShenandoahConcurrentGC::entry_mark_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = "Concurrent marking roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent marking roots");

  // Test hook: may simulate an allocation failure to exercise degenerated-GC paths.
  heap->try_inject_alloc_failure();
  op_mark_roots();
}
413
414 void ShenandoahConcurrentGC::entry_mark() {
415 ShenandoahHeap* const heap = ShenandoahHeap::heap();
416 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
557 op_updaterefs();
558 }
559
// Final concurrent cleanup after update-refs: recycles the now-empty collection
// set regions. Logs heap usage since this phase returns memory to the free set.
void ShenandoahConcurrentGC::entry_cleanup_complete() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent cleanup";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
  EventMark em("%s", msg);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  op_cleanup_complete();
}
571
// Worker-side reset: configures the pacer (if enabled) and asks the generation
// to prepare for a new GC cycle (e.g. resetting its marking state).
void ShenandoahConcurrentGC::op_reset() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_reset();
  }
  _generation->prepare_gc();
}
579
// Region closure run at init-mark to ensure every active region has its
// top-at-mark-start (TAMS) captured at the current top. Objects allocated above
// TAMS during the cycle are treated as implicitly live.
class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
public:
  ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
    if (r->is_active()) {
      // Check if region needs updating its TAMS. We have updated it already during concurrent
      // reset, so it is very likely we don't need to do another write here. Since most regions
      // are not "active", this path is relatively rare.
      if (_ctx->top_at_mark_start(r) != r->top()) {
        _ctx->capture_top_at_mark_start(r);
      }
    } else {
      // Inactive regions must already be consistent; verify rather than write.
      assert(_ctx->top_at_mark_start(r) == r->top(),
             "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
    }
  }

  // Safe for parallel_heap_region_iterate: each worker touches distinct regions.
  bool is_thread_safe() { return true; }
};
603
// Begins the marking phase by delegating to the concurrent mark object.
void ShenandoahConcurrentGC::start_mark() {
  _mark.start_mark();
}
607
// Init-mark work, executed by the VM thread at a safepoint. Swaps card tables
// (generational mode), captures region TAMS, resets reference-processor state,
// arms nmethods and stack watermarks, and flips the concurrent-mark flag so
// mutators start running with SATB barriers active.
void ShenandoahConcurrentGC::op_init_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");

  assert(_generation->is_bitmap_clear(), "need clear marking bitmap");
  assert(!_generation->is_mark_complete(), "should not be complete");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");


  if (heap->mode()->is_generational()) {
    if (_generation->is_young() || (_generation->is_global() && ShenandoahVerify)) {
      // The current implementation of swap_remembered_set() copies the write-card-table
      // to the read-card-table. The remembered sets are also swapped for GLOBAL collections
      // so that the verifier works with the correct copy of the card table when verifying.
      // TODO: This path should not really depend on ShenandoahVerify.
      ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_swap_rset);
      _generation->swap_remembered_set();
    }

    if (_generation->is_global()) {
      // A global collection supersedes any in-progress old collection.
      heap->old_generation()->cancel_gc();
    } else if (heap->is_concurrent_old_mark_in_progress()) {
      // Purge the SATB buffers, transferring any valid, old pointers to the
      // old generation mark queue. Any pointers in a young region will be
      // abandoned.
      ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_transfer_satb);
      heap->old_generation()->transfer_pointers_from_satb();
    }
  }

  if (ShenandoahVerify) {
    heap->verifier()->verify_before_concmark();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  // From here on mutators run with marking barriers enabled.
  _generation->set_concurrent_mark_in_progress(true);

  start_mark();

  if (_do_old_gc_bootstrap) {
    shenandoah_assert_generational();
    // Update region state for both young and old regions
    // TODO: We should be able to pull this out of the safepoint for the bootstrap
    // cycle. The top of an old region will only move when a GC cycle evacuates
    // objects into it. When we start an old cycle, we know that nothing can touch
    // the top of old regions.
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
    ShenandoahInitMarkUpdateRegionStateClosure cl;
    heap->parallel_heap_region_iterate(&cl);
    heap->old_generation()->ref_processor()->reset_thread_locals();
  } else {
    // Update region state for only young regions
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
    ShenandoahInitMarkUpdateRegionStateClosure cl;
    _generation->parallel_heap_region_iterate(&cl);
  }

  // Weak reference processing
  ShenandoahReferenceProcessor* rp = _generation->ref_processor();
  rp->reset_thread_locals();
  rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());

  // Make above changes visible to worker threads
  OrderAccess::fence();

  // Arm nmethods for concurrent mark
  ShenandoahCodeRoots::arm_nmethods_for_mark();

  ShenandoahStackWatermark::change_epoch_id();
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_mark();
  }
}
685
// Concurrently marks objects reachable from roots; delegates to the mark object.
void ShenandoahConcurrentGC::op_mark_roots() {
  _mark.mark_concurrent_roots();
}
689
// Main concurrent marking loop; delegates to the mark object.
void ShenandoahConcurrentGC::op_mark() {
  _mark.concurrent_mark();
}
693
// Final-mark work at a safepoint: finishes marking, chooses the collection set,
// and (if there is anything to evacuate or promote in place) sets up the
// evacuation phase: flags, nmethod/stack arming, and pacer. Skipped entirely if
// the GC was cancelled during concurrent mark.
void ShenandoahConcurrentGC::op_final_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");

  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_no_forwarded();
  }

  if (!heap->cancelled_gc()) {
    _mark.finish_mark();
    assert(!heap->cancelled_gc(), "STW mark cannot OOM");

    // Notify JVMTI that the tagmap table will need cleaning.
    JvmtiTagMap::set_needs_cleaning();

    // The collection set is chosen by prepare_regions_and_collection_set().
    //
    // TODO: Under severe memory overload conditions that can be checked here, we may want to limit
    // the inclusion of old-gen candidates within the collection set. This would allow us to prioritize efforts on
    // evacuating young-gen, This remediation is most appropriate when old-gen availability is very high (so there
    // are negligible negative impacts from delaying completion of old-gen evacuation) and when young-gen collections
    // are "under duress" (as signalled by very low availability of memory within young-gen, indicating that young-gen
    // collections are not triggering frequently enough).
    _generation->prepare_regions_and_collection_set(true /*concurrent*/);

    // Upon return from prepare_regions_and_collection_set(), certain parameters have been established to govern the
    // evacuation efforts that are about to begin. In particular:
    //
    // heap->get_promoted_reserve() represents the amount of memory within old-gen's available memory that has
    // been set aside to hold objects promoted from young-gen memory. This represents an estimated percentage
    // of the live young-gen memory within the collection set. If there is more data ready to be promoted than
    // can fit within this reserve, the promotion of some objects will be deferred until a subsequent evacuation
    // pass.
    //
    // heap->get_old_evac_reserve() represents the amount of memory within old-gen's available memory that has been
    // set aside to hold objects evacuated from the old-gen collection set.
    //
    // heap->get_young_evac_reserve() represents the amount of memory within young-gen's available memory that has
    // been set aside to hold objects evacuated from the young-gen collection set. Conservatively, this value
    // equals the entire amount of live young-gen memory within the collection set, even though some of this memory
    // will likely be promoted.

    // Has to be done after cset selection
    heap->prepare_concurrent_roots();

    if (!heap->collection_set()->is_empty() || has_in_place_promotions(heap)) {
      // Even if the collection set is empty, we need to do evacuation if there are regions to be promoted in place.
      // Concurrent evacuation takes responsibility for registering objects and setting the remembered set cards to dirty.

      LogTarget(Debug, gc, cset) lt;
      if (lt.is_enabled()) {
        ResourceMark rm;
        LogStream ls(lt);
        heap->collection_set()->print_on(&ls);
      }

      if (ShenandoahVerify) {
        heap->verifier()->verify_before_evacuation();
      }

      // TODO: Do we need to set this if we are only promoting regions in place? We don't need the barriers on for that.
      heap->set_evacuation_in_progress(true);

      // Verify before arming for concurrent processing.
      // Otherwise, verification can trigger stack processing.
      if (ShenandoahVerify) {
        heap->verifier()->verify_during_evacuation();
      }

      // Generational mode may promote objects in place during the evacuation phase.
      // If that is the only reason we are evacuating, we don't need to update references
      // and there will be no forwarded objects on the heap.
      heap->set_has_forwarded_objects(!heap->collection_set()->is_empty());

      // Arm nmethods/stack for concurrent processing
      if (!heap->collection_set()->is_empty()) {
        // Iff objects will be evacuated, arm the nmethod barriers. These will be disarmed
        // under the same condition (established in prepare_concurrent_roots) after strong
        // root evacuation has completed (see op_strong_roots).
        ShenandoahCodeRoots::arm_nmethods_for_evac();
        ShenandoahStackWatermark::change_epoch_id();
      }

      if (ShenandoahPacing) {
        heap->pacer()->setup_for_evac();
      }
    } else {
      // Nothing to evacuate: verify now since there will be no evacuation phase.
      if (ShenandoahVerify) {
        heap->verifier()->verify_after_concmark();
      }

      if (VerifyAfterGC) {
        Universe::verify();
      }
    }
  }
}
792
793 bool ShenandoahConcurrentGC::has_in_place_promotions(ShenandoahHeap* heap) {
794 return heap->mode()->is_generational() && heap->old_generation()->has_in_place_promotions();
795 }
796
// Thread closure applied to each Java thread during concurrent evacuation:
// finishes the thread's stack watermark processing with the given oop closure.
// In generational mode it also re-enables PLAB promotions for the thread.
template<bool GENERATIONAL>
class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
private:
  OopClosure* const _oops;  // applied to each stack root during watermark processing
public:
  explicit ShenandoahConcurrentEvacThreadClosure(OopClosure* oops) : _oops(oops) {}

  void do_thread(Thread* thread) override {
    JavaThread* const jt = JavaThread::cast(thread);
    StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc);
    if (GENERATIONAL) {
      ShenandoahThreadLocalData::enable_plab_promotions(thread);
    }
  }
};
812
// Worker task that evacuates/updates thread roots concurrently. Java threads
// are partitioned among workers via ShenandoahJavaThreadsIterator. The
// GENERATIONAL template parameter compiles the PLAB-promotion handling in or out.
template<bool GENERATIONAL>
class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask {
private:
  ShenandoahJavaThreadsIterator _java_threads;

public:
  explicit ShenandoahConcurrentEvacUpdateThreadTask(uint n_workers) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Thread Roots"),
    _java_threads(ShenandoahPhaseTimings::conc_thread_roots, n_workers) {
  }

  void work(uint worker_id) override {
    if (GENERATIONAL) {
      // Workers may evacuate (and thereby promote) objects, so allow PLAB
      // promotions on the worker thread itself.
      Thread* worker_thread = Thread::current();
      ShenandoahThreadLocalData::enable_plab_promotions(worker_thread);
    }

    // ShenandoahEvacOOMScope has to be setup by ShenandoahContextEvacuateUpdateRootsClosure.
    // Otherwise, may deadlock with watermark lock
    ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
    ShenandoahConcurrentEvacThreadClosure<GENERATIONAL> thr_cl(&oops_cl);
    _java_threads.threads_do(&thr_cl, worker_id);
  }
};
837
// Concurrent thread-root processing: runs the evac/update task over all Java
// threads. The generational flag is resolved here so the task can be
// instantiated with a compile-time template argument.
void ShenandoahConcurrentGC::op_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_evacuation_in_progress(), "Checked by caller");
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
  if (heap->mode()->is_generational()) {
    ShenandoahConcurrentEvacUpdateThreadTask<true> task(heap->workers()->active_workers());
    heap->workers()->run_task(&task);
  } else {
    ShenandoahConcurrentEvacUpdateThreadTask<false> task(heap->workers()->active_workers());
    heap->workers()->run_task(&task);
  }
}
850
// Concurrent processing of soft/weak/phantom references for this generation.
void ShenandoahConcurrentGC::op_weak_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
  // Concurrent weak refs processing
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
  if (heap->gc_cause() == GCCause::_wb_breakpoint) {
    // Whitebox-test breakpoint: let the test observe that reference processing started.
    ShenandoahBreakpoint::at_after_reference_processing_started();
  }
  _generation->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
}
861
// Closure over OopStorage weak roots: clears slots whose referent died during
// marking, and evacuates/updates slots pointing into the collection set.
// State is snapshotted at construction time (see the constructor below).
class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _mark_context;
  bool _evac_in_progress;       // snapshot: whether evacuation was running when we were built
  Thread* const _thread;        // the worker thread, used for evacuation allocation

public:
  ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
  void do_oop(oop* p);
  void do_oop(narrowOop* p);
};
874
// Captures heap, marking context, evacuation flag and current thread once, so
// the per-oop path does not have to re-query them.
ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
  _heap(ShenandoahHeap::heap()),
  _mark_context(ShenandoahHeap::heap()->marking_context()),
  _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
  _thread(Thread::current()) {
}
881
// For each weak-root slot: null out references to objects that died during
// marking; otherwise, if the referent sits in the collection set, evacuate it
// (if not already forwarded) and atomically install the to-space pointer.
void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
  const oop obj = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(obj)) {
    if (!_mark_context->is_marked(obj)) {
      shenandoah_assert_generations_reconciled();
      if (_heap->is_in_active_generation(obj)) {
        // TODO: This worries me. Here we are asserting that an unmarked from-space object is 'correct'.
        // Normally, I would call this a bogus assert, but there seems to be a legitimate use-case for
        // accessing from-space objects during class unloading. However, the from-space object may have
        // been "filled". We've made no effort to prevent old generation classes being unloaded by young
        // gen (and vice-versa).
        shenandoah_assert_correct(p, obj);
        // Dead referent: clear the slot (atomically, racing with mutator access).
        ShenandoahHeap::atomic_clear_oop(p, obj);
      }
    } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
      // Live referent in cset: resolve or perform the evacuation, then CAS the
      // slot from the from-space pointer to the to-space pointer.
      oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      if (resolved == obj) {
        resolved = _heap->evacuate_object(obj, _thread);
      }
      shenandoah_assert_not_in_cset_except(p, resolved, _heap->cancelled_gc());
      ShenandoahHeap::atomic_update_oop(resolved, p, obj);
    }
  }
}
906
// OopStorage roots are never compressed, so the narrowOop variant must not be called.
void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
  ShouldNotReachHere();
}
910
// Invokes is_alive() on each ClassLoaderData. The boolean result is
// intentionally discarded — presumably the call is made for its side effect of
// computing/caching liveness ahead of concurrent class unloading; confirm
// against the task that applies this closure.
class ShenandoahIsCLDAliveClosure : public CLDClosure {
public:
  void do_cld(ClassLoaderData* cld) {
    cld->is_alive();
  }
};
917
918 class ShenandoahIsNMethodAliveClosure: public NMethodClosure {
919 public:
920 void do_nmethod(nmethod* n) {
921 n->is_unloading();
922 }
990 }
991 }
992 };
993
// Concurrent weak-root processing: evacuates/updates/clears weak roots with a
// worker task, then handshakes all threads so no thread can still observe a
// stale (dead) oop afterwards.
void ShenandoahConcurrentGC::op_weak_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
  // Concurrent weak root processing
  {
    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_work);
    ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots_work);
    ShenandoahConcurrentWeakRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_weak_roots_work);
    heap->workers()->run_task(&task);
  }

  // Perform handshake to flush out dead oops
  {
    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_rendezvous);
    heap->rendezvous_threads();
  }
  // We can only toggle concurrent_weak_root_in_progress flag
  // at a safepoint, so that mutators see a consistent
  // value. The flag will be cleared at the next safepoint.
}
1014
// Concurrent class unloading; only valid while weak-root processing is in
// progress and the cycle was configured to unload classes.
void ShenandoahConcurrentGC::op_class_unloading() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert (heap->is_concurrent_weak_root_in_progress() &&
          heap->unload_classes(),
          "Checked by caller");
  heap->do_class_unloading();
}
1022
1023 class ShenandoahEvacUpdateCodeCacheClosure : public NMethodClosure {
1024 private:
1025 BarrierSetNMethod* const _bs;
1026 ShenandoahEvacuateUpdateMetadataClosure _cl;
1027
1028 public:
1029 ShenandoahEvacUpdateCodeCacheClosure() :
1030 _bs(BarrierSet::barrier_set()->barrier_set_nmethod()),
1031 _cl() {
1032 }
1102 assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
1103 ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
1104 heap->workers()->run_task(&task);
1105 heap->set_concurrent_strong_root_in_progress(false);
1106 }
1107
// Early cleanup: recycle regions found to be entirely garbage at final mark.
void ShenandoahConcurrentGC::op_cleanup_early() {
  ShenandoahHeap::heap()->free_set()->recycle_trash();
}
1111
// Concurrently evacuates live objects out of the collection set.
void ShenandoahConcurrentGC::op_evacuate() {
  ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
}
1115
// Init-update-refs: transitions the heap from the evacuation phase to the
// update-references phase (flag order matters: evac/weak-root flags drop
// before the update-refs flag is raised).
void ShenandoahConcurrentGC::op_init_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->set_evacuation_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);
  heap->prepare_update_heap_references(true /*concurrent*/);
  heap->set_update_refs_in_progress(true);
  if (ShenandoahVerify) {
    heap->verifier()->verify_before_updaterefs();
  }
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_updaterefs();
  }
}
1129
// Concurrently updates all heap references to point at to-space copies.
void ShenandoahConcurrentGC::op_updaterefs() {
  ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
}
1133
// Handshake closure that updates references held in a Java thread's oops
// (stack frames etc.) using ShenandoahUpdateRefsClosure.
class ShenandoahUpdateThreadClosure : public HandshakeClosure {
private:
  ShenandoahUpdateRefsClosure _cl;  // applied to each oop owned by the thread
public:
  ShenandoahUpdateThreadClosure();
  void do_thread(Thread* thread);
};
1141
// Names the handshake operation for logging/diagnostics.
ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
  HandshakeClosure("Shenandoah Update Thread Roots") {
}
1149 ResourceMark rm;
1150 jt->oops_do(&_cl, nullptr);
1151 }
1152 }
1153
// Updates thread roots via a global handshake (each thread processes the
// closure at a safe state, without a full safepoint).
void ShenandoahConcurrentGC::op_update_thread_roots() {
  ShenandoahUpdateThreadClosure cl;
  Handshake::execute(&cl);
}
1158
// Final-update-refs work at a safepoint: finishes root updating, clears
// cancellation and phase flags, updates region states, handles generational
// SATB/aging bookkeeping, verifies, and rebuilds the free set.
void ShenandoahConcurrentGC::op_final_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
  assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");

  heap->finish_concurrent_roots();

  // Clear cancelled GC, if set. On cancellation path, the block before would handle
  // everything.
  if (heap->cancelled_gc()) {
    heap->clear_cancelled_gc(true /* clear oom handler */);
  }

  // Has to be done before cset is clear
  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_in_to_space();
  }

  // If we are running in generational mode and this is an aging cycle, this will also age active
  // regions that haven't been used for allocation.
  heap->update_heap_region_states(true /*concurrent*/);

  heap->set_update_refs_in_progress(false);
  heap->set_has_forwarded_objects(false);

  if (heap->mode()->is_generational() && heap->is_concurrent_old_mark_in_progress()) {
    // When the SATB barrier is left on to support concurrent old gen mark, it may pick up writes to
    // objects in the collection set. After those objects are evacuated, the pointers in the
    // SATB are no longer safe. Once we have finished update references, we are guaranteed that
    // no more writes to the collection set are possible.
    //
    // This will transfer any old pointers in _active_ regions from the SATB to the old gen
    // mark queues. All other pointers will be discarded. This would also discard any pointers
    // in old regions that were included in a mixed evacuation. We aren't using the SATB filter
    // methods here because we cannot control when they execute. If the SATB filter runs _after_
    // a region has been recycled, we will not be able to detect the bad pointer.
    //
    // We are not concerned about skipping this step in abbreviated cycles because regions
    // with no live objects cannot have been written to and so cannot have entries in the SATB
    // buffers.
    heap->old_generation()->transfer_pointers_from_satb();

    // Aging_cycle is only relevant during evacuation cycle for individual objects and during final mark for
    // entire regions. Both of these relevant operations occur before final update refs.
    ShenandoahGenerationalHeap::heap()->set_aging_cycle(false);
  }

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_updaterefs();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  heap->rebuild_free_set(true /*concurrent*/);
}
1216
// Final-roots work for an abbreviated cycle (no evacuation happened): drops the
// weak-root and evacuation flags and, in generational mode, flushes SATB
// buffers for old mark and updates region ages.
void ShenandoahConcurrentGC::op_final_roots() {

  ShenandoahHeap *heap = ShenandoahHeap::heap();
  heap->set_concurrent_weak_root_in_progress(false);
  heap->set_evacuation_in_progress(false);

  if (heap->mode()->is_generational()) {
    // If the cycle was shortened for having enough immediate garbage, this could be
    // the last GC safepoint before concurrent marking of old resumes. We must be sure
    // that old mark threads don't see any pointers to garbage in the SATB buffers.
    if (heap->is_concurrent_old_mark_in_progress()) {
      heap->old_generation()->transfer_pointers_from_satb();
    }

    if (!_generation->is_old()) {
      // Age regions using the marking results of this (young/global) cycle.
      ShenandoahGenerationalHeap::heap()->update_region_ages(_generation->complete_marking_context());
    }
  }
}
1236
// Final cleanup: recycle collection-set regions freed by update-refs.
void ShenandoahConcurrentGC::op_cleanup_complete() {
  ShenandoahHeap::heap()->free_set()->recycle_trash();
}
1240
1241 bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
1242 if (ShenandoahHeap::heap()->cancelled_gc()) {
1243 _degen_point = point;
1244 return true;
1245 }
1246 return false;
1247 }
1248
// Composes the event/log message for the Init Mark pause, qualified by the
// generation type and whether this cycle unloads classes.
const char* ShenandoahConcurrentGC::init_mark_event_message() const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
  if (heap->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Init Mark", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Init Mark", "");
  }
}
1258
// Composes the event/log message for the Final Mark pause, qualified by the
// generation type and whether this cycle unloads classes.
const char* ShenandoahConcurrentGC::final_mark_event_message() const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  // Forwarded objects are tolerated here only while old-gen concurrent mark runs.
  assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
         "Should not have forwarded objects during final mark, unless old gen concurrent mark is running");

  if (heap->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Mark", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Mark", "");
  }
}
1270
1271 const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
1272 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1273 assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
1274 "Should not have forwarded objects concurrent mark, unless old gen concurrent mark is running");
1275 if (heap->unload_classes()) {
1276 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent marking", " (unload classes)");
1277 } else {
1278 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent marking", "");
1279 }
1280 }
|