1 /*
2 * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26
27 #include "gc/shared/barrierSetNMethod.hpp"
28 #include "gc/shared/collectorCounters.hpp"
29 #include "gc/shared/continuationGCSupport.inline.hpp"
30 #include "gc/shenandoah/shenandoahBreakpoint.hpp"
31 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
32 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
33 #include "gc/shenandoah/shenandoahFreeSet.hpp"
34 #include "gc/shenandoah/shenandoahLock.hpp"
35 #include "gc/shenandoah/shenandoahMark.inline.hpp"
36 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
37 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
38 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
39 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
40 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
41 #include "gc/shenandoah/shenandoahStackWatermark.hpp"
42 #include "gc/shenandoah/shenandoahUtils.hpp"
43 #include "gc/shenandoah/shenandoahVerifier.hpp"
44 #include "gc/shenandoah/shenandoahVMOperations.hpp"
45 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
46 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
47 #include "memory/allocation.hpp"
48 #include "prims/jvmtiTagMap.hpp"
49 #include "runtime/vmThread.hpp"
50 #include "utilities/events.hpp"
51
52 // Breakpoint support
53 class ShenandoahBreakpointGCScope : public StackObj {
68 }
69 };
70
71 class ShenandoahBreakpointMarkScope : public StackObj {
72 private:
73 const GCCause::Cause _cause;
74 public:
75 ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
76 if (_cause == GCCause::_wb_breakpoint) {
77 ShenandoahBreakpoint::at_after_marking_started();
78 }
79 }
80
81 ~ShenandoahBreakpointMarkScope() {
82 if (_cause == GCCause::_wb_breakpoint) {
83 ShenandoahBreakpoint::at_before_marking_completed();
84 }
85 }
86 };
87
88 ShenandoahConcurrentGC::ShenandoahConcurrentGC() :
89 _mark(),
90 _degen_point(ShenandoahDegenPoint::_degenerated_unset) {
91 }
92
93 ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
94 return _degen_point;
95 }
96
97 void ShenandoahConcurrentGC::cancel() {
98 ShenandoahConcurrentMark::cancel();
99 }
100
101 bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
102 ShenandoahHeap* const heap = ShenandoahHeap::heap();
103 ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);
104
105 // Reset for upcoming marking
106 entry_reset();
107
108 // Start initial mark under STW
109 vmop_entry_init_mark();
110
111 {
112 ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);
113 // Concurrent mark roots
114 entry_mark_roots();
115 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_outside_cycle)) return false;
116
117 // Continue concurrent mark
118 entry_mark();
119 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) return false;
120 }
121
122 // Complete marking under STW, and start evacuation
123 vmop_entry_final_mark();
124
125 // Concurrent stack processing
126 if (heap->is_evacuation_in_progress()) {
127 entry_thread_roots();
128 }
129
130 // Process weak roots that might still point to regions that would be broken by cleanup
131 if (heap->is_concurrent_weak_root_in_progress()) {
132 entry_weak_refs();
133 entry_weak_roots();
134 }
135
136   // Final mark might have reclaimed some immediate garbage; kick cleanup to reclaim
137 // the space. This would be the last action if there is nothing to evacuate.
138 entry_cleanup_early();
139
140 {
141 ShenandoahHeapLocker locker(heap->lock());
142 heap->free_set()->log_status();
143 }
144
145 // Perform concurrent class unloading
146 if (heap->unload_classes() &&
147 heap->is_concurrent_weak_root_in_progress()) {
148 entry_class_unloading();
149 }
150
151 // Processing strong roots
152 // This may be skipped if there is nothing to update/evacuate.
153 // If so, strong_root_in_progress would be unset.
154 if (heap->is_concurrent_strong_root_in_progress()) {
155 entry_strong_roots();
156 }
157
158 // Continue the cycle with evacuation and optional update-refs.
159 // This may be skipped if there is nothing to evacuate.
160 // If so, evac_in_progress would be unset by collection set preparation code.
161 if (heap->is_evacuation_in_progress()) {
162 // Concurrently evacuate
163 entry_evacuate();
164 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) return false;
165
166 // Perform update-refs phase.
167 vmop_entry_init_updaterefs();
168 entry_updaterefs();
169 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;
170
171 // Concurrent update thread roots
172 entry_update_thread_roots();
173 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;
174
175 vmop_entry_final_updaterefs();
176
177     // Update-refs has freed up the collection set; kick the cleanup to reclaim the space.
178 entry_cleanup_complete();
179 } else {
180 vmop_entry_final_roots();
181 }
182
183 return true;
184 }
185
186 void ShenandoahConcurrentGC::vmop_entry_init_mark() {
187 ShenandoahHeap* const heap = ShenandoahHeap::heap();
188 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
189 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);
190
191 heap->try_inject_alloc_failure();
192 VM_ShenandoahInitMark op(this);
193 VMThread::execute(&op); // jump to entry_init_mark() under safepoint
194 }
195
196 void ShenandoahConcurrentGC::vmop_entry_final_mark() {
197 ShenandoahHeap* const heap = ShenandoahHeap::heap();
198 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
199 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);
200
201 heap->try_inject_alloc_failure();
202 VM_ShenandoahFinalMarkStartEvac op(this);
285 EventMark em("%s", msg);
286
287 op_final_roots();
288 }
289
290 void ShenandoahConcurrentGC::entry_reset() {
291 ShenandoahHeap* const heap = ShenandoahHeap::heap();
292 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
293 static const char* msg = "Concurrent reset";
294 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
295 EventMark em("%s", msg);
296
297 ShenandoahWorkerScope scope(heap->workers(),
298 ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
299 "concurrent reset");
300
301 heap->try_inject_alloc_failure();
302 op_reset();
303 }
304
305 void ShenandoahConcurrentGC::entry_mark_roots() {
306 ShenandoahHeap* const heap = ShenandoahHeap::heap();
307 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
308 const char* msg = "Concurrent marking roots";
309 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
310 EventMark em("%s", msg);
311
312 ShenandoahWorkerScope scope(heap->workers(),
313 ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
314 "concurrent marking roots");
315
316 heap->try_inject_alloc_failure();
317 op_mark_roots();
318 }
319
320 void ShenandoahConcurrentGC::entry_mark() {
321 ShenandoahHeap* const heap = ShenandoahHeap::heap();
322 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
323 const char* msg = conc_mark_event_message();
324 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark);
463 op_updaterefs();
464 }
465
466 void ShenandoahConcurrentGC::entry_cleanup_complete() {
467 ShenandoahHeap* const heap = ShenandoahHeap::heap();
468 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
469 static const char* msg = "Concurrent cleanup";
470 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
471 EventMark em("%s", msg);
472
473 // This phase does not use workers, no need for setup
474 heap->try_inject_alloc_failure();
475 op_cleanup_complete();
476 }
477
478 void ShenandoahConcurrentGC::op_reset() {
479 ShenandoahHeap* const heap = ShenandoahHeap::heap();
480 if (ShenandoahPacing) {
481 heap->pacer()->setup_for_reset();
482 }
483
484 heap->prepare_gc();
485 }
486
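// Heap region closure used at init-mark: for each active region, (re)capture the
// top-at-mark-start (TAMS) if allocations have moved top since the concurrent reset.
// Inactive regions are expected to already carry a correct TAMS.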
487 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
488 private:
489 ShenandoahMarkingContext* const _ctx;
490 public:
491 ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
492
493 void heap_region_do(ShenandoahHeapRegion* r) {
494 assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
495 if (r->is_active()) {
496 // Check if region needs updating its TAMS. We have updated it already during concurrent
497 // reset, so it is very likely we don't need to do another write here.
498 if (_ctx->top_at_mark_start(r) != r->top()) {
499 _ctx->capture_top_at_mark_start(r);
500 }
501 } else {
502 assert(_ctx->top_at_mark_start(r) == r->top(),
503 "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
504 }
505 }
506
507 bool is_thread_safe() { return true; }
508 };
509
510 void ShenandoahConcurrentGC::start_mark() {
511 _mark.start_mark();
512 }
513
514 void ShenandoahConcurrentGC::op_init_mark() {
515 ShenandoahHeap* const heap = ShenandoahHeap::heap();
516 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
517 assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
518
519 assert(heap->marking_context()->is_bitmap_clear(), "need clear marking bitmap");
520 assert(!heap->marking_context()->is_complete(), "should not be complete");
521 assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
522
523 if (ShenandoahVerify) {
524 heap->verifier()->verify_before_concmark();
525 }
526
527 if (VerifyBeforeGC) {
528 Universe::verify();
529 }
530
531 heap->set_concurrent_mark_in_progress(true);
532
533 start_mark();
534
535 {
536 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
537 ShenandoahInitMarkUpdateRegionStateClosure cl;
538 heap->parallel_heap_region_iterate(&cl);
539 }
540
541 // Weak reference processing
542 ShenandoahReferenceProcessor* rp = heap->ref_processor();
543 rp->reset_thread_locals();
544 rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());
545
546 // Make above changes visible to worker threads
547 OrderAccess::fence();
548
549 // Arm nmethods for concurrent mark
550 ShenandoahCodeRoots::arm_nmethods_for_mark();
551
552 ShenandoahStackWatermark::change_epoch_id();
553 if (ShenandoahPacing) {
554 heap->pacer()->setup_for_mark();
555 }
556 }
557
558 void ShenandoahConcurrentGC::op_mark_roots() {
559 _mark.mark_concurrent_roots();
560 }
561
562 void ShenandoahConcurrentGC::op_mark() {
563 _mark.concurrent_mark();
564 }
565
566 void ShenandoahConcurrentGC::op_final_mark() {
567 ShenandoahHeap* const heap = ShenandoahHeap::heap();
568 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
569 assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
570
571 if (ShenandoahVerify) {
572 heap->verifier()->verify_roots_no_forwarded();
573 }
574
575 if (!heap->cancelled_gc()) {
576 _mark.finish_mark();
577 assert(!heap->cancelled_gc(), "STW mark cannot OOM");
578
579 // Notify JVMTI that the tagmap table will need cleaning.
580 JvmtiTagMap::set_needs_cleaning();
581
582 heap->prepare_regions_and_collection_set(true /*concurrent*/);
583
584 // Has to be done after cset selection
585 heap->prepare_concurrent_roots();
586
587 if (!heap->collection_set()->is_empty()) {
588 if (ShenandoahVerify) {
589 heap->verifier()->verify_before_evacuation();
590 }
591
592 heap->set_evacuation_in_progress(true);
593 // From here on, we need to update references.
594 heap->set_has_forwarded_objects(true);
595
596 // Verify before arming for concurrent processing.
597 // Otherwise, verification can trigger stack processing.
598 if (ShenandoahVerify) {
599 heap->verifier()->verify_during_evacuation();
600 }
601
602 // Arm nmethods/stack for concurrent processing
603 ShenandoahCodeRoots::arm_nmethods_for_evac();
604 ShenandoahStackWatermark::change_epoch_id();
605
606 if (ShenandoahPacing) {
607 heap->pacer()->setup_for_evac();
608 }
609 } else {
610 if (ShenandoahVerify) {
611 heap->verifier()->verify_after_concmark();
612 }
613
614 if (VerifyAfterGC) {
615 Universe::verify();
616 }
617 }
618 }
619 }
620
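// Thread closure that forces completion of lazy stack watermark processing for a Java
// thread, applying the given oop closure to evacuate/update the thread's stack roots.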
621 class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
622 private:
623 OopClosure* const _oops;
624
625 public:
626 ShenandoahConcurrentEvacThreadClosure(OopClosure* oops);
627 void do_thread(Thread* thread);
628 };
629
630 ShenandoahConcurrentEvacThreadClosure::ShenandoahConcurrentEvacThreadClosure(OopClosure* oops) :
631 _oops(oops) {
632 }
633
634 void ShenandoahConcurrentEvacThreadClosure::do_thread(Thread* thread) {
635 JavaThread* const jt = JavaThread::cast(thread);
636 StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc);
637 }
638
639 class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask {
640 private:
641 ShenandoahJavaThreadsIterator _java_threads;
642
643 public:
644 ShenandoahConcurrentEvacUpdateThreadTask(uint n_workers) :
645 WorkerTask("Shenandoah Evacuate/Update Concurrent Thread Roots"),
646 _java_threads(ShenandoahPhaseTimings::conc_thread_roots, n_workers) {
647 }
648
649 void work(uint worker_id) {
650     // ShenandoahEvacOOMScope has to be set up by ShenandoahContextEvacuateUpdateRootsClosure.
651     // Otherwise, it may deadlock with the watermark lock.
652 ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
653 ShenandoahConcurrentEvacThreadClosure thr_cl(&oops_cl);
654 _java_threads.threads_do(&thr_cl, worker_id);
655 }
656 };
657
658 void ShenandoahConcurrentGC::op_thread_roots() {
659 ShenandoahHeap* const heap = ShenandoahHeap::heap();
660 assert(heap->is_evacuation_in_progress(), "Checked by caller");
661 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
662 ShenandoahConcurrentEvacUpdateThreadTask task(heap->workers()->active_workers());
663 heap->workers()->run_task(&task);
664 }
665
666 void ShenandoahConcurrentGC::op_weak_refs() {
667 ShenandoahHeap* const heap = ShenandoahHeap::heap();
668 assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
669 // Concurrent weak refs processing
670 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
671 if (heap->gc_cause() == GCCause::_wb_breakpoint) {
672 ShenandoahBreakpoint::at_after_reference_processing_started();
673 }
674 heap->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
675 }
676
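// OopStorage root closure: clears roots that point to dead (unmarked) objects, and
// evacuates/updates roots that still point into the collection set while evacuation
// is in progress.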
677 class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
678 private:
679 ShenandoahHeap* const _heap;
680 ShenandoahMarkingContext* const _mark_context;
681 bool _evac_in_progress;
682 Thread* const _thread;
683
684 public:
685 ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
686 void do_oop(oop* p);
687 void do_oop(narrowOop* p);
688 };
689
690 ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
691 _heap(ShenandoahHeap::heap()),
692 _mark_context(ShenandoahHeap::heap()->marking_context()),
693 _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
694 _thread(Thread::current()) {
695 }
696
697 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
698 const oop obj = RawAccess<>::oop_load(p);
699 if (!CompressedOops::is_null(obj)) {
700 if (!_mark_context->is_marked(obj)) {
701 shenandoah_assert_correct(p, obj);
702 ShenandoahHeap::atomic_clear_oop(p, obj);
703 } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
704 oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
705 if (resolved == obj) {
706 resolved = _heap->evacuate_object(obj, _thread);
707 }
708 ShenandoahHeap::atomic_update_oop(resolved, p, obj);
709 assert(_heap->cancelled_gc() ||
710 _mark_context->is_marked(resolved) && !_heap->in_collection_set(resolved),
711 "Sanity");
712 }
713 }
714 }
715
716 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
717 ShouldNotReachHere();
718 }
719
720 class ShenandoahIsCLDAliveClosure : public CLDClosure {
721 public:
722 void do_cld(ClassLoaderData* cld) {
908 assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
909 ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
910 heap->workers()->run_task(&task);
911 heap->set_concurrent_strong_root_in_progress(false);
912 }
913
914 void ShenandoahConcurrentGC::op_cleanup_early() {
915 ShenandoahHeap::heap()->free_set()->recycle_trash();
916 }
917
918 void ShenandoahConcurrentGC::op_evacuate() {
919 ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
920 }
921
922 void ShenandoahConcurrentGC::op_init_updaterefs() {
923 ShenandoahHeap* const heap = ShenandoahHeap::heap();
924 heap->set_evacuation_in_progress(false);
925 heap->set_concurrent_weak_root_in_progress(false);
926 heap->prepare_update_heap_references(true /*concurrent*/);
927 heap->set_update_refs_in_progress(true);
928
929 if (ShenandoahPacing) {
930 heap->pacer()->setup_for_updaterefs();
931 }
932 }
933
934 void ShenandoahConcurrentGC::op_updaterefs() {
935 ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
936 }
937
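// Handshake closure that walks a Java thread's oops and updates references that still
// point to forwarded objects, as part of the concurrent update-thread-roots phase.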
938 class ShenandoahUpdateThreadClosure : public HandshakeClosure {
939 private:
940 ShenandoahUpdateRefsClosure _cl;
941 public:
942 ShenandoahUpdateThreadClosure();
943 void do_thread(Thread* thread);
944 };
945
946 ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
947 HandshakeClosure("Shenandoah Update Thread Roots") {
948 }
953 ResourceMark rm;
954 jt->oops_do(&_cl, nullptr);
955 }
956 }
957
958 void ShenandoahConcurrentGC::op_update_thread_roots() {
959 ShenandoahUpdateThreadClosure cl;
960 Handshake::execute(&cl);
961 }
962
963 void ShenandoahConcurrentGC::op_final_updaterefs() {
964 ShenandoahHeap* const heap = ShenandoahHeap::heap();
965 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
966 assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");
967
968 heap->finish_concurrent_roots();
969
970 // Clear cancelled GC, if set. On cancellation path, the block before would handle
971 // everything.
972 if (heap->cancelled_gc()) {
973 heap->clear_cancelled_gc();
974 }
975
976 // Has to be done before cset is clear
977 if (ShenandoahVerify) {
978 heap->verifier()->verify_roots_in_to_space();
979 }
980
981 heap->update_heap_region_states(true /*concurrent*/);
982
983 heap->set_update_refs_in_progress(false);
984 heap->set_has_forwarded_objects(false);
985
986 if (ShenandoahVerify) {
987 heap->verifier()->verify_after_updaterefs();
988 }
989
990 if (VerifyAfterGC) {
991 Universe::verify();
992 }
993
994 heap->rebuild_free_set(true /*concurrent*/);
995 }
996
997 void ShenandoahConcurrentGC::op_final_roots() {
998 ShenandoahHeap::heap()->set_concurrent_weak_root_in_progress(false);
999 }
1000
1001 void ShenandoahConcurrentGC::op_cleanup_complete() {
1002 ShenandoahHeap::heap()->free_set()->recycle_trash();
1003 }
1004
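// If the GC has been cancelled, remember the degeneration point so that a subsequent
// degenerated cycle can resume from it, and tell the caller to abort the concurrent cycle.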
1005 bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
1006 if (ShenandoahHeap::heap()->cancelled_gc()) {
1007 _degen_point = point;
1008 return true;
1009 }
1010 return false;
1011 }
1012
1013 const char* ShenandoahConcurrentGC::init_mark_event_message() const {
1014 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1015 assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1016 if (heap->unload_classes()) {
1017 return "Pause Init Mark (unload classes)";
1018 } else {
1019 return "Pause Init Mark";
1020 }
1021 }
1022
1023 const char* ShenandoahConcurrentGC::final_mark_event_message() const {
1024 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1025 assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1026 if (heap->unload_classes()) {
1027 return "Pause Final Mark (unload classes)";
1028 } else {
1029 return "Pause Final Mark";
1030 }
1031 }
1032
1033 const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
1034 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1035 assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1036 if (heap->unload_classes()) {
1037 return "Concurrent marking (unload classes)";
1038 } else {
1039 return "Concurrent marking";
1040 }
1041 }
1 /*
2 * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
3 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27
28 #include "gc/shared/barrierSetNMethod.hpp"
29 #include "gc/shared/collectorCounters.hpp"
30 #include "gc/shared/continuationGCSupport.inline.hpp"
31 #include "gc/shenandoah/shenandoahBreakpoint.hpp"
32 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
33 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
34 #include "gc/shenandoah/shenandoahFreeSet.hpp"
35 #include "gc/shenandoah/shenandoahGeneration.hpp"
36 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
37 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
38 #include "gc/shenandoah/shenandoahLock.hpp"
39 #include "gc/shenandoah/shenandoahMark.inline.hpp"
40 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
41 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
42 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
43 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
44 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
45 #include "gc/shenandoah/shenandoahStackWatermark.hpp"
46 #include "gc/shenandoah/shenandoahUtils.hpp"
47 #include "gc/shenandoah/shenandoahVerifier.hpp"
48 #include "gc/shenandoah/shenandoahVMOperations.hpp"
49 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
50 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
51 #include "memory/allocation.hpp"
52 #include "prims/jvmtiTagMap.hpp"
53 #include "runtime/vmThread.hpp"
54 #include "utilities/events.hpp"
55
56 // Breakpoint support
57 class ShenandoahBreakpointGCScope : public StackObj {
72 }
73 };
74
75 class ShenandoahBreakpointMarkScope : public StackObj {
76 private:
77 const GCCause::Cause _cause;
78 public:
79 ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
80 if (_cause == GCCause::_wb_breakpoint) {
81 ShenandoahBreakpoint::at_after_marking_started();
82 }
83 }
84
85 ~ShenandoahBreakpointMarkScope() {
86 if (_cause == GCCause::_wb_breakpoint) {
87 ShenandoahBreakpoint::at_before_marking_completed();
88 }
89 }
90 };
91
92 ShenandoahConcurrentGC::ShenandoahConcurrentGC(ShenandoahGeneration* generation, bool do_old_gc_bootstrap) :
93 _mark(generation),
94 _degen_point(ShenandoahDegenPoint::_degenerated_unset),
95 _abbreviated(false),
96 _do_old_gc_bootstrap(do_old_gc_bootstrap),
97 _generation(generation) {
98 }
99
100 ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
101 return _degen_point;
102 }
103
104 bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
105 ShenandoahHeap* const heap = ShenandoahHeap::heap();
106 heap->start_conc_gc();
107
108 ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);
109
110 // Reset for upcoming marking
111 entry_reset();
112
113 // Start initial mark under STW
114 vmop_entry_init_mark();
115
116 {
117 ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);
118
119 // Reset task queue stats here, rather than in mark_concurrent_roots,
120 // because remembered set scan will `push` oops into the queues and
121 // resetting after this happens will lose those counts.
122 TASKQUEUE_STATS_ONLY(_mark.task_queues()->reset_taskqueue_stats());
123
124 // Concurrent remembered set scanning
125 entry_scan_remembered_set();
126 // TODO: When RS scanning yields, we will need a check_cancellation_and_abort() degeneration point here.
127
128 // Concurrent mark roots
129 entry_mark_roots();
130 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_roots)) {
131 return false;
132 }
133
134 // Continue concurrent mark
135 entry_mark();
136 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) {
137 return false;
138 }
139 }
140
141 // Complete marking under STW, and start evacuation
142 vmop_entry_final_mark();
143
144 // If GC was cancelled before final mark, then the safepoint operation will do nothing
145 // and the concurrent mark will still be in progress. In this case it is safe to resume
146 // the degenerated cycle from the marking phase. On the other hand, if the GC is cancelled
147 // after final mark (but before this check), then the final mark safepoint operation
148 // will have finished the mark (setting concurrent mark in progress to false). Final mark
149 // will also have setup state (in concurrent stack processing) that will not be safe to
150   // will also have set up state (in concurrent stack processing) that will not be safe to
151 // occurred after final mark, we must resume the degenerated cycle after the marking phase.
152 if (_generation->is_concurrent_mark_in_progress() && check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) {
153 assert(!heap->is_concurrent_weak_root_in_progress(), "Weak roots should not be in progress when concurrent mark is in progress");
154 return false;
155 }
156
157 // Concurrent stack processing
158 if (heap->is_evacuation_in_progress()) {
159 entry_thread_roots();
160 }
161
162 // Process weak roots that might still point to regions that would be broken by cleanup
163 if (heap->is_concurrent_weak_root_in_progress()) {
164 entry_weak_refs();
165 entry_weak_roots();
166 }
167
168   // Final mark might have reclaimed some immediate garbage; kick cleanup to reclaim
169 // the space. This would be the last action if there is nothing to evacuate. Note that
170 // we will not age young-gen objects in the case that we skip evacuation.
171 entry_cleanup_early();
172
173 {
174 ShenandoahHeapLocker locker(heap->lock());
175 heap->free_set()->log_status();
176 }
177
178 // Perform concurrent class unloading
179 if (heap->unload_classes() &&
180 heap->is_concurrent_weak_root_in_progress()) {
181 entry_class_unloading();
182 }
183
184 // Processing strong roots
185 // This may be skipped if there is nothing to update/evacuate.
186 // If so, strong_root_in_progress would be unset.
187 if (heap->is_concurrent_strong_root_in_progress()) {
188 entry_strong_roots();
189 }
190
191 // Continue the cycle with evacuation and optional update-refs.
192 // This may be skipped if there is nothing to evacuate.
193 // If so, evac_in_progress would be unset by collection set preparation code.
194 if (heap->is_evacuation_in_progress()) {
195 // Concurrently evacuate
196 entry_evacuate();
197 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) {
198 return false;
199 }
200 }
201
202 if (heap->has_forwarded_objects()) {
203 // Perform update-refs phase.
204 vmop_entry_init_updaterefs();
205 entry_updaterefs();
206 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) {
207 return false;
208 }
209
210 // Concurrent update thread roots
211 entry_update_thread_roots();
212 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) {
213 return false;
214 }
215
216 vmop_entry_final_updaterefs();
217
218     // Update-refs has freed up the collection set; kick the cleanup to reclaim the space.
219 entry_cleanup_complete();
220 } else {
221 // We chose not to evacuate because we found sufficient immediate garbage. Note that we
222 // do not check for cancellation here because, at this point, the cycle is effectively
223 // complete. If the cycle has been cancelled here, the control thread will detect it
224 // on its next iteration and run a degenerated young cycle.
225 vmop_entry_final_roots();
226 _abbreviated = true;
227 }
228
229 // We defer generation resizing actions until after cset regions have been recycled. We do this even following an
230 // abbreviated cycle.
231 if (heap->mode()->is_generational()) {
232 bool success;
233 size_t region_xfer;
234 const char* region_destination;
235 ShenandoahYoungGeneration* young_gen = heap->young_generation();
236 ShenandoahGeneration* old_gen = heap->old_generation();
237 {
238 ShenandoahHeapLocker locker(heap->lock());
239
240 size_t old_region_surplus = heap->get_old_region_surplus();
241 size_t old_region_deficit = heap->get_old_region_deficit();
242 if (old_region_surplus) {
243 success = heap->generation_sizer()->transfer_to_young(old_region_surplus);
244 region_destination = "young";
245 region_xfer = old_region_surplus;
246 } else if (old_region_deficit) {
247 success = heap->generation_sizer()->transfer_to_old(old_region_deficit);
248 region_destination = "old";
249 region_xfer = old_region_deficit;
250 if (!success) {
251 ((ShenandoahOldHeuristics *) old_gen->heuristics())->trigger_cannot_expand();
252 }
253 } else {
254 region_destination = "none";
255 region_xfer = 0;
256 success = true;
257 }
258 heap->set_old_region_surplus(0);
259 heap->set_old_region_deficit(0);
260
261 size_t old_usage_before_evac = heap->capture_old_usage(0);
262 size_t old_usage_now = old_gen->used();
263 size_t promoted_bytes = old_usage_now - old_usage_before_evac;
264 heap->set_previous_promotion(promoted_bytes);
265 heap->set_young_evac_reserve(0);
266 heap->set_old_evac_reserve(0);
267 heap->reset_old_evac_expended();
268 heap->set_promoted_reserve(0);
269 }
270
271 // Report outside the heap lock
272 size_t young_available = young_gen->available();
273 size_t old_available = old_gen->available();
274 log_info(gc, ergo)("After cleanup, %s " SIZE_FORMAT " regions to %s to prepare for next gc, old available: "
275 SIZE_FORMAT "%s, young_available: " SIZE_FORMAT "%s",
276 success? "successfully transferred": "failed to transfer", region_xfer, region_destination,
277 byte_size_in_proper_unit(old_available), proper_unit_for_byte_size(old_available),
278 byte_size_in_proper_unit(young_available), proper_unit_for_byte_size(young_available));
279 }
280 return true;
281 }
282
283 void ShenandoahConcurrentGC::vmop_entry_init_mark() {
284 ShenandoahHeap* const heap = ShenandoahHeap::heap();
285 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
286 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);
287
288 heap->try_inject_alloc_failure();
289 VM_ShenandoahInitMark op(this);
290 VMThread::execute(&op); // jump to entry_init_mark() under safepoint
291 }
292
293 void ShenandoahConcurrentGC::vmop_entry_final_mark() {
294 ShenandoahHeap* const heap = ShenandoahHeap::heap();
295 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
296 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);
297
298 heap->try_inject_alloc_failure();
299 VM_ShenandoahFinalMarkStartEvac op(this);
382 EventMark em("%s", msg);
383
384 op_final_roots();
385 }
386
387 void ShenandoahConcurrentGC::entry_reset() {
388 ShenandoahHeap* const heap = ShenandoahHeap::heap();
389 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
390 static const char* msg = "Concurrent reset";
391 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
392 EventMark em("%s", msg);
393
394 ShenandoahWorkerScope scope(heap->workers(),
395 ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
396 "concurrent reset");
397
398 heap->try_inject_alloc_failure();
399 op_reset();
400 }
401
402 void ShenandoahConcurrentGC::entry_scan_remembered_set() {
403 if (_generation->is_young()) {
404 ShenandoahHeap* const heap = ShenandoahHeap::heap();
405 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
406 const char* msg = "Concurrent remembered set scanning";
407 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::init_scan_rset);
408 EventMark em("%s", msg);
409
410 ShenandoahWorkerScope scope(heap->workers(),
411 ShenandoahWorkerPolicy::calc_workers_for_rs_scanning(),
412 msg);
413
414 heap->try_inject_alloc_failure();
415 _generation->scan_remembered_set(true /* is_concurrent */);
416 }
417 }
418
419 void ShenandoahConcurrentGC::entry_mark_roots() {
420 ShenandoahHeap* const heap = ShenandoahHeap::heap();
421 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
422 const char* msg = "Concurrent marking roots";
423 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
424 EventMark em("%s", msg);
425
426 ShenandoahWorkerScope scope(heap->workers(),
427 ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
428 "concurrent marking roots");
429
430 heap->try_inject_alloc_failure();
431 op_mark_roots();
432 }
433
434 void ShenandoahConcurrentGC::entry_mark() {
435 ShenandoahHeap* const heap = ShenandoahHeap::heap();
436 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
437 const char* msg = conc_mark_event_message();
438 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark);
577 op_updaterefs();
578 }
579
580 void ShenandoahConcurrentGC::entry_cleanup_complete() {
581 ShenandoahHeap* const heap = ShenandoahHeap::heap();
582 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
583 static const char* msg = "Concurrent cleanup";
584 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
585 EventMark em("%s", msg);
586
587 // This phase does not use workers, no need for setup
588 heap->try_inject_alloc_failure();
589 op_cleanup_complete();
590 }
591
592 void ShenandoahConcurrentGC::op_reset() {
593 ShenandoahHeap* const heap = ShenandoahHeap::heap();
594 if (ShenandoahPacing) {
595 heap->pacer()->setup_for_reset();
596 }
597 _generation->prepare_gc();
598 }
599
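// Heap region closure used at init-mark: for each active region, (re)capture the
// top-at-mark-start (TAMS) if allocations have moved top since the concurrent reset.
// Inactive regions are expected to already carry a correct TAMS.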
600 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
601 private:
602 ShenandoahMarkingContext* const _ctx;
603 public:
604 ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
605
606 void heap_region_do(ShenandoahHeapRegion* r) {
607 assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
608 if (r->is_active()) {
609 // Check if region needs updating its TAMS. We have updated it already during concurrent
610 // reset, so it is very likely we don't need to do another write here. Since most regions
611 // are not "active", this path is relatively rare.
612 if (_ctx->top_at_mark_start(r) != r->top()) {
613 _ctx->capture_top_at_mark_start(r);
614 }
615 } else {
616 assert(_ctx->top_at_mark_start(r) == r->top(),
617 "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
618 }
619 }
620
621 bool is_thread_safe() { return true; }
622 };
623
624 void ShenandoahConcurrentGC::start_mark() {
625 _mark.start_mark();
626 }
627
628 void ShenandoahConcurrentGC::op_init_mark() {
629 ShenandoahHeap* const heap = ShenandoahHeap::heap();
630 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
631 assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
632
633 assert(_generation->is_bitmap_clear(), "need clear marking bitmap");
634 assert(!_generation->is_mark_complete(), "should not be complete");
635 assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
636
637
638 if (heap->mode()->is_generational()) {
639 if (_generation->is_young() || (_generation->is_global() && ShenandoahVerify)) {
640 // The current implementation of swap_remembered_set() copies the write-card-table
641 // to the read-card-table. The remembered sets are also swapped for GLOBAL collections
642 // so that the verifier works with the correct copy of the card table when verifying.
643 // TODO: This path should not really depend on ShenandoahVerify.
644 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_swap_rset);
645 _generation->swap_remembered_set();
646 }
647
648 if (_generation->is_global()) {
649 heap->cancel_old_gc();
650 } else if (heap->is_concurrent_old_mark_in_progress()) {
651 // Purge the SATB buffers, transferring any valid, old pointers to the
652 // old generation mark queue. Any pointers in a young region will be
653 // abandoned.
654 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_transfer_satb);
655 heap->transfer_old_pointers_from_satb();
656 }
657 }
658
659 if (ShenandoahVerify) {
660 heap->verifier()->verify_before_concmark();
661 }
662
663 if (VerifyBeforeGC) {
664 Universe::verify();
665 }
666
667 _generation->set_concurrent_mark_in_progress(true);
668
669 start_mark();
670
671 if (_do_old_gc_bootstrap) {
672 // Update region state for both young and old regions
673 // TODO: We should be able to pull this out of the safepoint for the bootstrap
674 // cycle. The top of an old region will only move when a GC cycle evacuates
675 // objects into it. When we start an old cycle, we know that nothing can touch
676 // the top of old regions.
677 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
678 ShenandoahInitMarkUpdateRegionStateClosure cl;
679 heap->parallel_heap_region_iterate(&cl);
680 } else {
681 // Update region state for only young regions
682 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
683 ShenandoahInitMarkUpdateRegionStateClosure cl;
684 _generation->parallel_heap_region_iterate(&cl);
685 }
686
687 // Weak reference processing
688 ShenandoahReferenceProcessor* rp = _generation->ref_processor();
689 rp->reset_thread_locals();
690 rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());
691
692 // Make above changes visible to worker threads
693 OrderAccess::fence();
694
695 // Arm nmethods for concurrent mark
696 ShenandoahCodeRoots::arm_nmethods_for_mark();
697
698 ShenandoahStackWatermark::change_epoch_id();
699 if (ShenandoahPacing) {
700 heap->pacer()->setup_for_mark();
701 }
702 }
703
704 void ShenandoahConcurrentGC::op_mark_roots() {
705 _mark.mark_concurrent_roots();
706 }
707
708 void ShenandoahConcurrentGC::op_mark() {
709 _mark.concurrent_mark();
710 }
711
712 void ShenandoahConcurrentGC::op_final_mark() {
713 ShenandoahHeap* const heap = ShenandoahHeap::heap();
714 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
715 assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
716
717 if (ShenandoahVerify) {
718 heap->verifier()->verify_roots_no_forwarded();
719 }
720
721 if (!heap->cancelled_gc()) {
722 _mark.finish_mark();
723 assert(!heap->cancelled_gc(), "STW mark cannot OOM");
724
725 // Notify JVMTI that the tagmap table will need cleaning.
726 JvmtiTagMap::set_needs_cleaning();
727
728 // The collection set is chosen by prepare_regions_and_collection_set().
729 //
730 // TODO: Under severe memory overload conditions that can be checked here, we may want to limit
731 // the inclusion of old-gen candidates within the collection set. This would allow us to prioritize efforts on
732     // evacuating young-gen. This remediation is most appropriate when old-gen availability is very high (so there
733 // are negligible negative impacts from delaying completion of old-gen evacuation) and when young-gen collections
734     // are "under duress" (as signalled by very low availability of memory within young-gen, indicating that young-gen
735 // collections are not triggering frequently enough).
736 _generation->prepare_regions_and_collection_set(true /*concurrent*/);
737
738 // Upon return from prepare_regions_and_collection_set(), certain parameters have been established to govern the
739 // evacuation efforts that are about to begin. In particular:
740 //
741 // heap->get_promoted_reserve() represents the amount of memory within old-gen's available memory that has
742 // been set aside to hold objects promoted from young-gen memory. This represents an estimated percentage
743 // of the live young-gen memory within the collection set. If there is more data ready to be promoted than
744 // can fit within this reserve, the promotion of some objects will be deferred until a subsequent evacuation
745 // pass.
746 //
747 // heap->get_old_evac_reserve() represents the amount of memory within old-gen's available memory that has been
748 // set aside to hold objects evacuated from the old-gen collection set.
749 //
750 // heap->get_young_evac_reserve() represents the amount of memory within young-gen's available memory that has
751 // been set aside to hold objects evacuated from the young-gen collection set. Conservatively, this value
752 // equals the entire amount of live young-gen memory within the collection set, even though some of this memory
753 // will likely be promoted.
754
755 // Has to be done after cset selection
756 heap->prepare_concurrent_roots();
757
758 if (heap->mode()->is_generational()) {
759 size_t humongous_regions_promoted = heap->get_promotable_humongous_regions();
760 size_t regular_regions_promoted_in_place = heap->get_regular_regions_promoted_in_place();
761 if (!heap->collection_set()->is_empty() || (humongous_regions_promoted + regular_regions_promoted_in_place > 0)) {
762 // Even if the collection set is empty, we need to do evacuation if there are regions to be promoted in place.
763 // Concurrent evacuation takes responsibility for registering objects and setting the remembered set cards to dirty.
764
765 LogTarget(Debug, gc, cset) lt;
766 if (lt.is_enabled()) {
767 ResourceMark rm;
768 LogStream ls(lt);
769 heap->collection_set()->print_on(&ls);
770 }
771
772 if (ShenandoahVerify) {
773 heap->verifier()->verify_before_evacuation();
774 }
775
776 heap->set_evacuation_in_progress(true);
777
778 // Verify before arming for concurrent processing.
779 // Otherwise, verification can trigger stack processing.
780 if (ShenandoahVerify) {
781 heap->verifier()->verify_during_evacuation();
782 }
783
784 // Generational mode may promote objects in place during the evacuation phase.
785 // If that is the only reason we are evacuating, we don't need to update references
786 // and there will be no forwarded objects on the heap.
787 heap->set_has_forwarded_objects(!heap->collection_set()->is_empty());
788
789 // Arm nmethods/stack for concurrent processing
790 if (!heap->collection_set()->is_empty()) {
791         // Iff objects will be evacuated, arm the nmethod barriers. These will be disarmed
792 // under the same condition (established in prepare_concurrent_roots) after strong
793 // root evacuation has completed (see op_strong_roots).
794 ShenandoahCodeRoots::arm_nmethods_for_evac();
795 ShenandoahStackWatermark::change_epoch_id();
796 }
797
798 if (ShenandoahPacing) {
799 heap->pacer()->setup_for_evac();
800 }
801 } else {
802 if (ShenandoahVerify) {
803 heap->verifier()->verify_after_concmark();
804 }
805
806 if (VerifyAfterGC) {
807 Universe::verify();
808 }
809 }
810 } else {
811 // Not is_generational()
812 if (!heap->collection_set()->is_empty()) {
813 LogTarget(Info, gc, ergo) lt;
814 if (lt.is_enabled()) {
815 ResourceMark rm;
816 LogStream ls(lt);
817 heap->collection_set()->print_on(&ls);
818 }
819
820 if (ShenandoahVerify) {
821 heap->verifier()->verify_before_evacuation();
822 }
823
824 heap->set_evacuation_in_progress(true);
825
826 // Verify before arming for concurrent processing.
827 // Otherwise, verification can trigger stack processing.
828 if (ShenandoahVerify) {
829 heap->verifier()->verify_during_evacuation();
830 }
831
832 // From here on, we need to update references.
833 heap->set_has_forwarded_objects(true);
834
835 // Arm nmethods/stack for concurrent processing
836 ShenandoahCodeRoots::arm_nmethods_for_evac();
837 ShenandoahStackWatermark::change_epoch_id();
838
839 if (ShenandoahPacing) {
840 heap->pacer()->setup_for_evac();
841 }
842 } else {
843 if (ShenandoahVerify) {
844 heap->verifier()->verify_after_concmark();
845 }
846
847 if (VerifyAfterGC) {
848 Universe::verify();
849 }
850 }
851 }
852 }
853 }
854
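// Thread closure that forces completion of lazy stack watermark processing for a Java
// thread, applying the given oop closure to evacuate/update the thread's stack roots,
// and then re-enables PLAB promotions for that thread.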
855 class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
856 private:
857 OopClosure* const _oops;
858
859 public:
860 ShenandoahConcurrentEvacThreadClosure(OopClosure* oops);
861 void do_thread(Thread* thread);
862 };
863
864 ShenandoahConcurrentEvacThreadClosure::ShenandoahConcurrentEvacThreadClosure(OopClosure* oops) :
865 _oops(oops) {
866 }
867
868 void ShenandoahConcurrentEvacThreadClosure::do_thread(Thread* thread) {
869 JavaThread* const jt = JavaThread::cast(thread);
870 StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc);
871 ShenandoahThreadLocalData::enable_plab_promotions(thread);
872 }
873
874 class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask {
875 private:
876 ShenandoahJavaThreadsIterator _java_threads;
877
878 public:
879 ShenandoahConcurrentEvacUpdateThreadTask(uint n_workers) :
880 WorkerTask("Shenandoah Evacuate/Update Concurrent Thread Roots"),
881 _java_threads(ShenandoahPhaseTimings::conc_thread_roots, n_workers) {
882 }
883
884 void work(uint worker_id) {
885 Thread* worker_thread = Thread::current();
886 ShenandoahThreadLocalData::enable_plab_promotions(worker_thread);
887
888     // ShenandoahEvacOOMScope has to be set up by ShenandoahContextEvacuateUpdateRootsClosure.
889     // Otherwise, it may deadlock with the watermark lock.
890 ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
891 ShenandoahConcurrentEvacThreadClosure thr_cl(&oops_cl);
892 _java_threads.threads_do(&thr_cl, worker_id);
893 }
894 };
895
896 void ShenandoahConcurrentGC::op_thread_roots() {
897 ShenandoahHeap* const heap = ShenandoahHeap::heap();
898 assert(heap->is_evacuation_in_progress(), "Checked by caller");
899 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
900 ShenandoahConcurrentEvacUpdateThreadTask task(heap->workers()->active_workers());
901 heap->workers()->run_task(&task);
902 }
903
904 void ShenandoahConcurrentGC::op_weak_refs() {
905 ShenandoahHeap* const heap = ShenandoahHeap::heap();
906 assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
907 // Concurrent weak refs processing
908 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
909 if (heap->gc_cause() == GCCause::_wb_breakpoint) {
910 ShenandoahBreakpoint::at_after_reference_processing_started();
911 }
912 _generation->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
913 }
914
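// OopStorage root closure: clears roots that point to dead (unmarked) objects in the
// active generation, and evacuates/updates roots that still point into the collection
// set while evacuation is in progress.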
915 class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
916 private:
917 ShenandoahHeap* const _heap;
918 ShenandoahMarkingContext* const _mark_context;
919 bool _evac_in_progress;
920 Thread* const _thread;
921
922 public:
923 ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
924 void do_oop(oop* p);
925 void do_oop(narrowOop* p);
926 };
927
928 ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
929 _heap(ShenandoahHeap::heap()),
930 _mark_context(ShenandoahHeap::heap()->marking_context()),
931 _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
932 _thread(Thread::current()) {
933 }
934
935 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
936 const oop obj = RawAccess<>::oop_load(p);
937 if (!CompressedOops::is_null(obj)) {
938 if (!_mark_context->is_marked(obj)) {
939 if (_heap->is_in_active_generation(obj)) {
940 // TODO: This worries me. Here we are asserting that an unmarked from-space object is 'correct'.
941 // Normally, I would call this a bogus assert, but there seems to be a legitimate use-case for
942 // accessing from-space objects during class unloading. However, the from-space object may have
943 // been "filled". We've made no effort to prevent old generation classes being unloaded by young
944 // gen (and vice-versa).
945 shenandoah_assert_correct(p, obj);
946 ShenandoahHeap::atomic_clear_oop(p, obj);
947 }
948 } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
949 oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
950 if (resolved == obj) {
951 resolved = _heap->evacuate_object(obj, _thread);
952 }
953 ShenandoahHeap::atomic_update_oop(resolved, p, obj);
954 assert(_heap->cancelled_gc() ||
955 _mark_context->is_marked(resolved) && !_heap->in_collection_set(resolved),
956 "Sanity");
957 }
958 }
959 }
960
961 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
962 ShouldNotReachHere();
963 }
964
965 class ShenandoahIsCLDAliveClosure : public CLDClosure {
966 public:
967 void do_cld(ClassLoaderData* cld) {
1153 assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
1154 ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
1155 heap->workers()->run_task(&task);
1156 heap->set_concurrent_strong_root_in_progress(false);
1157 }
1158
1159 void ShenandoahConcurrentGC::op_cleanup_early() {
1160 ShenandoahHeap::heap()->free_set()->recycle_trash();
1161 }
1162
1163 void ShenandoahConcurrentGC::op_evacuate() {
1164 ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
1165 }
1166
1167 void ShenandoahConcurrentGC::op_init_updaterefs() {
1168 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1169 heap->set_evacuation_in_progress(false);
1170 heap->set_concurrent_weak_root_in_progress(false);
1171 heap->prepare_update_heap_references(true /*concurrent*/);
1172 heap->set_update_refs_in_progress(true);
1173 if (ShenandoahVerify) {
1174 heap->verifier()->verify_before_updaterefs();
1175 }
1176 if (ShenandoahPacing) {
1177 heap->pacer()->setup_for_updaterefs();
1178 }
1179 }
1180
1181 void ShenandoahConcurrentGC::op_updaterefs() {
1182 ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
1183 }
1184
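// Handshake closure that walks a Java thread's oops and updates references that still
// point to forwarded objects, as part of the concurrent update-thread-roots phase.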
1185 class ShenandoahUpdateThreadClosure : public HandshakeClosure {
1186 private:
1187 ShenandoahUpdateRefsClosure _cl;
1188 public:
1189 ShenandoahUpdateThreadClosure();
1190 void do_thread(Thread* thread);
1191 };
1192
1193 ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
1194 HandshakeClosure("Shenandoah Update Thread Roots") {
1195 }
1200 ResourceMark rm;
1201 jt->oops_do(&_cl, nullptr);
1202 }
1203 }
1204
1205 void ShenandoahConcurrentGC::op_update_thread_roots() {
1206 ShenandoahUpdateThreadClosure cl;
1207 Handshake::execute(&cl);
1208 }
1209
1210 void ShenandoahConcurrentGC::op_final_updaterefs() {
1211 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1212 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
1213 assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");
1214
1215 heap->finish_concurrent_roots();
1216
1217 // Clear cancelled GC, if set. On cancellation path, the block before would handle
1218 // everything.
1219 if (heap->cancelled_gc()) {
1220 heap->clear_cancelled_gc(true /* clear oom handler */);
1221 }
1222
1223 // Has to be done before cset is clear
1224 if (ShenandoahVerify) {
1225 heap->verifier()->verify_roots_in_to_space();
1226 }
1227
1228 if (heap->mode()->is_generational() && heap->is_concurrent_old_mark_in_progress()) {
1229 // When the SATB barrier is left on to support concurrent old gen mark, it may pick up writes to
1230 // objects in the collection set. After those objects are evacuated, the pointers in the
1231 // SATB are no longer safe. Once we have finished update references, we are guaranteed that
1232 // no more writes to the collection set are possible.
1233 //
1234 // This will transfer any old pointers in _active_ regions from the SATB to the old gen
1235 // mark queues. All other pointers will be discarded. This would also discard any pointers
1236 // in old regions that were included in a mixed evacuation. We aren't using the SATB filter
1237 // methods here because we cannot control when they execute. If the SATB filter runs _after_
1238 // a region has been recycled, we will not be able to detect the bad pointer.
1239 //
1240 // We are not concerned about skipping this step in abbreviated cycles because regions
1241 // with no live objects cannot have been written to and so cannot have entries in the SATB
1242 // buffers.
1243 heap->transfer_old_pointers_from_satb();
1244 }
1245
1246 heap->update_heap_region_states(true /*concurrent*/);
1247
1248 heap->set_update_refs_in_progress(false);
1249 heap->set_has_forwarded_objects(false);
1250
1251   // The aging cycle is only relevant during the evacuation cycle for individual objects and during final mark
1252   // for entire regions. Both of these operations occur before final update-refs.
1253 heap->set_aging_cycle(false);
1254
1255 if (ShenandoahVerify) {
1256 heap->verifier()->verify_after_updaterefs();
1257 }
1258
1259 if (VerifyAfterGC) {
1260 Universe::verify();
1261 }
1262
1263 heap->rebuild_free_set(true /*concurrent*/);
1264 }
1265
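// Used by the abbreviated cycle (nothing to evacuate): drop the concurrent weak root and
// evacuation flags and, in generational mode, update the age of active young regions.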
1266 void ShenandoahConcurrentGC::op_final_roots() {
1267
1268   ShenandoahHeap* heap = ShenandoahHeap::heap();
1269 heap->set_concurrent_weak_root_in_progress(false);
1270 heap->set_evacuation_in_progress(false);
1271
1272 if (heap->mode()->is_generational()) {
1273     ShenandoahMarkingContext* ctx = heap->complete_marking_context();
1274
1275 for (size_t i = 0; i < heap->num_regions(); i++) {
1276       ShenandoahHeapRegion* r = heap->get_region(i);
1277 if (r->is_active() && r->is_young()) {
1278 HeapWord* tams = ctx->top_at_mark_start(r);
1279 HeapWord* top = r->top();
1280 if (top > tams) {
1281 r->reset_age();
1282 } else if (heap->is_aging_cycle()) {
1283 r->increment_age();
1284 }
1285 }
1286 }
1287 }
1288 }
1289
1290 void ShenandoahConcurrentGC::op_cleanup_complete() {
1291 ShenandoahHeap::heap()->free_set()->recycle_trash();
1292 }
1293
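// If the GC has been cancelled, remember the degeneration point so that a subsequent
// degenerated cycle can resume from it, and tell the caller to abort the concurrent cycle.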
1294 bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
1295 if (ShenandoahHeap::heap()->cancelled_gc()) {
1296 _degen_point = point;
1297 return true;
1298 }
1299 return false;
1300 }
1301
1302 const char* ShenandoahConcurrentGC::init_mark_event_message() const {
1303 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1304 assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1305 if (heap->unload_classes()) {
1306 SHENANDOAH_RETURN_EVENT_MESSAGE(heap, _generation->type(), "Pause Init Mark", " (unload classes)");
1307 } else {
1308 SHENANDOAH_RETURN_EVENT_MESSAGE(heap, _generation->type(), "Pause Init Mark", "");
1309 }
1310 }
1311
1312 const char* ShenandoahConcurrentGC::final_mark_event_message() const {
1313 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1314 assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
1315 "Should not have forwarded objects during final mark, unless old gen concurrent mark is running");
1316
1317 if (heap->unload_classes()) {
1318 SHENANDOAH_RETURN_EVENT_MESSAGE(heap, _generation->type(), "Pause Final Mark", " (unload classes)");
1319 } else {
1320 SHENANDOAH_RETURN_EVENT_MESSAGE(heap, _generation->type(), "Pause Final Mark", "");
1321 }
1322 }
1323
1324 const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
1325 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1326 assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
1327          "Should not have forwarded objects during concurrent mark, unless old gen concurrent mark is running");
1328 if (heap->unload_classes()) {
1329 SHENANDOAH_RETURN_EVENT_MESSAGE(heap, _generation->type(), "Concurrent marking", " (unload classes)");
1330 } else {
1331 SHENANDOAH_RETURN_EVENT_MESSAGE(heap, _generation->type(), "Concurrent marking", "");
1332 }
1333 }