14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26
27 #include "gc/shared/barrierSetNMethod.hpp"
28 #include "gc/shared/collectorCounters.hpp"
29 #include "gc/shared/continuationGCSupport.inline.hpp"
30 #include "gc/shenandoah/shenandoahBreakpoint.hpp"
31 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
32 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
33 #include "gc/shenandoah/shenandoahFreeSet.hpp"
34 #include "gc/shenandoah/shenandoahLock.hpp"
35 #include "gc/shenandoah/shenandoahMark.inline.hpp"
36 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
37 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
38 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
39 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
40 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
41 #include "gc/shenandoah/shenandoahStackWatermark.hpp"
42 #include "gc/shenandoah/shenandoahUtils.hpp"
43 #include "gc/shenandoah/shenandoahVerifier.hpp"
44 #include "gc/shenandoah/shenandoahVMOperations.hpp"
45 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
46 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
47 #include "memory/allocation.hpp"
48 #include "prims/jvmtiTagMap.hpp"
49 #include "runtime/vmThread.hpp"
50 #include "utilities/events.hpp"
51
52 // Breakpoint support
53 class ShenandoahBreakpointGCScope : public StackObj {
68 }
69 };
70
71 class ShenandoahBreakpointMarkScope : public StackObj {
72 private:
73 const GCCause::Cause _cause;
74 public:
75 ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
76 if (_cause == GCCause::_wb_breakpoint) {
77 ShenandoahBreakpoint::at_after_marking_started();
78 }
79 }
80
81 ~ShenandoahBreakpointMarkScope() {
82 if (_cause == GCCause::_wb_breakpoint) {
83 ShenandoahBreakpoint::at_before_marking_completed();
84 }
85 }
86 };
87
88 ShenandoahConcurrentGC::ShenandoahConcurrentGC() :
89 _mark(),
90 _degen_point(ShenandoahDegenPoint::_degenerated_unset) {
91 }
92
93 ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
94 return _degen_point;
95 }
96
97 void ShenandoahConcurrentGC::cancel() {
98 ShenandoahConcurrentMark::cancel();
99 }
100
101 bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
102 ShenandoahHeap* const heap = ShenandoahHeap::heap();
103 ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);
104
105 // Reset for upcoming marking
106 entry_reset();
107
108 // Start initial mark under STW
109 vmop_entry_init_mark();
110
111 {
112 ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);
113 // Concurrent mark roots
114 entry_mark_roots();
115 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_outside_cycle)) return false;
116
117 // Continue concurrent mark
118 entry_mark();
119 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) return false;
120 }
121
122 // Complete marking under STW, and start evacuation
123 vmop_entry_final_mark();
124
125 // Concurrent stack processing
126 if (heap->is_evacuation_in_progress()) {
127 entry_thread_roots();
128 }
129
130 // Process weak roots that might still point to regions that would be broken by cleanup
131 if (heap->is_concurrent_weak_root_in_progress()) {
132 entry_weak_refs();
133 entry_weak_roots();
134 }
135
136 // Final mark might have reclaimed some immediate garbage; kick cleanup to reclaim
137 // the space. This would be the last action if there is nothing to evacuate.
138 entry_cleanup_early();
139
140 {
141 ShenandoahHeapLocker locker(heap->lock());
142 heap->free_set()->log_status();
143 }
144
145 // Perform concurrent class unloading
146 if (heap->unload_classes() &&
147 heap->is_concurrent_weak_root_in_progress()) {
148 entry_class_unloading();
149 }
150
151 // Processing strong roots
152 // This may be skipped if there is nothing to update/evacuate.
153 // If so, strong_root_in_progress would be unset.
154 if (heap->is_concurrent_strong_root_in_progress()) {
155 entry_strong_roots();
156 }
157
158 // Continue the cycle with evacuation and optional update-refs.
159 // This may be skipped if there is nothing to evacuate.
160 // If so, evac_in_progress would be unset by collection set preparation code.
161 if (heap->is_evacuation_in_progress()) {
162 // Concurrently evacuate
163 entry_evacuate();
164 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) return false;
165
166 // Perform update-refs phase.
167 vmop_entry_init_updaterefs();
168 entry_updaterefs();
169 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;
170
171 // Concurrent update thread roots
172 entry_update_thread_roots();
173 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;
174
175 vmop_entry_final_updaterefs();
176
177 // Update references freed up the collection set; kick the cleanup to reclaim the space.
178 entry_cleanup_complete();
179 } else {
180 vmop_entry_final_roots();
181 }
182
183 return true;
184 }
185
186 void ShenandoahConcurrentGC::vmop_entry_init_mark() {
187 ShenandoahHeap* const heap = ShenandoahHeap::heap();
188 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
189 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);
190
191 heap->try_inject_alloc_failure();
192 VM_ShenandoahInitMark op(this);
193 VMThread::execute(&op); // jump to entry_init_mark() under safepoint
194 }
195
196 void ShenandoahConcurrentGC::vmop_entry_final_mark() {
197 ShenandoahHeap* const heap = ShenandoahHeap::heap();
198 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
199 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);
200
201 heap->try_inject_alloc_failure();
202 VM_ShenandoahFinalMarkStartEvac op(this);
206 void ShenandoahConcurrentGC::vmop_entry_init_updaterefs() {
207 ShenandoahHeap* const heap = ShenandoahHeap::heap();
208 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
209 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_update_refs_gross);
210
211 heap->try_inject_alloc_failure();
212 VM_ShenandoahInitUpdateRefs op(this);
213 VMThread::execute(&op);
214 }
215
216 void ShenandoahConcurrentGC::vmop_entry_final_updaterefs() {
217 ShenandoahHeap* const heap = ShenandoahHeap::heap();
218 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
219 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_gross);
220
221 heap->try_inject_alloc_failure();
222 VM_ShenandoahFinalUpdateRefs op(this);
223 VMThread::execute(&op);
224 }
225
226 void ShenandoahConcurrentGC::vmop_entry_final_roots() {
227 ShenandoahHeap* const heap = ShenandoahHeap::heap();
228 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
229 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_roots_gross);
230
231 // This phase does not use workers, no need for setup
232 heap->try_inject_alloc_failure();
233 VM_ShenandoahFinalRoots op(this);
234 VMThread::execute(&op);
235 }
236
237 void ShenandoahConcurrentGC::entry_init_mark() {
238 const char* msg = init_mark_event_message();
239 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_mark);
240 EventMark em("%s", msg);
241
242 ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
243 ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
244 "init marking");
245
246 op_init_mark();
247 }
248
249 void ShenandoahConcurrentGC::entry_final_mark() {
250 const char* msg = final_mark_event_message();
251 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark);
252 EventMark em("%s", msg);
253
254 ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
255 ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
256 "final marking");
257
258 op_final_mark();
259 }
260
261 void ShenandoahConcurrentGC::entry_init_updaterefs() {
262 static const char* msg = "Pause Init Update Refs";
263 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs);
264 EventMark em("%s", msg);
265
266 // No workers used in this phase, no setup required
267 op_init_updaterefs();
268 }
269
270 void ShenandoahConcurrentGC::entry_final_updaterefs() {
285 EventMark em("%s", msg);
286
287 op_final_roots();
288 }
289
290 void ShenandoahConcurrentGC::entry_reset() {
291 ShenandoahHeap* const heap = ShenandoahHeap::heap();
292 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
293 static const char* msg = "Concurrent reset";
294 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
295 EventMark em("%s", msg);
296
297 ShenandoahWorkerScope scope(heap->workers(),
298 ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
299 "concurrent reset");
300
301 heap->try_inject_alloc_failure();
302 op_reset();
303 }
304
305 void ShenandoahConcurrentGC::entry_mark_roots() {
306 ShenandoahHeap* const heap = ShenandoahHeap::heap();
307 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
308 const char* msg = "Concurrent marking roots";
309 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
310 EventMark em("%s", msg);
311
312 ShenandoahWorkerScope scope(heap->workers(),
313 ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
314 "concurrent marking roots");
315
316 heap->try_inject_alloc_failure();
317 op_mark_roots();
318 }
319
320 void ShenandoahConcurrentGC::entry_mark() {
321 ShenandoahHeap* const heap = ShenandoahHeap::heap();
322 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
323 const char* msg = conc_mark_event_message();
324 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark);
325 EventMark em("%s", msg);
326
327 ShenandoahWorkerScope scope(heap->workers(),
328 ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
329 "concurrent marking");
330
331 heap->try_inject_alloc_failure();
332 op_mark();
333 }
334
335 void ShenandoahConcurrentGC::entry_thread_roots() {
336 ShenandoahHeap* const heap = ShenandoahHeap::heap();
337 static const char* msg = "Concurrent thread roots";
338 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_thread_roots);
339 EventMark em("%s", msg);
340
341 ShenandoahWorkerScope scope(heap->workers(),
342 ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
343 msg);
458 ShenandoahWorkerScope scope(heap->workers(),
459 ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
460 "concurrent reference update");
461
462 heap->try_inject_alloc_failure();
463 op_updaterefs();
464 }
465
466 void ShenandoahConcurrentGC::entry_cleanup_complete() {
467 ShenandoahHeap* const heap = ShenandoahHeap::heap();
468 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
469 static const char* msg = "Concurrent cleanup";
470 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
471 EventMark em("%s", msg);
472
473 // This phase does not use workers, no need for setup
474 heap->try_inject_alloc_failure();
475 op_cleanup_complete();
476 }
477
478 void ShenandoahConcurrentGC::op_reset() {
479 ShenandoahHeap* const heap = ShenandoahHeap::heap();
480 if (ShenandoahPacing) {
481 heap->pacer()->setup_for_reset();
482 }
483
484 heap->prepare_gc();
485 }
486
487 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
488 private:
489 ShenandoahMarkingContext* const _ctx;
490 public:
491 ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
492
493 void heap_region_do(ShenandoahHeapRegion* r) {
494 assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
495 if (r->is_active()) {
496 // Check if the region needs its TAMS updated. We have already updated it during concurrent
497 // reset, so it is very likely we don't need to do another write here.
498 if (_ctx->top_at_mark_start(r) != r->top()) {
499 _ctx->capture_top_at_mark_start(r);
500 }
501 } else {
502 assert(_ctx->top_at_mark_start(r) == r->top(),
503 "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
504 }
505 }
506
507 bool is_thread_safe() { return true; }
508 };
509
510 void ShenandoahConcurrentGC::start_mark() {
511 _mark.start_mark();
512 }
513
514 void ShenandoahConcurrentGC::op_init_mark() {
515 ShenandoahHeap* const heap = ShenandoahHeap::heap();
516 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
517 assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
518
519 assert(heap->marking_context()->is_bitmap_clear(), "need clear marking bitmap");
520 assert(!heap->marking_context()->is_complete(), "should not be complete");
521 assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
522
523 if (ShenandoahVerify) {
524 heap->verifier()->verify_before_concmark();
525 }
526
527 if (VerifyBeforeGC) {
528 Universe::verify();
529 }
530
531 heap->set_concurrent_mark_in_progress(true);
532
533 start_mark();
534
535 {
536 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
537 ShenandoahInitMarkUpdateRegionStateClosure cl;
538 heap->parallel_heap_region_iterate(&cl);
539 }
540
541 // Weak reference processing
542 ShenandoahReferenceProcessor* rp = heap->ref_processor();
543 rp->reset_thread_locals();
544 rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());
545
546 // Make above changes visible to worker threads
547 OrderAccess::fence();
548 // Arm nmethods for concurrent marking. When an nmethod is about to be executed,
549 // we need to make sure that all its metadata are marked. The alternative is to remark
550 // thread roots at the final mark pause, but that can be a potential latency killer.
551 if (heap->unload_classes()) {
552 ShenandoahCodeRoots::arm_nmethods();
553 }
554
555 ShenandoahStackWatermark::change_epoch_id();
556 if (ShenandoahPacing) {
557 heap->pacer()->setup_for_mark();
558 }
559 }
560
561 void ShenandoahConcurrentGC::op_mark_roots() {
562 _mark.mark_concurrent_roots();
563 }
564
565 void ShenandoahConcurrentGC::op_mark() {
566 _mark.concurrent_mark();
567 }
568
569 void ShenandoahConcurrentGC::op_final_mark() {
570 ShenandoahHeap* const heap = ShenandoahHeap::heap();
571 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
572 assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
573
574 if (ShenandoahVerify) {
575 heap->verifier()->verify_roots_no_forwarded();
576 }
577
578 if (!heap->cancelled_gc()) {
579 _mark.finish_mark();
580 assert(!heap->cancelled_gc(), "STW mark cannot OOM");
581
582 // Notify JVMTI that the tagmap table will need cleaning.
583 JvmtiTagMap::set_needs_cleaning();
584
585 heap->prepare_regions_and_collection_set(true /*concurrent*/);
586
587 // Has to be done after cset selection
588 heap->prepare_concurrent_roots();
589
590 if (!heap->collection_set()->is_empty()) {
591 if (ShenandoahVerify) {
592 heap->verifier()->verify_before_evacuation();
593 }
594
595 heap->set_evacuation_in_progress(true);
596 // From here on, we need to update references.
597 heap->set_has_forwarded_objects(true);
598
599 // Verify before arming for concurrent processing.
600 // Otherwise, verification can trigger stack processing.
601 if (ShenandoahVerify) {
602 heap->verifier()->verify_during_evacuation();
603 }
604
605 // Arm nmethods/stack for concurrent processing
606 ShenandoahCodeRoots::arm_nmethods();
607 ShenandoahStackWatermark::change_epoch_id();
608
609 if (ShenandoahPacing) {
610 heap->pacer()->setup_for_evac();
611 }
612 } else {
613 if (ShenandoahVerify) {
614 heap->verifier()->verify_after_concmark();
615 }
616
617 if (VerifyAfterGC) {
618 Universe::verify();
619 }
620 }
621 }
622 }
623
624 class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
625 private:
626 OopClosure* const _oops;
627
628 public:
657 _java_threads.threads_do(&thr_cl, worker_id);
658 }
659 };
660
661 void ShenandoahConcurrentGC::op_thread_roots() {
662 ShenandoahHeap* const heap = ShenandoahHeap::heap();
663 assert(heap->is_evacuation_in_progress(), "Checked by caller");
664 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
665 ShenandoahConcurrentEvacUpdateThreadTask task(heap->workers()->active_workers());
666 heap->workers()->run_task(&task);
667 }
668
669 void ShenandoahConcurrentGC::op_weak_refs() {
670 ShenandoahHeap* const heap = ShenandoahHeap::heap();
671 assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
672 // Concurrent weak refs processing
673 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
674 if (heap->gc_cause() == GCCause::_wb_breakpoint) {
675 ShenandoahBreakpoint::at_after_reference_processing_started();
676 }
677 heap->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
678 }
679
680 class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
681 private:
682 ShenandoahHeap* const _heap;
683 ShenandoahMarkingContext* const _mark_context;
684 bool _evac_in_progress;
685 Thread* const _thread;
686
687 public:
688 ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
689 void do_oop(oop* p);
690 void do_oop(narrowOop* p);
691 };
692
693 ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
694 _heap(ShenandoahHeap::heap()),
695 _mark_context(ShenandoahHeap::heap()->marking_context()),
696 _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
697 _thread(Thread::current()) {
698 }
699
700 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
701 const oop obj = RawAccess<>::oop_load(p);
702 if (!CompressedOops::is_null(obj)) {
703 if (!_mark_context->is_marked(obj)) {
704 shenandoah_assert_correct(p, obj);
705 ShenandoahHeap::atomic_clear_oop(p, obj);
706 } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
707 oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
708 if (resolved == obj) {
709 resolved = _heap->evacuate_object(obj, _thread);
710 }
711 ShenandoahHeap::atomic_update_oop(resolved, p, obj);
712 assert(_heap->cancelled_gc() ||
713 _mark_context->is_marked(resolved) && !_heap->in_collection_set(resolved),
714 "Sanity");
715 }
716 }
717 }
718
719 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
720 ShouldNotReachHere();
721 }
722
723 class ShenandoahIsCLDAliveClosure : public CLDClosure {
724 public:
725 void do_cld(ClassLoaderData* cld) {
910 assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
911 ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
912 heap->workers()->run_task(&task);
913 heap->set_concurrent_strong_root_in_progress(false);
914 }
915
916 void ShenandoahConcurrentGC::op_cleanup_early() {
917 ShenandoahHeap::heap()->free_set()->recycle_trash();
918 }
919
920 void ShenandoahConcurrentGC::op_evacuate() {
921 ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
922 }
923
924 void ShenandoahConcurrentGC::op_init_updaterefs() {
925 ShenandoahHeap* const heap = ShenandoahHeap::heap();
926 heap->set_evacuation_in_progress(false);
927 heap->set_concurrent_weak_root_in_progress(false);
928 heap->prepare_update_heap_references(true /*concurrent*/);
929 heap->set_update_refs_in_progress(true);
930
931 if (ShenandoahPacing) {
932 heap->pacer()->setup_for_updaterefs();
933 }
934 }
935
936 void ShenandoahConcurrentGC::op_updaterefs() {
937 ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
938 }
939
940 class ShenandoahUpdateThreadClosure : public HandshakeClosure {
941 private:
942 ShenandoahUpdateRefsClosure _cl;
943 public:
944 ShenandoahUpdateThreadClosure();
945 void do_thread(Thread* thread);
946 };
947
948 ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
949 HandshakeClosure("Shenandoah Update Thread Roots") {
950 }
955 ResourceMark rm;
956 jt->oops_do(&_cl, nullptr);
957 }
958 }
959
960 void ShenandoahConcurrentGC::op_update_thread_roots() {
961 ShenandoahUpdateThreadClosure cl;
962 Handshake::execute(&cl);
963 }
964
965 void ShenandoahConcurrentGC::op_final_updaterefs() {
966 ShenandoahHeap* const heap = ShenandoahHeap::heap();
967 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
968 assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");
969
970 heap->finish_concurrent_roots();
971
972 // Clear cancelled GC, if set. On the cancellation path, the block before would have
973 // handled everything.
974 if (heap->cancelled_gc()) {
975 heap->clear_cancelled_gc();
976 }
977
978 // Has to be done before cset is clear
979 if (ShenandoahVerify) {
980 heap->verifier()->verify_roots_in_to_space();
981 }
982
983 heap->update_heap_region_states(true /*concurrent*/);
984
985 heap->set_update_refs_in_progress(false);
986 heap->set_has_forwarded_objects(false);
987
988 if (ShenandoahVerify) {
989 heap->verifier()->verify_after_updaterefs();
990 }
991
992 if (VerifyAfterGC) {
993 Universe::verify();
994 }
995
996 heap->rebuild_free_set(true /*concurrent*/);
997 }
998
999 void ShenandoahConcurrentGC::op_final_roots() {
1000 ShenandoahHeap::heap()->set_concurrent_weak_root_in_progress(false);
1001 }
1002
1003 void ShenandoahConcurrentGC::op_cleanup_complete() {
1004 ShenandoahHeap::heap()->free_set()->recycle_trash();
1005 }
1006
1007 bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
1008 if (ShenandoahHeap::heap()->cancelled_gc()) {
1009 _degen_point = point;
1010 return true;
1011 }
1012 return false;
1013 }
1014
1015 const char* ShenandoahConcurrentGC::init_mark_event_message() const {
1016 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1017 assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1018 if (heap->unload_classes()) {
1019 return "Pause Init Mark (unload classes)";
1020 } else {
1021 return "Pause Init Mark";
1022 }
1023 }
1024
1025 const char* ShenandoahConcurrentGC::final_mark_event_message() const {
1026 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1027 assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1028 if (heap->unload_classes()) {
1029 return "Pause Final Mark (unload classes)";
1030 } else {
1031 return "Pause Final Mark";
1032 }
1033 }
1034
1035 const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
1036 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1037 assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1038 if (heap->unload_classes()) {
1039 return "Concurrent marking (unload classes)";
1040 } else {
1041 return "Concurrent marking";
1042 }
1043 }
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26
27 #include "gc/shared/barrierSetNMethod.hpp"
28 #include "gc/shared/collectorCounters.hpp"
29 #include "gc/shared/continuationGCSupport.inline.hpp"
30 #include "gc/shenandoah/shenandoahBreakpoint.hpp"
31 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
32 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
33 #include "gc/shenandoah/shenandoahFreeSet.hpp"
34 #include "gc/shenandoah/shenandoahGeneration.hpp"
35 #include "gc/shenandoah/shenandoahLock.hpp"
36 #include "gc/shenandoah/shenandoahMark.inline.hpp"
37 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
38 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
39 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
40 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
41 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
42 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
43 #include "gc/shenandoah/shenandoahStackWatermark.hpp"
44 #include "gc/shenandoah/shenandoahUtils.hpp"
45 #include "gc/shenandoah/shenandoahVerifier.hpp"
46 #include "gc/shenandoah/shenandoahVMOperations.hpp"
47 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
48 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
49 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
50 #include "memory/allocation.hpp"
51 #include "prims/jvmtiTagMap.hpp"
52 #include "runtime/vmThread.hpp"
53 #include "utilities/events.hpp"
54
55 // Breakpoint support
56 class ShenandoahBreakpointGCScope : public StackObj {
71 }
72 };
73
74 class ShenandoahBreakpointMarkScope : public StackObj {
75 private:
76 const GCCause::Cause _cause;
77 public:
78 ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
79 if (_cause == GCCause::_wb_breakpoint) {
80 ShenandoahBreakpoint::at_after_marking_started();
81 }
82 }
83
84 ~ShenandoahBreakpointMarkScope() {
85 if (_cause == GCCause::_wb_breakpoint) {
86 ShenandoahBreakpoint::at_before_marking_completed();
87 }
88 }
89 };
90
91 ShenandoahConcurrentGC::ShenandoahConcurrentGC(ShenandoahGeneration* generation, bool do_old_gc_bootstrap) :
92 _mark(generation),
93 _degen_point(ShenandoahDegenPoint::_degenerated_unset),
94 _abbreviated(false),
95 _do_old_gc_bootstrap(do_old_gc_bootstrap),
96 _generation(generation) {
97 }
98
99 ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
100 return _degen_point;
101 }
102
103 bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
104 ShenandoahHeap* const heap = ShenandoahHeap::heap();
105 heap->start_conc_gc();
106
107 ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);
108
109 // Reset for upcoming marking
110 entry_reset();
111
112 // Start initial mark under STW
113 vmop_entry_init_mark();
114
115 {
116 ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);
117
118 // Reset task queue stats here, rather than in mark_concurrent_roots
119 // because remembered set scan will `push` oops into the queues and
120 // resetting after this happens will lose those counts.
121 TASKQUEUE_STATS_ONLY(_mark.task_queues()->reset_taskqueue_stats());
122
123 // Concurrent remembered set scanning
124 entry_scan_remembered_set();
125 // When RS scanning yields, we will need a check_cancellation_and_abort()
126 // degeneration point here.
127
128 // Concurrent mark roots
129 entry_mark_roots();
130 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_roots)) return false;
131
132 // Continue concurrent mark
133 entry_mark();
134 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) return false;
135 }
136
137 // Complete marking under STW, and start evacuation
138 vmop_entry_final_mark();
139
140 // If GC was cancelled before final mark, then the safepoint operation will do nothing
141 // and the concurrent mark will still be in progress. In this case it is safe to resume
142 // the degenerated cycle from the marking phase. On the other hand, if the GC is cancelled
143 // after final mark (but before this check), then the final mark safepoint operation
144 // will have finished the mark (setting concurrent mark in progress to false). Final mark
145 // will also have set up state (in concurrent stack processing) that will not be safe to
146 // resume from the marking phase in the degenerated cycle. That is, if the cancellation
147 // occurred after final mark, we must resume the degenerated cycle after the marking phase.
148 if (_generation->is_concurrent_mark_in_progress() && check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) {
149 assert(!heap->is_concurrent_weak_root_in_progress(), "Weak roots should not be in progress when concurrent mark is in progress");
150 return false;
151 }
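// Illustrative summary of the two cases described in the comment above (a restatement only, not
// additional logic; the degeneration points named are the ones used by the checks in this method):
//
//   cancellation observed           | concurrent mark state | safe resume point for degenerated GC
//   --------------------------------+-----------------------+--------------------------------------
//   before the final mark safepoint | still in progress     | _degenerated_mark (resume marking)
//   at/after the final mark safepoint | finished            | a later point, handled by later checks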
152
153 // Concurrent stack processing
154 if (heap->is_evacuation_in_progress()) {
155 entry_thread_roots();
156 }
157
158 // Process weak roots that might still point to regions that would be broken by cleanup
159 if (heap->is_concurrent_weak_root_in_progress()) {
160 entry_weak_refs();
161 entry_weak_roots();
162 }
163
164 // Final mark might have reclaimed some immediate garbage; kick cleanup to reclaim
165 // the space. This would be the last action if there is nothing to evacuate. Note that
166 // we will not age young-gen objects in the case that we skip evacuation.
167 entry_cleanup_early();
168
169 {
170 ShenandoahHeapLocker locker(heap->lock());
171 heap->free_set()->log_status();
172 }
173
174 // Perform concurrent class unloading
175 if (heap->unload_classes() &&
176 heap->is_concurrent_weak_root_in_progress()) {
177 entry_class_unloading();
178 }
179
180 // Processing strong roots
181 // This may be skipped if there is nothing to update/evacuate.
182 // If so, strong_root_in_progress would be unset.
183 if (heap->is_concurrent_strong_root_in_progress()) {
184 entry_strong_roots();
185 }
186
187 // Global marking has completed. We need to fill in any unmarked objects in the old generation
188 // so that subsequent remembered set scans will not walk pointers into reclaimed memory.
189 if (!heap->cancelled_gc() && heap->mode()->is_generational() && _generation->generation_mode() == GLOBAL) {
190 entry_global_coalesce_and_fill();
191 }
192
193 // Continue the cycle with evacuation and optional update-refs.
194 // This may be skipped if there is nothing to evacuate.
195 // If so, evac_in_progress would be unset by collection set preparation code.
196 if (heap->is_evacuation_in_progress()) {
197 // Concurrently evacuate
198 entry_evacuate();
199 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) return false;
200
201 // Perform update-refs phase.
202 vmop_entry_init_updaterefs();
203 entry_updaterefs();
204 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;
205
206 // Concurrent update thread roots
207 entry_update_thread_roots();
208 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;
209
210 vmop_entry_final_updaterefs();
211
212 // Update references freed up the collection set; kick the cleanup to reclaim the space.
213 entry_cleanup_complete();
214 } else {
215 // We chose not to evacuate because we found sufficient immediate garbage.
216 vmop_entry_final_roots(heap->is_aging_cycle());
217 _abbreviated = true;
218 }
219
220 if (heap->mode()->is_generational()) {
221 size_t old_available, young_available;
222 {
223 ShenandoahYoungGeneration* young_gen = heap->young_generation();
224 ShenandoahGeneration* old_gen = heap->old_generation();
225 ShenandoahHeapLocker locker(heap->lock());
226
227 size_t old_usage_before_evac = heap->capture_old_usage(0);
228 size_t old_usage_now = old_gen->used();
229 size_t promoted_bytes = old_usage_now - old_usage_before_evac;
230 heap->set_previous_promotion(promoted_bytes);
231
232 young_gen->unadjust_available();
233 old_gen->unadjust_available();
234 // No need to call old_gen->increase_used().
235 // That was done when plabs were allocated, accounting for both old evacs and promotions.
236
237 young_available = young_gen->adjusted_available();
238 old_available = old_gen->adjusted_available();
239
240 heap->set_alloc_supplement_reserve(0);
241 heap->set_young_evac_reserve(0);
242 heap->set_old_evac_reserve(0);
243 heap->reset_old_evac_expended();
244 heap->set_promoted_reserve(0);
245 }
246 }
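// Illustrative note (no functional effect): the unadjust_available() calls in the block above undo
// the temporary adjust_available() "loan" that op_final_mark() sets up when evacuation starts, and
// the reserves zeroed here are re-established by the next cycle's prepare_regions_and_collection_set().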
247 return true;
248 }
249
250 void ShenandoahConcurrentGC::vmop_entry_init_mark() {
251 ShenandoahHeap* const heap = ShenandoahHeap::heap();
252 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
253 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);
254
255 heap->try_inject_alloc_failure();
256 VM_ShenandoahInitMark op(this);
257 VMThread::execute(&op); // jump to entry_init_mark() under safepoint
258 }
259
260 void ShenandoahConcurrentGC::vmop_entry_final_mark() {
261 ShenandoahHeap* const heap = ShenandoahHeap::heap();
262 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
263 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);
264
265 heap->try_inject_alloc_failure();
266 VM_ShenandoahFinalMarkStartEvac op(this);
270 void ShenandoahConcurrentGC::vmop_entry_init_updaterefs() {
271 ShenandoahHeap* const heap = ShenandoahHeap::heap();
272 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
273 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_update_refs_gross);
274
275 heap->try_inject_alloc_failure();
276 VM_ShenandoahInitUpdateRefs op(this);
277 VMThread::execute(&op);
278 }
279
280 void ShenandoahConcurrentGC::vmop_entry_final_updaterefs() {
281 ShenandoahHeap* const heap = ShenandoahHeap::heap();
282 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
283 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_gross);
284
285 heap->try_inject_alloc_failure();
286 VM_ShenandoahFinalUpdateRefs op(this);
287 VMThread::execute(&op);
288 }
289
290 void ShenandoahConcurrentGC::vmop_entry_final_roots(bool increment_region_ages) {
291 ShenandoahHeap* const heap = ShenandoahHeap::heap();
292 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
293 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_roots_gross);
294
295 // This phase does not use workers, no need for setup
296 heap->try_inject_alloc_failure();
297 VM_ShenandoahFinalRoots op(this, increment_region_ages);
298 VMThread::execute(&op);
299 }
300
301 void ShenandoahConcurrentGC::entry_init_mark() {
302 char msg[1024];
303 init_mark_event_message(msg, sizeof(msg));
304 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_mark);
305 EventMark em("%s", msg);
306
307 ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
308 ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
309 "init marking");
310
311 op_init_mark();
312 }
313
314 void ShenandoahConcurrentGC::entry_final_mark() {
315 char msg[1024];
316 final_mark_event_message(msg, sizeof(msg));
317 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark);
318 EventMark em("%s", msg);
319
320 ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
321 ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
322 "final marking");
323
324 op_final_mark();
325 }
326
327 void ShenandoahConcurrentGC::entry_init_updaterefs() {
328 static const char* msg = "Pause Init Update Refs";
329 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs);
330 EventMark em("%s", msg);
331
332 // No workers used in this phase, no setup required
333 op_init_updaterefs();
334 }
335
336 void ShenandoahConcurrentGC::entry_final_updaterefs() {
351 EventMark em("%s", msg);
352
353 op_final_roots();
354 }
355
356 void ShenandoahConcurrentGC::entry_reset() {
357 ShenandoahHeap* const heap = ShenandoahHeap::heap();
358 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
359 static const char* msg = "Concurrent reset";
360 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
361 EventMark em("%s", msg);
362
363 ShenandoahWorkerScope scope(heap->workers(),
364 ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
365 "concurrent reset");
366
367 heap->try_inject_alloc_failure();
368 op_reset();
369 }
370
371 void ShenandoahConcurrentGC::entry_scan_remembered_set() {
372 if (_generation->generation_mode() == YOUNG) {
373 ShenandoahHeap* const heap = ShenandoahHeap::heap();
374 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
375 const char* msg = "Concurrent remembered set scanning";
376 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::init_scan_rset);
377 EventMark em("%s", msg);
378
379 ShenandoahWorkerScope scope(heap->workers(),
380 ShenandoahWorkerPolicy::calc_workers_for_rs_scanning(),
381 msg);
382
383 heap->try_inject_alloc_failure();
384 _generation->scan_remembered_set(true /* is_concurrent */);
385 }
386 }
387
388 void ShenandoahConcurrentGC::entry_mark_roots() {
389 ShenandoahHeap* const heap = ShenandoahHeap::heap();
390 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
391 const char* msg = "Concurrent marking roots";
392 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
393 EventMark em("%s", msg);
394
395 ShenandoahWorkerScope scope(heap->workers(),
396 ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
397 "concurrent marking roots");
398
399 heap->try_inject_alloc_failure();
400 op_mark_roots();
401 }
402
403 void ShenandoahConcurrentGC::entry_mark() {
404 char msg[1024];
405 ShenandoahHeap* const heap = ShenandoahHeap::heap();
406 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
407 conc_mark_event_message(msg, sizeof(msg));
408 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark);
409 EventMark em("%s", msg);
410
411 ShenandoahWorkerScope scope(heap->workers(),
412 ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
413 "concurrent marking");
414
415 heap->try_inject_alloc_failure();
416 op_mark();
417 }
418
419 void ShenandoahConcurrentGC::entry_thread_roots() {
420 ShenandoahHeap* const heap = ShenandoahHeap::heap();
421 static const char* msg = "Concurrent thread roots";
422 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_thread_roots);
423 EventMark em("%s", msg);
424
425 ShenandoahWorkerScope scope(heap->workers(),
426 ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
427 msg);
542 ShenandoahWorkerScope scope(heap->workers(),
543 ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
544 "concurrent reference update");
545
546 heap->try_inject_alloc_failure();
547 op_updaterefs();
548 }
549
550 void ShenandoahConcurrentGC::entry_cleanup_complete() {
551 ShenandoahHeap* const heap = ShenandoahHeap::heap();
552 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
553 static const char* msg = "Concurrent cleanup";
554 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
555 EventMark em("%s", msg);
556
557 // This phase does not use workers, no need for setup
558 heap->try_inject_alloc_failure();
559 op_cleanup_complete();
560 }
561
562 void ShenandoahConcurrentGC::entry_global_coalesce_and_fill() {
563 ShenandoahHeap* const heap = ShenandoahHeap::heap();
564
565 const char* msg = "Coalescing and filling old regions in global collect";
566 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::coalesce_and_fill);
567
568 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
569 EventMark em("%s", msg);
570 ShenandoahWorkerScope scope(heap->workers(),
571 ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
572 "concurrent coalesce and fill");
573
574 op_global_coalesce_and_fill();
575 }
576
577 void ShenandoahConcurrentGC::op_reset() {
578 ShenandoahHeap* const heap = ShenandoahHeap::heap();
579 if (ShenandoahPacing) {
580 heap->pacer()->setup_for_reset();
581 }
582 _generation->prepare_gc();
583 }
584
585 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
586 private:
587 ShenandoahMarkingContext* const _ctx;
588 public:
589 ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
590
591 void heap_region_do(ShenandoahHeapRegion* r) {
592 assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
593 if (r->is_active()) {
594 // Check if the region needs its TAMS updated. We have already updated it during concurrent
595 // reset, so it is very likely we don't need to do another write here. Since most regions
596 // are not "active", this path is relatively rare.
597 if (_ctx->top_at_mark_start(r) != r->top()) {
598 _ctx->capture_top_at_mark_start(r);
599 }
600 } else {
601 assert(_ctx->top_at_mark_start(r) == r->top(),
602 "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
603 }
604 }
605
606 bool is_thread_safe() { return true; }
607 };
608
609 void ShenandoahConcurrentGC::start_mark() {
610 _mark.start_mark();
611 }
612
613 void ShenandoahConcurrentGC::op_init_mark() {
614 ShenandoahHeap* const heap = ShenandoahHeap::heap();
615 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
616 assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
617
618 assert(_generation->is_bitmap_clear(), "need clear marking bitmap");
619 assert(!_generation->is_mark_complete(), "should not be complete");
620 assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
621
622
623 if (heap->mode()->is_generational()) {
624 if (_generation->generation_mode() == YOUNG || (_generation->generation_mode() == GLOBAL && ShenandoahVerify)) {
625 // The current implementation of swap_remembered_set() copies the write-card-table
626 // to the read-card-table. The remembered sets are also swapped for GLOBAL collections
627 // so that the verifier works with the correct copy of the card table when verifying.
628 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_swap_rset);
629 _generation->swap_remembered_set();
630 }
631
632 if (_generation->generation_mode() == GLOBAL) {
633 heap->cancel_old_gc();
634 } else if (heap->is_concurrent_old_mark_in_progress()) {
635 // Purge the SATB buffers, transferring any valid, old pointers to the
636 // old generation mark queue. Any pointers in a young region will be
637 // abandoned.
638 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_transfer_satb);
639 heap->transfer_old_pointers_from_satb();
640 }
641 }
642
643 if (ShenandoahVerify) {
644 heap->verifier()->verify_before_concmark();
645 }
646
647 if (VerifyBeforeGC) {
648 Universe::verify();
649 }
650
651 _generation->set_concurrent_mark_in_progress(true);
652
653 start_mark();
654
655 if (_do_old_gc_bootstrap) {
656 // Update region state for both young and old regions
657 // TODO: We should be able to pull this out of the safepoint for the bootstrap
658 // cycle. The top of an old region will only move when a GC cycle evacuates
659 // objects into it. When we start an old cycle, we know that nothing can touch
660 // the top of old regions.
661 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
662 ShenandoahInitMarkUpdateRegionStateClosure cl;
663 heap->parallel_heap_region_iterate(&cl);
664 } else {
665 // Update region state for only young regions
666 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
667 ShenandoahInitMarkUpdateRegionStateClosure cl;
668 _generation->parallel_heap_region_iterate(&cl);
669 }
670
671 // Weak reference processing
672 ShenandoahReferenceProcessor* rp = _generation->ref_processor();
673 rp->reset_thread_locals();
674 rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());
675
676 // Make above changes visible to worker threads
677 OrderAccess::fence();
678
679 // Arm nmethods for concurrent marking. When an nmethod is about to be executed,
680 // we need to make sure that all its metadata are marked. The alternative is to remark
681 // thread roots at the final mark pause, but that can be a potential latency killer.
682 if (heap->unload_classes()) {
683 ShenandoahCodeRoots::arm_nmethods();
684 }
685
686 ShenandoahStackWatermark::change_epoch_id();
687 if (ShenandoahPacing) {
688 heap->pacer()->setup_for_mark();
689 }
690 }
691
692 void ShenandoahConcurrentGC::op_mark_roots() {
693 _mark.mark_concurrent_roots();
694 }
695
696 void ShenandoahConcurrentGC::op_mark() {
697 _mark.concurrent_mark();
698 }
699
700 void ShenandoahConcurrentGC::op_final_mark() {
701 ShenandoahHeap* const heap = ShenandoahHeap::heap();
702 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
703 assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
704
705 if (ShenandoahVerify) {
706 heap->verifier()->verify_roots_no_forwarded();
707 }
708
709 if (!heap->cancelled_gc()) {
710 _mark.finish_mark();
711 assert(!heap->cancelled_gc(), "STW mark cannot OOM");
712
713 // Notify JVMTI that the tagmap table will need cleaning.
714 JvmtiTagMap::set_needs_cleaning();
715
716 // The collection set is chosen by prepare_regions_and_collection_set().
717 //
718 // TODO: Under severe memory overload conditions that can be checked here, we may want to limit
719 // the inclusion of old-gen candidates within the collection set. This would allow us to prioritize efforts on
720 // evacuating young-gen. This remediation is most appropriate when old-gen availability is very high (so there
721 // are negligible negative impacts from delaying completion of old-gen evacuation) and when young-gen collections
722 // are "under duress" (as signalled by very low availability of memory within young-gen, indicating that/ young-gen
723 // collections are not triggering frequently enough).
724 _generation->prepare_regions_and_collection_set(true /*concurrent*/);
725
726 // Upon return from prepare_regions_and_collection_set(), certain parameters have been established to govern the
727 // evacuation efforts that are about to begin. In particular:
728 //
729 // heap->get_promoted_reserve() represents the amount of memory within old-gen's available memory that has
730 // been set aside to hold objects promoted from young-gen memory. This represents an estimated percentage
731 // of the live young-gen memory within the collection set. If there is more data ready to be promoted than
732 // can fit within this reserve, the promotion of some objects will be deferred until a subsequent evacuation
733 // pass.
734 //
735 // heap->get_old_evac_reserve() represents the amount of memory within old-gen's available memory that has been
736 // set aside to hold objects evacuated from the old-gen collection set.
737 //
738 // heap->get_young_evac_reserve() represents the amount of memory within young-gen's available memory that has
739 // been set aside to hold objects evacuated from the young-gen collection set. Conservatively, this value
740 // equals the entire amount of live young-gen memory within the collection set, even though some of this memory
741 // will likely be promoted.
742 //
743 // heap->get_alloc_supplement_reserve() represents the amount of old-gen memory that can be allocated during evacuation
744 // and update-refs phases of gc. The young evacuation reserve has already been removed from this quantity.
745
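// Illustrative sketch only (not the actual accounting code; the accessors named are the ones
// described in the comment above): under these assumptions the per-cycle budgets relate roughly as
//
//   adjusted_young_available = young_available + get_alloc_supplement_reserve()
//   adjusted_old_available   = old_available   - get_alloc_supplement_reserve()
//   get_promoted_reserve() + get_old_evac_reserve() <= adjusted_old_available
//   get_young_evac_reserve()                        <= adjusted_young_available
//
// The adjustments are applied via adjust_available() below when evacuation starts, and undone via
// unadjust_available() at the end of the cycle in collect().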
746 // Has to be done after cset selection
747 heap->prepare_concurrent_roots();
748
749 if (!heap->collection_set()->is_empty()) {
750 LogTarget(Info, gc, ergo) lt;
751 if (lt.is_enabled()) {
752 ResourceMark rm;
753 LogStream ls(lt);
754 heap->collection_set()->print_on(&ls);
755 }
756
757 if (ShenandoahVerify) {
758 heap->verifier()->verify_before_evacuation();
759 }
760
761 heap->set_evacuation_in_progress(true);
762 // From here on, we need to update references.
763 heap->set_has_forwarded_objects(true);
764
765 // Verify before arming for concurrent processing.
766 // Otherwise, verification can trigger stack processing.
767 if (ShenandoahVerify) {
768 heap->verifier()->verify_during_evacuation();
769 }
770
771 // Arm nmethods/stack for concurrent processing
772 ShenandoahCodeRoots::arm_nmethods();
773 ShenandoahStackWatermark::change_epoch_id();
774
775 if (heap->mode()->is_generational()) {
776 // Calculate the temporary evacuation allowance supplement to young-gen memory capacity (for allocations
777 // and young-gen evacuations).
778 size_t young_available = heap->young_generation()->adjust_available(heap->get_alloc_supplement_reserve());
779 // old_available is memory that can hold promotions and evacuations. Subtract out the memory that is being
780 // loaned for young-gen allocations or evacuations.
781 size_t old_available = heap->old_generation()->adjust_available(-heap->get_alloc_supplement_reserve());
782
783 log_info(gc, ergo)("After generational memory budget adjustments, old available: " SIZE_FORMAT
784 "%s, young_available: " SIZE_FORMAT "%s",
785 byte_size_in_proper_unit(old_available), proper_unit_for_byte_size(old_available),
786 byte_size_in_proper_unit(young_available), proper_unit_for_byte_size(young_available));
787 }
788
789 if (ShenandoahPacing) {
790 heap->pacer()->setup_for_evac();
791 }
792 } else {
793 if (ShenandoahVerify) {
794 heap->verifier()->verify_after_concmark();
795 }
796
797 if (VerifyAfterGC) {
798 Universe::verify();
799 }
800 }
801 }
802 }
803
804 class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
805 private:
806 OopClosure* const _oops;
807
808 public:
837 _java_threads.threads_do(&thr_cl, worker_id);
838 }
839 };
840
841 void ShenandoahConcurrentGC::op_thread_roots() {
842 ShenandoahHeap* const heap = ShenandoahHeap::heap();
843 assert(heap->is_evacuation_in_progress(), "Checked by caller");
844 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
845 ShenandoahConcurrentEvacUpdateThreadTask task(heap->workers()->active_workers());
846 heap->workers()->run_task(&task);
847 }
848
849 void ShenandoahConcurrentGC::op_weak_refs() {
850 ShenandoahHeap* const heap = ShenandoahHeap::heap();
851 assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
852 // Concurrent weak refs processing
853 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
854 if (heap->gc_cause() == GCCause::_wb_breakpoint) {
855 ShenandoahBreakpoint::at_after_reference_processing_started();
856 }
857 _generation->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
858 }
859
860 class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
861 private:
862 ShenandoahHeap* const _heap;
863 ShenandoahMarkingContext* const _mark_context;
864 bool _evac_in_progress;
865 Thread* const _thread;
866
867 public:
868 ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
869 void do_oop(oop* p);
870 void do_oop(narrowOop* p);
871 };
872
873 ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
874 _heap(ShenandoahHeap::heap()),
875 _mark_context(ShenandoahHeap::heap()->marking_context()),
876 _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
877 _thread(Thread::current()) {
878 }
879
880 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
881 const oop obj = RawAccess<>::oop_load(p);
882 if (!CompressedOops::is_null(obj)) {
883 if (!_mark_context->is_marked(obj)) {
884 if (_heap->is_in_active_generation(obj)) {
885 // TODO: This worries me. Here we are asserting that an unmarked from-space object is 'correct'.
886 // Normally, I would call this a bogus assert, but there seems to be a legitimate use-case for
887 // accessing from-space objects during class unloading. However, the from-space object may have
888 // been "filled". We've made no effort to prevent old generation classes being unloaded by young
889 // gen (and vice-versa).
890 shenandoah_assert_correct(p, obj);
891 ShenandoahHeap::atomic_clear_oop(p, obj);
892 }
893 } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
894 oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
895 if (resolved == obj) {
896 resolved = _heap->evacuate_object(obj, _thread);
897 }
898 ShenandoahHeap::atomic_update_oop(resolved, p, obj);
899 assert(_heap->cancelled_gc() ||
900 _mark_context->is_marked(resolved) && !_heap->in_collection_set(resolved),
901 "Sanity");
902 }
903 }
904 }
905
906 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
907 ShouldNotReachHere();
908 }
909
910 class ShenandoahIsCLDAliveClosure : public CLDClosure {
911 public:
912 void do_cld(ClassLoaderData* cld) {
1097 assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
1098 ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
1099 heap->workers()->run_task(&task);
1100 heap->set_concurrent_strong_root_in_progress(false);
1101 }
1102
1103 void ShenandoahConcurrentGC::op_cleanup_early() {
1104 ShenandoahHeap::heap()->free_set()->recycle_trash();
1105 }
1106
1107 void ShenandoahConcurrentGC::op_evacuate() {
1108 ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
1109 }
1110
1111 void ShenandoahConcurrentGC::op_init_updaterefs() {
1112 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1113 heap->set_evacuation_in_progress(false);
1114 heap->set_concurrent_weak_root_in_progress(false);
1115 heap->prepare_update_heap_references(true /*concurrent*/);
1116 heap->set_update_refs_in_progress(true);
1117 if (ShenandoahVerify) {
1118 heap->verifier()->verify_before_updaterefs();
1119 }
1120 if (ShenandoahPacing) {
1121 heap->pacer()->setup_for_updaterefs();
1122 }
1123 }
1124
1125 void ShenandoahConcurrentGC::op_updaterefs() {
1126 ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
1127 }
1128
1129 class ShenandoahUpdateThreadClosure : public HandshakeClosure {
1130 private:
1131 ShenandoahUpdateRefsClosure _cl;
1132 public:
1133 ShenandoahUpdateThreadClosure();
1134 void do_thread(Thread* thread);
1135 };
1136
1137 ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
1138 HandshakeClosure("Shenandoah Update Thread Roots") {
1139 }
1144 ResourceMark rm;
1145 jt->oops_do(&_cl, nullptr);
1146 }
1147 }
1148
1149 void ShenandoahConcurrentGC::op_update_thread_roots() {
1150 ShenandoahUpdateThreadClosure cl;
1151 Handshake::execute(&cl);
1152 }
1153
1154 void ShenandoahConcurrentGC::op_final_updaterefs() {
1155 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1156 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
1157 assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");
1158
1159 heap->finish_concurrent_roots();
1160
1161 // Clear cancelled GC, if set. On the cancellation path, the block before would have
1162 // handled everything.
1163 if (heap->cancelled_gc()) {
1164 heap->clear_cancelled_gc(true /* clear oom handler */);
1165 }
1166
1167 // Has to be done before cset is clear
1168 if (ShenandoahVerify) {
1169 heap->verifier()->verify_roots_in_to_space();
1170 }
1171
1172 if (heap->mode()->is_generational() && heap->is_concurrent_old_mark_in_progress()) {
1173 // When the SATB barrier is left on to support concurrent old gen mark, it may pick up writes to
1174 // objects in the collection set. After those objects are evacuated, the pointers in the
1175 // SATB are no longer safe. Once we have finished update references, we are guaranteed that
1176 // no more writes to the collection set are possible.
1177 //
1178 // This will transfer any old pointers in _active_ regions from the SATB to the old gen
1179 // mark queues. All other pointers will be discarded. This would also discard any pointers
1180 // in old regions that were included in a mixed evacuation. We aren't using the SATB filter
1181 // methods here because we cannot control when they execute. If the SATB filter runs _after_
1182 // a region has been recycled, we will not be able to detect the bad pointer.
1183 //
1184 // We are not concerned about skipping this step in abbreviated cycles because regions
1185 // with no live objects cannot have been written to and so cannot have entries in the SATB
1186 // buffers.
1187 heap->transfer_old_pointers_from_satb();
1188 }
1189
1190 heap->update_heap_region_states(true /*concurrent*/);
1191
1192 heap->set_update_refs_in_progress(false);
1193 heap->set_has_forwarded_objects(false);
1194
1195 // Aging_cycle is only relevant during the evacuation cycle for individual objects, and during final mark for
1196 // entire regions. Both of these operations occur before final update refs.
1197 heap->set_aging_cycle(false);
1198
1199 if (ShenandoahVerify) {
1200 heap->verifier()->verify_after_updaterefs();
1201 }
1202
1203 if (VerifyAfterGC) {
1204 Universe::verify();
1205 }
1206
1207 heap->rebuild_free_set(true /*concurrent*/);
1208 heap->adjust_generation_sizes();
1209 }
1210
1211 void ShenandoahConcurrentGC::op_final_roots() {
1212 ShenandoahHeap::heap()->set_concurrent_weak_root_in_progress(false);
1213 }
1214
1215 void ShenandoahConcurrentGC::op_cleanup_complete() {
1216 ShenandoahHeap::heap()->free_set()->recycle_trash();
1217 }
1218
1219 void ShenandoahConcurrentGC::op_global_coalesce_and_fill() {
1220 ShenandoahHeap::heap()->coalesce_and_fill_old_regions();
1221 }
1222
1223 bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
1224 if (ShenandoahHeap::heap()->cancelled_gc()) {
1225 _degen_point = point;
1226 return true;
1227 }
1228 return false;
1229 }
1230
1231 void ShenandoahConcurrentGC::init_mark_event_message(char* buf, size_t len) const {
1232 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1233 assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1234 if (heap->unload_classes()) {
1235 jio_snprintf(buf, len, "Pause Init Mark (%s) (unload classes)", _generation->name());
1236 } else {
1237 jio_snprintf(buf, len, "Pause Init Mark (%s)", _generation->name());
1238 }
1239 }
1240
1241 void ShenandoahConcurrentGC::final_mark_event_message(char* buf, size_t len) const {
1242 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1243 assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
1244 "Should not have forwarded objects during final mark (unless old gen concurrent mark is running)");
1245 if (heap->unload_classes()) {
1246 jio_snprintf(buf, len, "Pause Final Mark (%s) (unload classes)", _generation->name());
1247 } else {
1248 jio_snprintf(buf, len, "Pause Final Mark (%s)", _generation->name());
1249 }
1250 }
1251
1252 void ShenandoahConcurrentGC::conc_mark_event_message(char* buf, size_t len) const {
1253 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1254 assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
1255 "Should not have forwarded objects concurrent mark (unless old gen concurrent mark is running");
1256 if (heap->unload_classes()) {
1257 jio_snprintf(buf, len, "Concurrent marking (%s) (unload classes)", _generation->name());
1258 } else {
1259 jio_snprintf(buf, len, "Concurrent marking (%s)", _generation->name());
1260 }
1261 }