1 /*
2 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27
28 #include "gc/shared/barrierSetNMethod.hpp"
29 #include "gc/shared/collectorCounters.hpp"
30 #include "gc/shared/continuationGCSupport.inline.hpp"
31 #include "gc/shenandoah/shenandoahBreakpoint.hpp"
32 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
33 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
34 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
35 #include "gc/shenandoah/shenandoahFreeSet.hpp"
36 #include "gc/shenandoah/shenandoahLock.hpp"
37 #include "gc/shenandoah/shenandoahMark.inline.hpp"
38 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
39 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
40 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
41 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
42 #include "gc/shenandoah/shenandoahStackWatermark.hpp"
43 #include "gc/shenandoah/shenandoahUtils.hpp"
44 #include "gc/shenandoah/shenandoahVerifier.hpp"
45 #include "gc/shenandoah/shenandoahVMOperations.hpp"
46 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
47 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
48 #include "memory/allocation.hpp"
49 #include "prims/jvmtiTagMap.hpp"
50 #include "runtime/vmThread.hpp"
51 #include "utilities/events.hpp"
52
53 // Breakpoint support
54 class ShenandoahBreakpointGCScope : public StackObj {
55 private:
69 }
70 };
71
72 class ShenandoahBreakpointMarkScope : public StackObj {
73 private:
74 const GCCause::Cause _cause;
75 public:
76 ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
77 if (_cause == GCCause::_wb_breakpoint) {
78 ShenandoahBreakpoint::at_after_marking_started();
79 }
80 }
81
82 ~ShenandoahBreakpointMarkScope() {
83 if (_cause == GCCause::_wb_breakpoint) {
84 ShenandoahBreakpoint::at_before_marking_completed();
85 }
86 }
87 };
88
89 ShenandoahConcurrentGC::ShenandoahConcurrentGC() :
90 _mark(),
91 _degen_point(ShenandoahDegenPoint::_degenerated_unset),
92 _abbreviated(false) {
93 }
94
95 ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
96 return _degen_point;
97 }
98
99 void ShenandoahConcurrentGC::cancel() {
100 ShenandoahConcurrentMark::cancel();
101 }
102
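// Drive one complete concurrent cycle. The phase sequence is:
//   concurrent reset -> init mark (pause) -> concurrent mark -> final mark (pause) ->
//   concurrent root processing, cleanup and evacuation -> init update refs (pause) ->
//   concurrent update refs -> final update refs (pause) -> concurrent cleanup.
// If there is nothing to evacuate, the cycle is "abbreviated" and ends with a single
// final-roots pause instead. Returns false if the cycle was cancelled and has to be
// completed by a degenerated (STW) GC; the degeneration point is recorded in _degen_point.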
103 bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
104 ShenandoahHeap* const heap = ShenandoahHeap::heap();
105 ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);
106
107 // Reset for upcoming marking
108 entry_reset();
109
110 // Start initial mark under STW
111 vmop_entry_init_mark();
112
113 {
114 ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);
115 // Concurrent mark roots
116 entry_mark_roots();
117 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_outside_cycle)) {
118 return false;
119 }
120
121 // Continue concurrent mark
122 entry_mark();
123 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) {
124 return false;
125 }
126 }
127
128 // Complete marking under STW, and start evacuation
129 vmop_entry_final_mark();
130
131 // If the GC was cancelled before final mark, nothing happens on the safepoint. We are still
132 // in the marking phase and must resume the degenerated cycle from there. If the GC was cancelled
133 // after final mark, then we've entered the evacuation phase and must resume the degenerated cycle
134 // from that phase.
135 if (heap->is_concurrent_mark_in_progress()) {
136 bool cancelled = check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark);
137 assert(cancelled, "GC must have been cancelled between concurrent and final mark");
138 return false;
139 }
140
141 // Concurrent stack processing
142 if (heap->is_evacuation_in_progress()) {
143 entry_thread_roots();
144 }
145
146 // Process weak roots that might still point to regions that would be broken by cleanup
147 if (heap->is_concurrent_weak_root_in_progress()) {
148 entry_weak_refs();
149 entry_weak_roots();
150 }
151
152   // Final mark might have reclaimed some immediate garbage; kick off cleanup to reclaim
153   // the space. This would be the last action if there is nothing to evacuate.
154 entry_cleanup_early();
155
156 heap->free_set()->log_status_under_lock();
157
158 // Perform concurrent class unloading
159 if (heap->unload_classes() &&
160 heap->is_concurrent_weak_root_in_progress()) {
161 entry_class_unloading();
162 }
163
164 // Processing strong roots
165 // This may be skipped if there is nothing to update/evacuate.
166 // If so, strong_root_in_progress would be unset.
167 if (heap->is_concurrent_strong_root_in_progress()) {
168 entry_strong_roots();
169 }
170
171 // Continue the cycle with evacuation and optional update-refs.
172 // This may be skipped if there is nothing to evacuate.
173 // If so, evac_in_progress would be unset by collection set preparation code.
179 }
180
181 // Perform update-refs phase.
182 vmop_entry_init_updaterefs();
183 entry_updaterefs();
184 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) {
185 return false;
186 }
187
188 // Concurrent update thread roots
189 entry_update_thread_roots();
190 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) {
191 return false;
192 }
193
194 vmop_entry_final_updaterefs();
195
196     // The update-refs phase freed up the collection set; kick off cleanup to reclaim the space.
197 entry_cleanup_complete();
198 } else {
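    // Nothing was put into the collection set, so evacuation and update-refs are skipped.
    // Finish the cycle with a single final-roots pause and record it as abbreviated.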
199 vmop_entry_final_roots();
200 _abbreviated = true;
201 }
202
203 return true;
204 }
205
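// Pause phases follow a three-level convention: vmop_entry_*() submits a VM operation that
// reaches a safepoint and calls the matching entry_*(); entry_*() sets up timing, event
// reporting and worker scopes, then calls op_*(), which does the actual work. Concurrent
// phases use the same entry_*()/op_*() split without the VM operation.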
206 void ShenandoahConcurrentGC::vmop_entry_init_mark() {
207 ShenandoahHeap* const heap = ShenandoahHeap::heap();
208 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
209 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);
210
211 heap->try_inject_alloc_failure();
212 VM_ShenandoahInitMark op(this);
213 VMThread::execute(&op); // jump to entry_init_mark() under safepoint
214 }
215
216 void ShenandoahConcurrentGC::vmop_entry_final_mark() {
217 ShenandoahHeap* const heap = ShenandoahHeap::heap();
218 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
219 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);
220
221 heap->try_inject_alloc_failure();
222 VM_ShenandoahFinalMarkStartEvac op(this);
283 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs);
284 EventMark em("%s", msg);
285
286 // No workers used in this phase, no setup required
287 op_init_updaterefs();
288 }
289
290 void ShenandoahConcurrentGC::entry_final_updaterefs() {
291 static const char* msg = "Pause Final Update Refs";
292 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
293 EventMark em("%s", msg);
294
295 ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
296 ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
297 "final reference update");
298
299 op_final_updaterefs();
300 }
301
302 void ShenandoahConcurrentGC::entry_final_roots() {
303 static const char* msg = "Pause Final Roots";
304 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots);
305 EventMark em("%s", msg);
306
307 op_final_roots();
308 }
309
310 void ShenandoahConcurrentGC::entry_reset() {
311 ShenandoahHeap* const heap = ShenandoahHeap::heap();
312 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
313 static const char* msg = "Concurrent reset";
314 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
315 EventMark em("%s", msg);
316
317 ShenandoahWorkerScope scope(heap->workers(),
318 ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
319 "concurrent reset");
320
321 heap->try_inject_alloc_failure();
322 op_reset();
323 }
324
325 void ShenandoahConcurrentGC::entry_mark_roots() {
326 ShenandoahHeap* const heap = ShenandoahHeap::heap();
327 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
328 const char* msg = "Concurrent marking roots";
329 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
330 EventMark em("%s", msg);
331
332 ShenandoahWorkerScope scope(heap->workers(),
333 ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
334 "concurrent marking roots");
335
336 heap->try_inject_alloc_failure();
337 op_mark_roots();
338 }
339
340 void ShenandoahConcurrentGC::entry_mark() {
341 ShenandoahHeap* const heap = ShenandoahHeap::heap();
342 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
351 heap->try_inject_alloc_failure();
352 op_mark();
353 }
354
355 void ShenandoahConcurrentGC::entry_thread_roots() {
356 ShenandoahHeap* const heap = ShenandoahHeap::heap();
357 static const char* msg = "Concurrent thread roots";
358 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_thread_roots);
359 EventMark em("%s", msg);
360
361 ShenandoahWorkerScope scope(heap->workers(),
362 ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
363 msg);
364
365 heap->try_inject_alloc_failure();
366 op_thread_roots();
367 }
368
369 void ShenandoahConcurrentGC::entry_weak_refs() {
370 ShenandoahHeap* const heap = ShenandoahHeap::heap();
371 static const char* msg = "Concurrent weak references";
372 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_refs);
373 EventMark em("%s", msg);
374
375 ShenandoahWorkerScope scope(heap->workers(),
376 ShenandoahWorkerPolicy::calc_workers_for_conc_refs_processing(),
377 "concurrent weak references");
378
379 heap->try_inject_alloc_failure();
380 op_weak_refs();
381 }
382
383 void ShenandoahConcurrentGC::entry_weak_roots() {
384 ShenandoahHeap* const heap = ShenandoahHeap::heap();
385 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
386 static const char* msg = "Concurrent weak roots";
387 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_roots);
388 EventMark em("%s", msg);
389
390 ShenandoahWorkerScope scope(heap->workers(),
391 ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
392 "concurrent weak root");
393
394 heap->try_inject_alloc_failure();
395 op_weak_roots();
396 }
397
398 void ShenandoahConcurrentGC::entry_class_unloading() {
399 ShenandoahHeap* const heap = ShenandoahHeap::heap();
400 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
401 static const char* msg = "Concurrent class unloading";
402 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_class_unload);
403 EventMark em("%s", msg);
404
405 ShenandoahWorkerScope scope(heap->workers(),
406 ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
413 void ShenandoahConcurrentGC::entry_strong_roots() {
414 ShenandoahHeap* const heap = ShenandoahHeap::heap();
415 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
416 static const char* msg = "Concurrent strong roots";
417 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_strong_roots);
418 EventMark em("%s", msg);
419
420 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_strong_roots);
421
422 ShenandoahWorkerScope scope(heap->workers(),
423 ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
424 "concurrent strong root");
425
426 heap->try_inject_alloc_failure();
427 op_strong_roots();
428 }
429
430 void ShenandoahConcurrentGC::entry_cleanup_early() {
431 ShenandoahHeap* const heap = ShenandoahHeap::heap();
432 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
433 static const char* msg = "Concurrent cleanup";
434 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_early, true /* log_heap_usage */);
435 EventMark em("%s", msg);
436
437 // This phase does not use workers, no need for setup
438 heap->try_inject_alloc_failure();
439 op_cleanup_early();
440 }
441
442 void ShenandoahConcurrentGC::entry_evacuate() {
443 ShenandoahHeap* const heap = ShenandoahHeap::heap();
444 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
445
446 static const char* msg = "Concurrent evacuation";
447 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_evac);
448 EventMark em("%s", msg);
449
450 ShenandoahWorkerScope scope(heap->workers(),
451 ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
452 "concurrent evacuation");
453
454 heap->try_inject_alloc_failure();
455 op_evacuate();
456 }
457
458 void ShenandoahConcurrentGC::entry_update_thread_roots() {
459 ShenandoahHeap* const heap = ShenandoahHeap::heap();
460 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
461
462 static const char* msg = "Concurrent update thread roots";
463 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_thread_roots);
464 EventMark em("%s", msg);
465
466 // No workers used in this phase, no setup required
467 heap->try_inject_alloc_failure();
468 op_update_thread_roots();
469 }
470
471 void ShenandoahConcurrentGC::entry_updaterefs() {
472 ShenandoahHeap* const heap = ShenandoahHeap::heap();
473 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
474 static const char* msg = "Concurrent update references";
475 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs);
476 EventMark em("%s", msg);
477
478 ShenandoahWorkerScope scope(heap->workers(),
479 ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
480 "concurrent reference update");
481
482 heap->try_inject_alloc_failure();
483 op_updaterefs();
484 }
485
486 void ShenandoahConcurrentGC::entry_cleanup_complete() {
487 ShenandoahHeap* const heap = ShenandoahHeap::heap();
488 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
489 static const char* msg = "Concurrent cleanup";
490 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
491 EventMark em("%s", msg);
492
493 // This phase does not use workers, no need for setup
494 heap->try_inject_alloc_failure();
495 op_cleanup_complete();
496 }
497
498 void ShenandoahConcurrentGC::op_reset() {
499 ShenandoahHeap* const heap = ShenandoahHeap::heap();
500 if (ShenandoahPacing) {
501 heap->pacer()->setup_for_reset();
502 }
503
504 heap->prepare_gc();
505 }
506
507 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
508 private:
509 ShenandoahMarkingContext* const _ctx;
510 public:
511 ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
512
513 void heap_region_do(ShenandoahHeapRegion* r) {
514 assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
515 if (r->is_active()) {
516       // Check if the region needs its TAMS updated. We have already updated it during concurrent
517       // reset, so it is very likely we don't need to do another write here.
518 if (_ctx->top_at_mark_start(r) != r->top()) {
519 _ctx->capture_top_at_mark_start(r);
520 }
521 } else {
522 assert(_ctx->top_at_mark_start(r) == r->top(),
523 "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
524 }
525 }
526
527 bool is_thread_safe() { return true; }
528 };
529
530 void ShenandoahConcurrentGC::start_mark() {
531 _mark.start_mark();
532 }
533
534 void ShenandoahConcurrentGC::op_init_mark() {
535 ShenandoahHeap* const heap = ShenandoahHeap::heap();
536 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
537 assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
538
539 assert(heap->marking_context()->is_bitmap_clear(), "need clear marking bitmap");
540 assert(!heap->marking_context()->is_complete(), "should not be complete");
541 assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
542
543 if (ShenandoahVerify) {
544 heap->verifier()->verify_before_concmark();
545 }
546
547 if (VerifyBeforeGC) {
548 Universe::verify();
549 }
550
551 heap->set_concurrent_mark_in_progress(true);
552
553 start_mark();
554
555 {
556 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
557 ShenandoahInitMarkUpdateRegionStateClosure cl;
558 heap->parallel_heap_region_iterate(&cl);
559 }
560
561 // Weak reference processing
562 ShenandoahReferenceProcessor* rp = heap->ref_processor();
563 rp->reset_thread_locals();
564 rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());
565
566 // Make above changes visible to worker threads
567 OrderAccess::fence();
568
569 // Arm nmethods for concurrent mark
570 ShenandoahCodeRoots::arm_nmethods_for_mark();
571
572 ShenandoahStackWatermark::change_epoch_id();
573 if (ShenandoahPacing) {
574 heap->pacer()->setup_for_mark();
575 }
576 }
577
578 void ShenandoahConcurrentGC::op_mark_roots() {
579 _mark.mark_concurrent_roots();
580 }
581
582 void ShenandoahConcurrentGC::op_mark() {
583 _mark.concurrent_mark();
584 }
585
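// Runs at the final-mark safepoint: finishes marking, chooses the collection set, and either
// starts evacuation (arming nmethods and stack watermarks) or, if the collection set is empty,
// leaves evacuation off so the cycle completes as an abbreviated one.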
586 void ShenandoahConcurrentGC::op_final_mark() {
587 ShenandoahHeap* const heap = ShenandoahHeap::heap();
588 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
589 assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
590
591 if (ShenandoahVerify) {
592 heap->verifier()->verify_roots_no_forwarded();
593 }
594
595 if (!heap->cancelled_gc()) {
596 _mark.finish_mark();
597 assert(!heap->cancelled_gc(), "STW mark cannot OOM");
598
599 // Notify JVMTI that the tagmap table will need cleaning.
600 JvmtiTagMap::set_needs_cleaning();
601
602 heap->prepare_regions_and_collection_set(true /*concurrent*/);
603
604 // Has to be done after cset selection
605 heap->prepare_concurrent_roots();
606
607 if (!heap->collection_set()->is_empty()) {
608 if (ShenandoahVerify) {
609 heap->verifier()->verify_before_evacuation();
610 }
611
612 heap->set_evacuation_in_progress(true);
613 // From here on, we need to update references.
614 heap->set_has_forwarded_objects(true);
615
616 // Arm nmethods/stack for concurrent processing
617 ShenandoahCodeRoots::arm_nmethods_for_evac();
618 ShenandoahStackWatermark::change_epoch_id();
619
620 if (ShenandoahPacing) {
621 heap->pacer()->setup_for_evac();
622 }
623 } else {
624 if (ShenandoahVerify) {
625 heap->verifier()->verify_after_concmark();
626 }
627
628 if (VerifyAfterGC) {
629 Universe::verify();
630 }
631 }
632 }
633 }
634
635 class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
636 private:
637 OopClosure* const _oops;
638
639 public:
640 ShenandoahConcurrentEvacThreadClosure(OopClosure* oops);
641 void do_thread(Thread* thread);
642 };
643
644 ShenandoahConcurrentEvacThreadClosure::ShenandoahConcurrentEvacThreadClosure(OopClosure* oops) :
645 _oops(oops) {
646 }
647
648 void ShenandoahConcurrentEvacThreadClosure::do_thread(Thread* thread) {
649 JavaThread* const jt = JavaThread::cast(thread);
650 StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc);
651 }
652
653 class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask {
654 private:
655 ShenandoahJavaThreadsIterator _java_threads;
656
657 public:
658 ShenandoahConcurrentEvacUpdateThreadTask(uint n_workers) :
659 WorkerTask("Shenandoah Evacuate/Update Concurrent Thread Roots"),
660 _java_threads(ShenandoahPhaseTimings::conc_thread_roots, n_workers) {
661 }
662
663 void work(uint worker_id) {
664     // ShenandoahEvacOOMScope has to be set up by ShenandoahContextEvacuateUpdateRootsClosure.
665     // Otherwise, it may deadlock with the watermark lock.
666 ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
667 ShenandoahConcurrentEvacThreadClosure thr_cl(&oops_cl);
668 _java_threads.threads_do(&thr_cl, worker_id);
669 }
670 };
671
672 void ShenandoahConcurrentGC::op_thread_roots() {
673 ShenandoahHeap* const heap = ShenandoahHeap::heap();
674 assert(heap->is_evacuation_in_progress(), "Checked by caller");
675 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
676 ShenandoahConcurrentEvacUpdateThreadTask task(heap->workers()->active_workers());
677 heap->workers()->run_task(&task);
678 }
679
680 void ShenandoahConcurrentGC::op_weak_refs() {
681 ShenandoahHeap* const heap = ShenandoahHeap::heap();
682 assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
683 // Concurrent weak refs processing
684 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
685 if (heap->gc_cause() == GCCause::_wb_breakpoint) {
686 ShenandoahBreakpoint::at_after_reference_processing_started();
687 }
688 heap->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
689 }
690
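// Processes OopStorage-backed weak roots: references to objects that were not marked are
// cleared in place, while references to live objects in the collection set are evacuated
// (if needed) and updated to point to the to-space copies.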
691 class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
692 private:
693 ShenandoahHeap* const _heap;
694 ShenandoahMarkingContext* const _mark_context;
695 bool _evac_in_progress;
696 Thread* const _thread;
697
698 public:
699 ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
700 void do_oop(oop* p);
701 void do_oop(narrowOop* p);
702 };
703
704 ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
705 _heap(ShenandoahHeap::heap()),
706 _mark_context(ShenandoahHeap::heap()->marking_context()),
707 _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
708 _thread(Thread::current()) {
709 }
710
711 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
712 const oop obj = RawAccess<>::oop_load(p);
713 if (!CompressedOops::is_null(obj)) {
714 if (!_mark_context->is_marked(obj)) {
715 // Note: The obj is dead here. Do not touch it, just clear.
716 ShenandoahHeap::atomic_clear_oop(p, obj);
717 } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
718 oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
719 if (resolved == obj) {
720 resolved = _heap->evacuate_object(obj, _thread);
721 }
722 shenandoah_assert_not_in_cset_except(p, resolved, _heap->cancelled_gc());
723 ShenandoahHeap::atomic_update_oop(resolved, p, obj);
724 }
725 }
726 }
727
728 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
729 ShouldNotReachHere();
730 }
731
732 class ShenandoahIsCLDAliveClosure : public CLDClosure {
733 public:
734 void do_cld(ClassLoaderData* cld) {
735 cld->is_alive();
736 }
802 }
803 }
804 };
805
806 void ShenandoahConcurrentGC::op_weak_roots() {
807 ShenandoahHeap* const heap = ShenandoahHeap::heap();
808 assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
809 // Concurrent weak root processing
810 {
811 ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_work);
812 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots_work);
813 ShenandoahConcurrentWeakRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_weak_roots_work);
814 heap->workers()->run_task(&task);
815 }
816
817 // Perform handshake to flush out dead oops
818 {
819 ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_rendezvous);
820 heap->rendezvous_threads("Shenandoah Concurrent Weak Roots");
821 }
822 }
823
824 void ShenandoahConcurrentGC::op_class_unloading() {
825 ShenandoahHeap* const heap = ShenandoahHeap::heap();
826 assert (heap->is_concurrent_weak_root_in_progress() &&
827 heap->unload_classes(),
828 "Checked by caller");
829 heap->do_class_unloading();
830 }
831
832 class ShenandoahEvacUpdateCodeCacheClosure : public NMethodClosure {
833 private:
834 BarrierSetNMethod* const _bs;
835 ShenandoahEvacuateUpdateMetadataClosure _cl;
836
837 public:
838 ShenandoahEvacUpdateCodeCacheClosure() :
839 _bs(BarrierSet::barrier_set()->barrier_set_nmethod()),
840 _cl() {
841 }
898 ShenandoahHeap* const heap = ShenandoahHeap::heap();
899 assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
900 ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
901 heap->workers()->run_task(&task);
902 heap->set_concurrent_strong_root_in_progress(false);
903 }
904
905 void ShenandoahConcurrentGC::op_cleanup_early() {
906 ShenandoahHeap::heap()->free_set()->recycle_trash();
907 }
908
909 void ShenandoahConcurrentGC::op_evacuate() {
910 ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
911 }
912
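// Runs at the init-update-refs safepoint. Evacuation is done at this point, so the evacuation
// and concurrent-weak-root flags are dropped and the update-refs flag is raised here, where
// all mutators observe the transition consistently.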
913 void ShenandoahConcurrentGC::op_init_updaterefs() {
914 ShenandoahHeap* const heap = ShenandoahHeap::heap();
915 heap->set_evacuation_in_progress(false);
916 heap->set_concurrent_weak_root_in_progress(false);
917 heap->prepare_update_heap_references(true /*concurrent*/);
918 if (ShenandoahVerify) {
919 heap->verifier()->verify_before_updaterefs();
920 }
921
922 heap->set_update_refs_in_progress(true);
923 if (ShenandoahPacing) {
924 heap->pacer()->setup_for_updaterefs();
925 }
926 }
927
928 void ShenandoahConcurrentGC::op_updaterefs() {
929 ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
930 }
931
932 class ShenandoahUpdateThreadClosure : public HandshakeClosure {
933 private:
934   // This closure runs when the thread is stopped for a handshake, which means
935   // we can use a non-concurrent closure here, as long as it only updates
936   // locations modified by the thread itself, i.e. stack locations.
937 ShenandoahNonConcUpdateRefsClosure _cl;
938 public:
939 ShenandoahUpdateThreadClosure();
940 void do_thread(Thread* thread);
941 };
942
950 ResourceMark rm;
951 jt->oops_do(&_cl, nullptr);
952 }
953 }
954
955 void ShenandoahConcurrentGC::op_update_thread_roots() {
956 ShenandoahUpdateThreadClosure cl;
957 Handshake::execute(&cl);
958 }
959
960 void ShenandoahConcurrentGC::op_final_updaterefs() {
961 ShenandoahHeap* const heap = ShenandoahHeap::heap();
962 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
963 assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");
964
965 heap->finish_concurrent_roots();
966
967   // Clear cancelled GC, if set. On the cancellation path, the code before this point would
968   // have handled everything.
969 if (heap->cancelled_gc()) {
970 heap->clear_cancelled_gc();
971 }
972
973   // Has to be done before the cset is cleared
974 if (ShenandoahVerify) {
975 heap->verifier()->verify_roots_in_to_space();
976 }
977
978 heap->update_heap_region_states(true /*concurrent*/);
979
980 heap->set_update_refs_in_progress(false);
981 heap->set_has_forwarded_objects(false);
982
983 if (ShenandoahVerify) {
984 heap->verifier()->verify_after_updaterefs();
985 }
986
987 if (VerifyAfterGC) {
988 Universe::verify();
989 }
990
991 heap->rebuild_free_set(true /*concurrent*/);
992 }
993
994 void ShenandoahConcurrentGC::op_final_roots() {
995 ShenandoahHeap::heap()->set_concurrent_weak_root_in_progress(false);
996 }
997
998 void ShenandoahConcurrentGC::op_cleanup_complete() {
999 ShenandoahHeap::heap()->free_set()->recycle_trash();
1000 }
1001
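// If the GC has been cancelled, remember the phase at which the cancellation was observed so
// the control thread can continue the collection as a degenerated cycle from that point.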
1002 bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
1003 if (ShenandoahHeap::heap()->cancelled_gc()) {
1004 _degen_point = point;
1005 return true;
1006 }
1007 return false;
1008 }
1009
1010 const char* ShenandoahConcurrentGC::init_mark_event_message() const {
1011 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1012 assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1013 if (heap->unload_classes()) {
1014 return "Pause Init Mark (unload classes)";
1015 } else {
1016 return "Pause Init Mark";
1017 }
1018 }
1019
1020 const char* ShenandoahConcurrentGC::final_mark_event_message() const {
1021 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1022 assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1023 if (heap->unload_classes()) {
1024 return "Pause Final Mark (unload classes)";
1025 } else {
1026 return "Pause Final Mark";
1027 }
1028 }
1029
1030 const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
1031 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1032 assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1033 if (heap->unload_classes()) {
1034 return "Concurrent marking (unload classes)";
1035 } else {
1036 return "Concurrent marking";
1037 }
1038 }
1 /*
2 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
4 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
5 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 *
7 * This code is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 only, as
9 * published by the Free Software Foundation.
10 *
11 * This code is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * version 2 for more details (a copy is included in the LICENSE file that
15 * accompanied this code).
16 *
17 * You should have received a copy of the GNU General Public License version
18 * 2 along with this work; if not, write to the Free Software Foundation,
19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 *
21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
22 * or visit www.oracle.com if you need additional information or have any
23 * questions.
24 *
25 */
26
27 #include "precompiled.hpp"
28
29 #include "gc/shared/barrierSetNMethod.hpp"
30 #include "gc/shared/collectorCounters.hpp"
31 #include "gc/shared/continuationGCSupport.inline.hpp"
32 #include "gc/shenandoah/shenandoahBreakpoint.hpp"
33 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
34 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
35 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
36 #include "gc/shenandoah/shenandoahFreeSet.hpp"
37 #include "gc/shenandoah/shenandoahGeneration.hpp"
38 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
39 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
40 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
41 #include "gc/shenandoah/shenandoahLock.hpp"
42 #include "gc/shenandoah/shenandoahMark.inline.hpp"
43 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
44 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
45 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
46 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
47 #include "gc/shenandoah/shenandoahStackWatermark.hpp"
48 #include "gc/shenandoah/shenandoahUtils.hpp"
49 #include "gc/shenandoah/shenandoahVerifier.hpp"
50 #include "gc/shenandoah/shenandoahVMOperations.hpp"
51 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
52 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
53 #include "memory/allocation.hpp"
54 #include "prims/jvmtiTagMap.hpp"
55 #include "runtime/vmThread.hpp"
56 #include "utilities/events.hpp"
57
58 // Breakpoint support
59 class ShenandoahBreakpointGCScope : public StackObj {
60 private:
74 }
75 };
76
77 class ShenandoahBreakpointMarkScope : public StackObj {
78 private:
79 const GCCause::Cause _cause;
80 public:
81 ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
82 if (_cause == GCCause::_wb_breakpoint) {
83 ShenandoahBreakpoint::at_after_marking_started();
84 }
85 }
86
87 ~ShenandoahBreakpointMarkScope() {
88 if (_cause == GCCause::_wb_breakpoint) {
89 ShenandoahBreakpoint::at_before_marking_completed();
90 }
91 }
92 };
93
94 ShenandoahConcurrentGC::ShenandoahConcurrentGC(ShenandoahGeneration* generation, bool do_old_gc_bootstrap) :
95 _mark(generation),
96 _generation(generation),
97 _degen_point(ShenandoahDegenPoint::_degenerated_unset),
98 _abbreviated(false),
99 _do_old_gc_bootstrap(do_old_gc_bootstrap) {
100 }
101
102 ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
103 return _degen_point;
104 }
105
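// Drive one complete concurrent cycle for the given generation. In addition to the usual
// reset / mark / evacuate / update-refs phases, generational mode scans the remembered set
// before marking roots, may bootstrap old-generation marking, and may promote regions in
// place when evacuation is skipped. Returns false if the cycle was cancelled and has to be
// completed by a degenerated GC; the degeneration point is recorded in _degen_point.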
106 bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
107 ShenandoahHeap* const heap = ShenandoahHeap::heap();
108
109 ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);
110
111 // Reset for upcoming marking
112 entry_reset();
113
114 // Start initial mark under STW
115 vmop_entry_init_mark();
116
117 {
118 ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);
119
120     // Reset task queue stats here, rather than in mark_concurrent_roots,
121     // because the remembered set scan will push oops onto the queues, and
122     // resetting after that happens would lose those counts.
123 TASKQUEUE_STATS_ONLY(_mark.task_queues()->reset_taskqueue_stats());
124
125 // Concurrent remembered set scanning
126 entry_scan_remembered_set();
127
128 // Concurrent mark roots
129 entry_mark_roots();
130 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_roots)) {
131 return false;
132 }
133
134 // Continue concurrent mark
135 entry_mark();
136 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) {
137 return false;
138 }
139 }
140
141 // Complete marking under STW, and start evacuation
142 vmop_entry_final_mark();
143
144 // If the GC was cancelled before final mark, nothing happens on the safepoint. We are still
145 // in the marking phase and must resume the degenerated cycle from there. If the GC was cancelled
146 // after final mark, then we've entered the evacuation phase and must resume the degenerated cycle
147 // from that phase.
148 if (_generation->is_concurrent_mark_in_progress()) {
149 bool cancelled = check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark);
150 assert(cancelled, "GC must have been cancelled between concurrent and final mark");
151 return false;
152 }
153
154 // Concurrent stack processing
155 if (heap->is_evacuation_in_progress()) {
156 entry_thread_roots();
157 }
158
159 // Process weak roots that might still point to regions that would be broken by cleanup
160 if (heap->is_concurrent_weak_root_in_progress()) {
161 entry_weak_refs();
162 entry_weak_roots();
163 }
164
165   // Final mark might have reclaimed some immediate garbage; kick off cleanup to reclaim
166   // the space. This would be the last action if there is nothing to evacuate. Note that
167   // we will not age young-gen objects if we skip evacuation.
168 entry_cleanup_early();
169
170 heap->free_set()->log_status_under_lock();
171
172 // Perform concurrent class unloading
173 if (heap->unload_classes() &&
174 heap->is_concurrent_weak_root_in_progress()) {
175 entry_class_unloading();
176 }
177
178 // Processing strong roots
179 // This may be skipped if there is nothing to update/evacuate.
180 // If so, strong_root_in_progress would be unset.
181 if (heap->is_concurrent_strong_root_in_progress()) {
182 entry_strong_roots();
183 }
184
185 // Continue the cycle with evacuation and optional update-refs.
186 // This may be skipped if there is nothing to evacuate.
187 // If so, evac_in_progress would be unset by collection set preparation code.
193 }
194
195 // Perform update-refs phase.
196 vmop_entry_init_updaterefs();
197 entry_updaterefs();
198 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) {
199 return false;
200 }
201
202 // Concurrent update thread roots
203 entry_update_thread_roots();
204 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) {
205 return false;
206 }
207
208 vmop_entry_final_updaterefs();
209
210     // The update-refs phase freed up the collection set; kick off cleanup to reclaim the space.
211 entry_cleanup_complete();
212 } else {
213 // We chose not to evacuate because we found sufficient immediate garbage.
214 // However, there may still be regions to promote in place, so do that now.
215 if (has_in_place_promotions(heap)) {
216 entry_promote_in_place();
217
218 // If the promote-in-place operation was cancelled, we can have the degenerated
219 // cycle complete the operation. It will see that no evacuations are in progress,
220 // and that there are regions wanting promotion. The risk with not handling the
221 // cancellation would be failing to restore top for these regions and leaving
222 // them unable to serve allocations for the old generation.
223 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) {
224 return false;
225 }
226 }
227
228 // At this point, the cycle is effectively complete. If the cycle has been cancelled here,
229 // the control thread will detect it on its next iteration and run a degenerated young cycle.
230 vmop_entry_final_roots();
231 _abbreviated = true;
232 }
233
234 // We defer generation resizing actions until after cset regions have been recycled. We do this even following an
235 // abbreviated cycle.
236 if (heap->mode()->is_generational()) {
237 ShenandoahGenerationalHeap::heap()->complete_concurrent_cycle();
238 }
239 return true;
240 }
241
242 void ShenandoahConcurrentGC::vmop_entry_init_mark() {
243 ShenandoahHeap* const heap = ShenandoahHeap::heap();
244 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
245 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);
246
247 heap->try_inject_alloc_failure();
248 VM_ShenandoahInitMark op(this);
249 VMThread::execute(&op); // jump to entry_init_mark() under safepoint
250 }
251
252 void ShenandoahConcurrentGC::vmop_entry_final_mark() {
253 ShenandoahHeap* const heap = ShenandoahHeap::heap();
254 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
255 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);
256
257 heap->try_inject_alloc_failure();
258 VM_ShenandoahFinalMarkStartEvac op(this);
319 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs);
320 EventMark em("%s", msg);
321
322 // No workers used in this phase, no setup required
323 op_init_updaterefs();
324 }
325
326 void ShenandoahConcurrentGC::entry_final_updaterefs() {
327 static const char* msg = "Pause Final Update Refs";
328 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
329 EventMark em("%s", msg);
330
331 ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
332 ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
333 "final reference update");
334
335 op_final_updaterefs();
336 }
337
338 void ShenandoahConcurrentGC::entry_final_roots() {
339 const char* msg = final_roots_event_message();
340 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots);
341 EventMark em("%s", msg);
342
343 op_final_roots();
344 }
345
346 void ShenandoahConcurrentGC::entry_reset() {
347 ShenandoahHeap* const heap = ShenandoahHeap::heap();
348 heap->try_inject_alloc_failure();
349
350 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
351 {
352 const char* msg = conc_reset_event_message();
353 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
354 EventMark em("%s", msg);
355
356 ShenandoahWorkerScope scope(heap->workers(),
357 ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
358 msg);
359 op_reset();
360 }
361
362 if (_do_old_gc_bootstrap) {
363 static const char* msg = "Concurrent reset (Old)";
364 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset_old);
365 ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
366 ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
367 msg);
368 EventMark em("%s", msg);
369
370 heap->old_generation()->prepare_gc();
371 }
372 }
373
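// Only young collections scan the remembered set: dirty cards identify old-to-young pointers,
// which young marking then treats as additional roots. Global collections mark the whole heap
// and therefore do not need the remembered set.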
374 void ShenandoahConcurrentGC::entry_scan_remembered_set() {
375 if (_generation->is_young()) {
376 ShenandoahHeap* const heap = ShenandoahHeap::heap();
377 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
378 const char* msg = "Concurrent remembered set scanning";
379 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::init_scan_rset);
380 EventMark em("%s", msg);
381
382 ShenandoahWorkerScope scope(heap->workers(),
383 ShenandoahWorkerPolicy::calc_workers_for_rs_scanning(),
384 msg);
385
386 heap->try_inject_alloc_failure();
387 _generation->scan_remembered_set(true /* is_concurrent */);
388 }
389 }
390
391 void ShenandoahConcurrentGC::entry_mark_roots() {
392 ShenandoahHeap* const heap = ShenandoahHeap::heap();
393 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
394 const char* msg = "Concurrent marking roots";
395 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
396 EventMark em("%s", msg);
397
398 ShenandoahWorkerScope scope(heap->workers(),
399 ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
400 "concurrent marking roots");
401
402 heap->try_inject_alloc_failure();
403 op_mark_roots();
404 }
405
406 void ShenandoahConcurrentGC::entry_mark() {
407 ShenandoahHeap* const heap = ShenandoahHeap::heap();
408 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
417 heap->try_inject_alloc_failure();
418 op_mark();
419 }
420
421 void ShenandoahConcurrentGC::entry_thread_roots() {
422 ShenandoahHeap* const heap = ShenandoahHeap::heap();
423 static const char* msg = "Concurrent thread roots";
424 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_thread_roots);
425 EventMark em("%s", msg);
426
427 ShenandoahWorkerScope scope(heap->workers(),
428 ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
429 msg);
430
431 heap->try_inject_alloc_failure();
432 op_thread_roots();
433 }
434
435 void ShenandoahConcurrentGC::entry_weak_refs() {
436 ShenandoahHeap* const heap = ShenandoahHeap::heap();
437 const char* msg = conc_weak_refs_event_message();
438 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_refs);
439 EventMark em("%s", msg);
440
441 ShenandoahWorkerScope scope(heap->workers(),
442 ShenandoahWorkerPolicy::calc_workers_for_conc_refs_processing(),
443 "concurrent weak references");
444
445 heap->try_inject_alloc_failure();
446 op_weak_refs();
447 }
448
449 void ShenandoahConcurrentGC::entry_weak_roots() {
450 ShenandoahHeap* const heap = ShenandoahHeap::heap();
451 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
452 const char* msg = conc_weak_roots_event_message();
453 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_roots);
454 EventMark em("%s", msg);
455
456 ShenandoahWorkerScope scope(heap->workers(),
457 ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
458 "concurrent weak root");
459
460 heap->try_inject_alloc_failure();
461 op_weak_roots();
462 }
463
464 void ShenandoahConcurrentGC::entry_class_unloading() {
465 ShenandoahHeap* const heap = ShenandoahHeap::heap();
466 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
467 static const char* msg = "Concurrent class unloading";
468 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_class_unload);
469 EventMark em("%s", msg);
470
471 ShenandoahWorkerScope scope(heap->workers(),
472 ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
479 void ShenandoahConcurrentGC::entry_strong_roots() {
480 ShenandoahHeap* const heap = ShenandoahHeap::heap();
481 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
482 static const char* msg = "Concurrent strong roots";
483 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_strong_roots);
484 EventMark em("%s", msg);
485
486 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_strong_roots);
487
488 ShenandoahWorkerScope scope(heap->workers(),
489 ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
490 "concurrent strong root");
491
492 heap->try_inject_alloc_failure();
493 op_strong_roots();
494 }
495
496 void ShenandoahConcurrentGC::entry_cleanup_early() {
497 ShenandoahHeap* const heap = ShenandoahHeap::heap();
498 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
499 const char* msg = conc_cleanup_event_message();
500 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_early, true /* log_heap_usage */);
501 EventMark em("%s", msg);
502
503 // This phase does not use workers, no need for setup
504 heap->try_inject_alloc_failure();
505 op_cleanup_early();
506 }
507
508 void ShenandoahConcurrentGC::entry_evacuate() {
509 ShenandoahHeap* const heap = ShenandoahHeap::heap();
510 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
511
512 static const char* msg = "Concurrent evacuation";
513 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_evac);
514 EventMark em("%s", msg);
515
516 ShenandoahWorkerScope scope(heap->workers(),
517 ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
518 "concurrent evacuation");
519
520 heap->try_inject_alloc_failure();
521 op_evacuate();
522 }
523
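// Regions selected for in-place promotion are transferred to the old generation without
// copying their objects: the region's affiliation and accounting change, but objects stay
// where they are. This typically applies to regions so dense with live data that evacuating
// them would not pay off.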
524 void ShenandoahConcurrentGC::entry_promote_in_place() {
525 shenandoah_assert_generational();
526
527 ShenandoahHeap* const heap = ShenandoahHeap::heap();
528 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
529
530 static const char* msg = "Promote in place";
531 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::promote_in_place);
532 EventMark em("%s", msg);
533
534 ShenandoahWorkerScope scope(heap->workers(),
535 ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
536 "promote in place");
537
538 ShenandoahGenerationalHeap::heap()->promote_regions_in_place(true);
539 }
540
541 void ShenandoahConcurrentGC::entry_update_thread_roots() {
542 ShenandoahHeap* const heap = ShenandoahHeap::heap();
543 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
544
545 static const char* msg = "Concurrent update thread roots";
546 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_thread_roots);
547 EventMark em("%s", msg);
548
549 // No workers used in this phase, no setup required
550 heap->try_inject_alloc_failure();
551 op_update_thread_roots();
552 }
553
554 void ShenandoahConcurrentGC::entry_updaterefs() {
555 ShenandoahHeap* const heap = ShenandoahHeap::heap();
556 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
557 static const char* msg = "Concurrent update references";
558 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs);
559 EventMark em("%s", msg);
560
561 ShenandoahWorkerScope scope(heap->workers(),
562 ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
563 "concurrent reference update");
564
565 heap->try_inject_alloc_failure();
566 op_updaterefs();
567 }
568
569 void ShenandoahConcurrentGC::entry_cleanup_complete() {
570 ShenandoahHeap* const heap = ShenandoahHeap::heap();
571 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
572 const char* msg = conc_cleanup_event_message();
573 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
574 EventMark em("%s", msg);
575
576 // This phase does not use workers, no need for setup
577 heap->try_inject_alloc_failure();
578 op_cleanup_complete();
579 }
580
581 void ShenandoahConcurrentGC::op_reset() {
582 ShenandoahHeap* const heap = ShenandoahHeap::heap();
583 if (ShenandoahPacing) {
584 heap->pacer()->setup_for_reset();
585 }
586 _generation->prepare_gc();
587 }
588
589 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
590 private:
591 ShenandoahMarkingContext* const _ctx;
592 public:
593 ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
594
595 void heap_region_do(ShenandoahHeapRegion* r) {
596 assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
597 if (r->is_active()) {
598       // Check if the region needs its TAMS updated. We have already updated it during concurrent
599       // reset, so it is very likely we don't need to do another write here. Since most regions
600       // are not "active", this path is relatively rare.
601 if (_ctx->top_at_mark_start(r) != r->top()) {
602 _ctx->capture_top_at_mark_start(r);
603 }
604 } else {
605 assert(_ctx->top_at_mark_start(r) == r->top(),
606 "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
607 }
608 }
609
610 bool is_thread_safe() { return true; }
611 };
612
613 void ShenandoahConcurrentGC::start_mark() {
614 _mark.start_mark();
615 }
616
617 void ShenandoahConcurrentGC::op_init_mark() {
618 ShenandoahHeap* const heap = ShenandoahHeap::heap();
619 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
620 assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
621
622 assert(_generation->is_bitmap_clear(), "need clear marking bitmap");
623 assert(!_generation->is_mark_complete(), "should not be complete");
624 assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
625
626
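  // Generational bookkeeping before marking starts: a young collection makes the write card
  // table visible as the read card table so the remembered-set scan sees a stable snapshot;
  // a global collection cancels any concurrent old marking, since it marks the old generation
  // itself; otherwise, pointers buffered by the SATB barrier are transferred to the
  // old-generation mark queues so that concurrent old marking does not lose them.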
627 if (heap->mode()->is_generational()) {
628 if (_generation->is_young()) {
629 // The current implementation of swap_remembered_set() copies the write-card-table to the read-card-table.
630 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_swap_rset);
631 _generation->swap_remembered_set();
632 }
633
634 if (_generation->is_global()) {
635 heap->old_generation()->cancel_gc();
636 } else if (heap->is_concurrent_old_mark_in_progress()) {
637 // Purge the SATB buffers, transferring any valid, old pointers to the
638 // old generation mark queue. Any pointers in a young region will be
639 // abandoned.
640 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_transfer_satb);
641 heap->old_generation()->transfer_pointers_from_satb();
642 }
643 }
644
645 if (ShenandoahVerify) {
646 heap->verifier()->verify_before_concmark();
647 }
648
649 if (VerifyBeforeGC) {
650 Universe::verify();
651 }
652
653 _generation->set_concurrent_mark_in_progress(true);
654
655 start_mark();
656
657 if (_do_old_gc_bootstrap) {
658 shenandoah_assert_generational();
659 // Update region state for both young and old regions
660 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
661 ShenandoahInitMarkUpdateRegionStateClosure cl;
662 heap->parallel_heap_region_iterate(&cl);
663 heap->old_generation()->ref_processor()->reset_thread_locals();
664 } else {
665 // Update region state for only young regions
666 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
667 ShenandoahInitMarkUpdateRegionStateClosure cl;
668 _generation->parallel_heap_region_iterate(&cl);
669 }
670
671 // Weak reference processing
672 ShenandoahReferenceProcessor* rp = _generation->ref_processor();
673 rp->reset_thread_locals();
674 rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());
675
676 // Make above changes visible to worker threads
677 OrderAccess::fence();
678
679 // Arm nmethods for concurrent mark
680 ShenandoahCodeRoots::arm_nmethods_for_mark();
681
682 ShenandoahStackWatermark::change_epoch_id();
683 if (ShenandoahPacing) {
684 heap->pacer()->setup_for_mark();
685 }
686 }
687
688 void ShenandoahConcurrentGC::op_mark_roots() {
689 _mark.mark_concurrent_roots();
690 }
691
692 void ShenandoahConcurrentGC::op_mark() {
693 _mark.concurrent_mark();
694 }
695
696 void ShenandoahConcurrentGC::op_final_mark() {
697 ShenandoahHeap* const heap = ShenandoahHeap::heap();
698 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
699 assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
700
701 if (ShenandoahVerify) {
702 heap->verifier()->verify_roots_no_forwarded();
703 }
704
705 if (!heap->cancelled_gc()) {
706 _mark.finish_mark();
707 assert(!heap->cancelled_gc(), "STW mark cannot OOM");
708
709 // Notify JVMTI that the tagmap table will need cleaning.
710 JvmtiTagMap::set_needs_cleaning();
711
712 // The collection set is chosen by prepare_regions_and_collection_set(). Additionally, certain parameters have been
713 // established to govern the evacuation efforts that are about to begin. Refer to comments on reserve members in
714 // ShenandoahGeneration and ShenandoahOldGeneration for more detail.
715 _generation->prepare_regions_and_collection_set(true /*concurrent*/);
716
717 // Has to be done after cset selection
718 heap->prepare_concurrent_roots();
719
720 if (!heap->collection_set()->is_empty()) {
721 LogTarget(Debug, gc, cset) lt;
722 if (lt.is_enabled()) {
723 ResourceMark rm;
724 LogStream ls(lt);
725 heap->collection_set()->print_on(&ls);
726 }
727
728 if (ShenandoahVerify) {
729 heap->verifier()->verify_before_evacuation();
730 }
731
732 heap->set_evacuation_in_progress(true);
733 // From here on, we need to update references.
734 heap->set_has_forwarded_objects(true);
735
736 // Arm nmethods/stack for concurrent processing
737 ShenandoahCodeRoots::arm_nmethods_for_evac();
738 ShenandoahStackWatermark::change_epoch_id();
739
740 if (ShenandoahPacing) {
741 heap->pacer()->setup_for_evac();
742 }
743 } else {
744 if (ShenandoahVerify) {
745 if (has_in_place_promotions(heap)) {
746 heap->verifier()->verify_after_concmark_with_promotions();
747 } else {
748 heap->verifier()->verify_after_concmark();
749 }
750 }
751
752 if (VerifyAfterGC) {
753 Universe::verify();
754 }
755 }
756 }
757 }
758
759 bool ShenandoahConcurrentGC::has_in_place_promotions(ShenandoahHeap* heap) {
760 return heap->mode()->is_generational() && heap->old_generation()->has_in_place_promotions();
761 }
762
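// GENERATIONAL is a compile-time flag so the single-generation configuration pays no cost for
// the extra per-thread bookkeeping. In generational mode each visited thread also has promotion
// via PLABs re-enabled, likely because it may have been disabled earlier in the cycle (for
// example, when the old-generation evacuation reserve ran out).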
763 template<bool GENERATIONAL>
764 class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
765 private:
766 OopClosure* const _oops;
767 public:
768 explicit ShenandoahConcurrentEvacThreadClosure(OopClosure* oops) : _oops(oops) {}
769
770 void do_thread(Thread* thread) override {
771 JavaThread* const jt = JavaThread::cast(thread);
772 StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc);
773 if (GENERATIONAL) {
774 ShenandoahThreadLocalData::enable_plab_promotions(thread);
775 }
776 }
777 };
778
779 template<bool GENERATIONAL>
780 class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask {
781 private:
782 ShenandoahJavaThreadsIterator _java_threads;
783
784 public:
785 explicit ShenandoahConcurrentEvacUpdateThreadTask(uint n_workers) :
786 WorkerTask("Shenandoah Evacuate/Update Concurrent Thread Roots"),
787 _java_threads(ShenandoahPhaseTimings::conc_thread_roots, n_workers) {
788 }
789
790 void work(uint worker_id) override {
791 if (GENERATIONAL) {
792 Thread* worker_thread = Thread::current();
793 ShenandoahThreadLocalData::enable_plab_promotions(worker_thread);
794 }
795
796     // ShenandoahEvacOOMScope has to be set up by ShenandoahContextEvacuateUpdateRootsClosure.
797     // Otherwise, it may deadlock with the watermark lock.
798 ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
799 ShenandoahConcurrentEvacThreadClosure<GENERATIONAL> thr_cl(&oops_cl);
800 _java_threads.threads_do(&thr_cl, worker_id);
801 }
802 };
803
804 void ShenandoahConcurrentGC::op_thread_roots() {
805 ShenandoahHeap* const heap = ShenandoahHeap::heap();
806 assert(heap->is_evacuation_in_progress(), "Checked by caller");
807 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
808 if (heap->mode()->is_generational()) {
809 ShenandoahConcurrentEvacUpdateThreadTask<true> task(heap->workers()->active_workers());
810 heap->workers()->run_task(&task);
811 } else {
812 ShenandoahConcurrentEvacUpdateThreadTask<false> task(heap->workers()->active_workers());
813 heap->workers()->run_task(&task);
814 }
815 }
816
817 void ShenandoahConcurrentGC::op_weak_refs() {
818 ShenandoahHeap* const heap = ShenandoahHeap::heap();
819 assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
820 // Concurrent weak refs processing
821 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
822 if (heap->gc_cause() == GCCause::_wb_breakpoint) {
823 ShenandoahBreakpoint::at_after_reference_processing_started();
824 }
825 _generation->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
826 }
827
828 class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
829 private:
830 ShenandoahHeap* const _heap;
831 ShenandoahMarkingContext* const _mark_context;
832 bool _evac_in_progress;
833 Thread* const _thread;
834
835 public:
836 ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
837 void do_oop(oop* p);
838 void do_oop(narrowOop* p);
839 };
840
841 ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
842 _heap(ShenandoahHeap::heap()),
843 _mark_context(ShenandoahHeap::heap()->marking_context()),
844 _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
845 _thread(Thread::current()) {
846 }
847
848 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
849 const oop obj = RawAccess<>::oop_load(p);
850 if (!CompressedOops::is_null(obj)) {
851 if (!_mark_context->is_marked(obj)) {
852 shenandoah_assert_generations_reconciled();
853 if (_heap->is_in_active_generation(obj)) {
854 // Note: The obj is dead here. Do not touch it, just clear.
855 ShenandoahHeap::atomic_clear_oop(p, obj);
856 }
857 } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
858 oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
859 if (resolved == obj) {
860 resolved = _heap->evacuate_object(obj, _thread);
861 }
862 shenandoah_assert_not_in_cset_except(p, resolved, _heap->cancelled_gc());
863 ShenandoahHeap::atomic_update_oop(resolved, p, obj);
864 }
865 }
866 }
867
868 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
869 ShouldNotReachHere();
870 }
871
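// A minimal sketch of the clear-or-update pattern the closure above applies to each
// OopStorage root slot: a dead referent is cleared in place, a live referent in the
// collection set is swung to its forwardee. Both writes can race with mutators, so
// they are CASes keyed on the value originally loaded from the slot. Types and
// helpers below are illustrative stand-ins, not Shenandoah APIs.
#include <atomic>

struct Obj {};
static bool is_marked_sketch(Obj*)           { return true;  } // stub: consult mark bitmap
static bool in_cset_sketch(Obj*)             { return false; } // stub: check region membership
static Obj* forwardee_or_copy_sketch(Obj* o) { return o;     } // stub: resolve or evacuate

static void process_root_slot_sketch(std::atomic<Obj*>& slot) {
  Obj* obj = slot.load(std::memory_order_acquire);
  if (obj == nullptr) {
    return;
  }
  if (!is_marked_sketch(obj)) {
    // Dead: never dereference it, just try to clear the slot. A failed CAS means
    // someone else already changed the slot, and there is nothing more to do.
    slot.compare_exchange_strong(obj, nullptr);
  } else if (in_cset_sketch(obj)) {
    Obj* to = forwardee_or_copy_sketch(obj);
    slot.compare_exchange_strong(obj, to);  // install only if the from-copy is still there
  }
}
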
872 class ShenandoahIsCLDAliveClosure : public CLDClosure {
873 public:
874 void do_cld(ClassLoaderData* cld) {
875 cld->is_alive();
876 }
942 }
943 }
944 };
945
946 void ShenandoahConcurrentGC::op_weak_roots() {
947 ShenandoahHeap* const heap = ShenandoahHeap::heap();
948 assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
949 // Concurrent weak root processing
950 {
951 ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_work);
952 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots_work);
953 ShenandoahConcurrentWeakRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_weak_roots_work);
954 heap->workers()->run_task(&task);
955 }
956
957 // Perform handshake to flush out dead oops
958 {
959 ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_rendezvous);
960 heap->rendezvous_threads("Shenandoah Concurrent Weak Roots");
961 }
962 // We can only toggle the concurrent_weak_root_in_progress flag
963 // at a safepoint, so that mutators see a consistent
964 // value. The flag will be cleared at the next safepoint.
965 }
966
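// A rough sketch of the rendezvous idea in op_weak_roots() above: after the dead
// roots have been cleared concurrently, the GC does not assume mutators noticed
// immediately; it waits until every mutator has passed a poll (a handshake), so none
// of them can still be acting on a root it observed before the cleanup. The flag/poll
// machinery below is illustrative only, not the real handshake implementation.
#include <atomic>
#include <thread>
#include <vector>

static void rendezvous_sketch(std::vector<std::atomic<bool>>& passed_poll) {
  for (std::atomic<bool>& p : passed_poll) {
    while (!p.load(std::memory_order_acquire)) {
      std::this_thread::yield();   // wait for this mutator to cross its poll
    }
  }
  // From here on, every mutator has crossed a synchronization point after the cleanup.
}
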
967 void ShenandoahConcurrentGC::op_class_unloading() {
968 ShenandoahHeap* const heap = ShenandoahHeap::heap();
969 assert (heap->is_concurrent_weak_root_in_progress() &&
970 heap->unload_classes(),
971 "Checked by caller");
972 heap->do_class_unloading();
973 }
974
975 class ShenandoahEvacUpdateCodeCacheClosure : public NMethodClosure {
976 private:
977 BarrierSetNMethod* const _bs;
978 ShenandoahEvacuateUpdateMetadataClosure _cl;
979
980 public:
981 ShenandoahEvacUpdateCodeCacheClosure() :
982 _bs(BarrierSet::barrier_set()->barrier_set_nmethod()),
983 _cl() {
984 }
1041 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1042 assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
1043 ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
1044 heap->workers()->run_task(&task);
1045 heap->set_concurrent_strong_root_in_progress(false);
1046 }
1047
1048 void ShenandoahConcurrentGC::op_cleanup_early() {
1049 ShenandoahHeap::heap()->free_set()->recycle_trash();
1050 }
1051
1052 void ShenandoahConcurrentGC::op_evacuate() {
1053 ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
1054 }
1055
1056 void ShenandoahConcurrentGC::op_init_updaterefs() {
1057 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1058 heap->set_evacuation_in_progress(false);
1059 heap->set_concurrent_weak_root_in_progress(false);
1060 heap->prepare_update_heap_references(true /*concurrent*/);
1061 heap->set_update_refs_in_progress(true);
1062 if (ShenandoahVerify) {
1063 heap->verifier()->verify_before_updaterefs();
1064 }
1065 if (ShenandoahPacing) {
1066 heap->pacer()->setup_for_updaterefs();
1067 }
1068 }
1069
1070 void ShenandoahConcurrentGC::op_updaterefs() {
1071 ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
1072 }
1073
1074 class ShenandoahUpdateThreadClosure : public HandshakeClosure {
1075 private:
1076 // This closure runs while the thread is stopped for a handshake, which means
1077 // we can use a non-concurrent closure here, as long as it only updates
1078 // locations modified by the thread itself, i.e. stack locations.
1079 ShenandoahNonConcUpdateRefsClosure _cl;
1080 public:
1081 ShenandoahUpdateThreadClosure();
1082 void do_thread(Thread* thread);
1083 };
1084
1092 ResourceMark rm;
1093 jt->oops_do(&_cl, nullptr);
1094 }
1095 }
1096
1097 void ShenandoahConcurrentGC::op_update_thread_roots() {
1098 ShenandoahUpdateThreadClosure cl;
1099 Handshake::execute(&cl);
1100 }
1101
1102 void ShenandoahConcurrentGC::op_final_updaterefs() {
1103 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1104 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
1105 assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");
1106
1107 heap->finish_concurrent_roots();
1108
1109 // Clear cancelled GC, if set. On the cancellation path, the block before would have
1110 // handled everything.
1111 if (heap->cancelled_gc()) {
1112 heap->clear_cancelled_gc(true /* clear oom handler */);
1113 }
1114
1115 // Has to be done before the cset is cleared
1116 if (ShenandoahVerify) {
1117 heap->verifier()->verify_roots_in_to_space();
1118 }
1119
1120 // If we are running in generational mode and this is an aging cycle, this will also age active
1121 // regions that haven't been used for allocation.
1122 heap->update_heap_region_states(true /*concurrent*/);
1123
1124 heap->set_update_refs_in_progress(false);
1125 heap->set_has_forwarded_objects(false);
1126
1127 if (heap->mode()->is_generational() && heap->is_concurrent_old_mark_in_progress()) {
1128 // When the SATB barrier is left on to support concurrent old gen mark, it may pick up writes to
1129 // objects in the collection set. After those objects are evacuated, the pointers in the
1130 // SATB are no longer safe. Once we have finished update references, we are guaranteed that
1131 // no more writes to the collection set are possible.
1132 //
1133 // This will transfer any old pointers in _active_ regions from the SATB to the old gen
1134 // mark queues. All other pointers will be discarded. This would also discard any pointers
1135 // in old regions that were included in a mixed evacuation. We aren't using the SATB filter
1136 // methods here because we cannot control when they execute. If the SATB filter runs _after_
1137 // a region has been recycled, we will not be able to detect the bad pointer.
1138 //
1139 // We are not concerned about skipping this step in abbreviated cycles because regions
1140 // with no live objects cannot have been written to and so cannot have entries in the SATB
1141 // buffers.
1142 heap->old_generation()->transfer_pointers_from_satb();
1143
1144 // The aging cycle is only relevant during the evacuation cycle for individual objects and during final mark
1145 // for entire regions. Both of these operations occur before final update refs.
1146 ShenandoahGenerationalHeap::heap()->set_aging_cycle(false);
1147 }
1148
1149 if (ShenandoahVerify) {
1150 heap->verifier()->verify_after_updaterefs();
1151 }
1152
1153 if (VerifyAfterGC) {
1154 Universe::verify();
1155 }
1156
1157 heap->rebuild_free_set(true /*concurrent*/);
1158 }
1159
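// A minimal sketch of the SATB hand-off performed by transfer_pointers_from_satb()
// in op_final_updaterefs() above: drain the buffers eagerly, keep only the pointers
// that old marking still needs, and drop everything else before the regions they
// point into can be recycled. The containers and the predicate are illustrative
// stand-ins, not the real SATB machinery.
#include <deque>
#include <vector>

struct OldObj {};
static bool old_marking_still_needs(OldObj*) { return true; } // stub: "points into an active old region"

static void transfer_from_satb_sketch(std::vector<OldObj*>& satb_buffer,
                                      std::deque<OldObj*>& old_mark_queue) {
  for (OldObj* p : satb_buffer) {
    if (p != nullptr && old_marking_still_needs(p)) {
      old_mark_queue.push_back(p);   // keep: old marking will visit it later
    }
    // otherwise drop: the region it points into may be reclaimed after this cycle
  }
  satb_buffer.clear();               // the buffer is fully consumed either way
}
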
1160 void ShenandoahConcurrentGC::op_final_roots() {
1161
1162 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1163 heap->set_concurrent_weak_root_in_progress(false);
1164 heap->set_evacuation_in_progress(false);
1165
1166 if (heap->mode()->is_generational()) {
1167 // If the cycle was shortened because there was enough immediate garbage, this could
1168 // be the last GC safepoint before concurrent marking of old resumes. We must make sure
1169 // that old mark threads don't see any pointers to garbage in the SATB buffers.
1170 if (heap->is_concurrent_old_mark_in_progress()) {
1171 heap->old_generation()->transfer_pointers_from_satb();
1172 }
1173
1174 if (!_generation->is_old()) {
1175 ShenandoahGenerationalHeap::heap()->update_region_ages(_generation->complete_marking_context());
1176 }
1177 }
1178 }
1179
1180 void ShenandoahConcurrentGC::op_cleanup_complete() {
1181 ShenandoahHeap::heap()->free_set()->recycle_trash();
1182 }
1183
1184 bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
1185 if (ShenandoahHeap::heap()->cancelled_gc()) {
1186 _degen_point = point;
1187 return true;
1188 }
1189 return false;
1190 }
1191
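// Typical use of check_cancellation_and_abort() at a phase boundary earlier in this
// file: if the cycle has been cancelled, remember how far we got so a degenerated GC
// can resume from that point. The degeneration point shown is illustrative:
//
//   if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) {
//     return false;  // caller falls back to a degenerated cycle from this point
//   }
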
1192 const char* ShenandoahConcurrentGC::init_mark_event_message() const {
1193 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1194 assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1195 if (heap->unload_classes()) {
1196 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Init Mark", " (unload classes)");
1197 } else {
1198 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Init Mark", "");
1199 }
1200 }
1201
1202 const char* ShenandoahConcurrentGC::final_mark_event_message() const {
1203 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1204 assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
1205 "Should not have forwarded objects during final mark, unless old gen concurrent mark is running");
1206
1207 if (heap->unload_classes()) {
1208 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Mark", " (unload classes)");
1209 } else {
1210 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Mark", "");
1211 }
1212 }
1213
1214 const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
1215 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1216 assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
1217 "Should not have forwarded objects concurrent mark, unless old gen concurrent mark is running");
1218 if (heap->unload_classes()) {
1219 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent marking", " (unload classes)");
1220 } else {
1221 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent marking", "");
1222 }
1223 }
1224
1225 const char* ShenandoahConcurrentGC::conc_reset_event_message() const {
1226 if (ShenandoahHeap::heap()->unload_classes()) {
1227 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent reset", " (unload classes)");
1228 } else {
1229 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent reset", "");
1230 }
1231 }
1232
1233 const char* ShenandoahConcurrentGC::final_roots_event_message() const {
1234 if (ShenandoahHeap::heap()->unload_classes()) {
1235 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Roots", " (unload classes)");
1236 } else {
1237 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Roots", "");
1238 }
1239 }
1240
1241 const char* ShenandoahConcurrentGC::conc_weak_refs_event_message() const {
1242 if (ShenandoahHeap::heap()->unload_classes()) {
1243 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent weak references", " (unload classes)");
1244 } else {
1245 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent weak references", "");
1246 }
1247 }
1248
1249 const char* ShenandoahConcurrentGC::conc_weak_roots_event_message() const {
1250 if (ShenandoahHeap::heap()->unload_classes()) {
1251 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent weak roots", " (unload classes)");
1252 } else {
1253 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent weak roots", "");
1254 }
1255 }
1256
1257 const char* ShenandoahConcurrentGC::conc_cleanup_event_message() const {
1258 if (ShenandoahHeap::heap()->unload_classes()) {
1259 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent cleanup", " (unload classes)");
1260 } else {
1261 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent cleanup", "");
1262 }
1263 }
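
// The message helpers above return plain const char* values, so SHENANDOAH_RETURN_EVENT_MESSAGE
// presumably selects among compile-time string-literal concatenations per generation type
// rather than formatting anything at runtime. A hedged sketch of that idea with a made-up
// enum and macro name; this is not the real macro definition.
enum class GenTypeSketch { NonGen, Young, Old, Global };

#define RETURN_EVENT_MESSAGE_SKETCH(type, prefix, postfix)               \
  do {                                                                   \
    switch (type) {                                                      \
      case GenTypeSketch::NonGen: return prefix             postfix;     \
      case GenTypeSketch::Young:  return prefix " (Young)"  postfix;     \
      case GenTypeSketch::Old:    return prefix " (Old)"    postfix;     \
      case GenTypeSketch::Global: return prefix " (Global)" postfix;     \
    }                                                                    \
    return prefix postfix;                                               \
  } while (0)

static const char* cleanup_message_sketch(GenTypeSketch t, bool unload_classes) {
  if (unload_classes) {
    RETURN_EVENT_MESSAGE_SKETCH(t, "Concurrent cleanup", " (unload classes)");
  } else {
    RETURN_EVENT_MESSAGE_SKETCH(t, "Concurrent cleanup", "");
  }
}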