1 /*
2 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27
28 #include "gc/shared/barrierSetNMethod.hpp"
29 #include "gc/shared/collectorCounters.hpp"
30 #include "gc/shared/continuationGCSupport.inline.hpp"
31 #include "gc/shenandoah/shenandoahBreakpoint.hpp"
32 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
33 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
34 #include "gc/shenandoah/shenandoahFreeSet.hpp"
35 #include "gc/shenandoah/shenandoahLock.hpp"
36 #include "gc/shenandoah/shenandoahMark.inline.hpp"
37 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
38 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
39 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
40 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
41 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
42 #include "gc/shenandoah/shenandoahStackWatermark.hpp"
43 #include "gc/shenandoah/shenandoahUtils.hpp"
44 #include "gc/shenandoah/shenandoahVerifier.hpp"
45 #include "gc/shenandoah/shenandoahVMOperations.hpp"
46 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
47 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
48 #include "memory/allocation.hpp"
49 #include "prims/jvmtiTagMap.hpp"
50 #include "runtime/vmThread.hpp"
51 #include "utilities/events.hpp"
52
53 // Breakpoint support
54 class ShenandoahBreakpointGCScope : public StackObj {
69 }
70 };
71
72 class ShenandoahBreakpointMarkScope : public StackObj {
73 private:
74 const GCCause::Cause _cause;
75 public:
76 ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
77 if (_cause == GCCause::_wb_breakpoint) {
78 ShenandoahBreakpoint::at_after_marking_started();
79 }
80 }
81
82 ~ShenandoahBreakpointMarkScope() {
83 if (_cause == GCCause::_wb_breakpoint) {
84 ShenandoahBreakpoint::at_before_marking_completed();
85 }
86 }
87 };
88
89 ShenandoahConcurrentGC::ShenandoahConcurrentGC() :
90 _mark(),
91 _degen_point(ShenandoahDegenPoint::_degenerated_unset) {
92 }
93
94 ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
95 return _degen_point;
96 }
97
98 void ShenandoahConcurrentGC::cancel() {
99 ShenandoahConcurrentMark::cancel();
100 }
101
102 bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
103 ShenandoahHeap* const heap = ShenandoahHeap::heap();
104 ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);
105
106 // Reset for upcoming marking
107 entry_reset();
108
109 // Start initial mark under STW
110 vmop_entry_init_mark();
111
112 {
113 ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);
114 // Concurrent mark roots
115 entry_mark_roots();
116 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_outside_cycle)) return false;
117
118 // Continue concurrent mark
119 entry_mark();
120 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) return false;
121 }
122
123 // Complete marking under STW, and start evacuation
124 vmop_entry_final_mark();
125
126 // Concurrent stack processing
127 if (heap->is_evacuation_in_progress()) {
128 entry_thread_roots();
129 }
130
131 // Process weak roots that might still point to regions that would be broken by cleanup
132 if (heap->is_concurrent_weak_root_in_progress()) {
133 entry_weak_refs();
134 entry_weak_roots();
135 }
136
137 // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
138 // the space. This would be the last action if there is nothing to evacuate.
139 entry_cleanup_early();
140
141 {
142 ShenandoahHeapLocker locker(heap->lock());
143 heap->free_set()->log_status();
144 }
145
146 // Perform concurrent class unloading
147 if (heap->unload_classes() &&
148 heap->is_concurrent_weak_root_in_progress()) {
149 entry_class_unloading();
150 }
151
152 // Processing strong roots
153 // This may be skipped if there is nothing to update/evacuate.
154 // If so, strong_root_in_progress would be unset.
155 if (heap->is_concurrent_strong_root_in_progress()) {
156 entry_strong_roots();
157 }
158
159 // Continue the cycle with evacuation and optional update-refs.
160 // This may be skipped if there is nothing to evacuate.
161 // If so, evac_in_progress would be unset by collection set preparation code.
162 if (heap->is_evacuation_in_progress()) {
163 // Concurrently evacuate
164 entry_evacuate();
165 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) return false;
166
167 // Perform update-refs phase.
168 vmop_entry_init_updaterefs();
169 entry_updaterefs();
170 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;
171
172 // Concurrent update thread roots
173 entry_update_thread_roots();
174 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;
175
176 vmop_entry_final_updaterefs();
177
178 // Update references has freed up the collection set; kick the cleanup to reclaim the space.
179 entry_cleanup_complete();
180 } else {
181 vmop_entry_final_roots();
182 }
183
184 return true;
185 }
186
187 void ShenandoahConcurrentGC::vmop_entry_init_mark() {
188 ShenandoahHeap* const heap = ShenandoahHeap::heap();
189 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
190 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);
191
192 heap->try_inject_alloc_failure();
193 VM_ShenandoahInitMark op(this);
194 VMThread::execute(&op); // jump to entry_init_mark() under safepoint
195 }
196
197 void ShenandoahConcurrentGC::vmop_entry_final_mark() {
198 ShenandoahHeap* const heap = ShenandoahHeap::heap();
199 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
200 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);
201
202 heap->try_inject_alloc_failure();
203 VM_ShenandoahFinalMarkStartEvac op(this);
264 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs);
265 EventMark em("%s", msg);
266
267 // No workers used in this phase, no setup required
268 op_init_updaterefs();
269 }
270
271 void ShenandoahConcurrentGC::entry_final_updaterefs() {
272 static const char* msg = "Pause Final Update Refs";
273 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
274 EventMark em("%s", msg);
275
276 ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
277 ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
278 "final reference update");
279
280 op_final_updaterefs();
281 }
282
283 void ShenandoahConcurrentGC::entry_final_roots() {
284 static const char* msg = "Pause Final Roots";
285 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots);
286 EventMark em("%s", msg);
287
288 op_final_roots();
289 }
290
291 void ShenandoahConcurrentGC::entry_reset() {
292 ShenandoahHeap* const heap = ShenandoahHeap::heap();
293 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
294 static const char* msg = "Concurrent reset";
295 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
296 EventMark em("%s", msg);
297
298 ShenandoahWorkerScope scope(heap->workers(),
299 ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
300 "concurrent reset");
301
302 heap->try_inject_alloc_failure();
303 op_reset();
304 }
305
306 void ShenandoahConcurrentGC::entry_mark_roots() {
307 ShenandoahHeap* const heap = ShenandoahHeap::heap();
308 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
309 const char* msg = "Concurrent marking roots";
310 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
311 EventMark em("%s", msg);
312
313 ShenandoahWorkerScope scope(heap->workers(),
314 ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
315 "concurrent marking roots");
316
317 heap->try_inject_alloc_failure();
318 op_mark_roots();
319 }
320
321 void ShenandoahConcurrentGC::entry_mark() {
322 ShenandoahHeap* const heap = ShenandoahHeap::heap();
323 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
332 heap->try_inject_alloc_failure();
333 op_mark();
334 }
335
336 void ShenandoahConcurrentGC::entry_thread_roots() {
337 ShenandoahHeap* const heap = ShenandoahHeap::heap();
338 static const char* msg = "Concurrent thread roots";
339 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_thread_roots);
340 EventMark em("%s", msg);
341
342 ShenandoahWorkerScope scope(heap->workers(),
343 ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
344 msg);
345
346 heap->try_inject_alloc_failure();
347 op_thread_roots();
348 }
349
350 void ShenandoahConcurrentGC::entry_weak_refs() {
351 ShenandoahHeap* const heap = ShenandoahHeap::heap();
352 static const char* msg = "Concurrent weak references";
353 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_refs);
354 EventMark em("%s", msg);
355
356 ShenandoahWorkerScope scope(heap->workers(),
357 ShenandoahWorkerPolicy::calc_workers_for_conc_refs_processing(),
358 "concurrent weak references");
359
360 heap->try_inject_alloc_failure();
361 op_weak_refs();
362 }
363
364 void ShenandoahConcurrentGC::entry_weak_roots() {
365 ShenandoahHeap* const heap = ShenandoahHeap::heap();
366 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
367 static const char* msg = "Concurrent weak roots";
368 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_roots);
369 EventMark em("%s", msg);
370
371 ShenandoahWorkerScope scope(heap->workers(),
372 ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
373 "concurrent weak root");
374
375 heap->try_inject_alloc_failure();
376 op_weak_roots();
377 }
378
379 void ShenandoahConcurrentGC::entry_class_unloading() {
380 ShenandoahHeap* const heap = ShenandoahHeap::heap();
381 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
382 static const char* msg = "Concurrent class unloading";
383 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_class_unload);
384 EventMark em("%s", msg);
385
386 ShenandoahWorkerScope scope(heap->workers(),
387 ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
394 void ShenandoahConcurrentGC::entry_strong_roots() {
395 ShenandoahHeap* const heap = ShenandoahHeap::heap();
396 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
397 static const char* msg = "Concurrent strong roots";
398 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_strong_roots);
399 EventMark em("%s", msg);
400
401 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_strong_roots);
402
403 ShenandoahWorkerScope scope(heap->workers(),
404 ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
405 "concurrent strong root");
406
407 heap->try_inject_alloc_failure();
408 op_strong_roots();
409 }
410
411 void ShenandoahConcurrentGC::entry_cleanup_early() {
412 ShenandoahHeap* const heap = ShenandoahHeap::heap();
413 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
414 static const char* msg = "Concurrent cleanup";
415 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_early, true /* log_heap_usage */);
416 EventMark em("%s", msg);
417
418 // This phase does not use workers, no need for setup
419 heap->try_inject_alloc_failure();
420 op_cleanup_early();
421 }
422
423 void ShenandoahConcurrentGC::entry_evacuate() {
424 ShenandoahHeap* const heap = ShenandoahHeap::heap();
425 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
426
427 static const char* msg = "Concurrent evacuation";
428 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_evac);
429 EventMark em("%s", msg);
430
431 ShenandoahWorkerScope scope(heap->workers(),
432 ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
433 "concurrent evacuation");
434
435 heap->try_inject_alloc_failure();
436 op_evacuate();
437 }
438
439 void ShenandoahConcurrentGC::entry_update_thread_roots() {
440 ShenandoahHeap* const heap = ShenandoahHeap::heap();
441 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
442
443 static const char* msg = "Concurrent update thread roots";
444 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_thread_roots);
445 EventMark em("%s", msg);
446
447 // No workers used in this phase, no setup required
448 heap->try_inject_alloc_failure();
449 op_update_thread_roots();
450 }
451
452 void ShenandoahConcurrentGC::entry_updaterefs() {
453 ShenandoahHeap* const heap = ShenandoahHeap::heap();
454 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
455 static const char* msg = "Concurrent update references";
456 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs);
457 EventMark em("%s", msg);
458
459 ShenandoahWorkerScope scope(heap->workers(),
460 ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
461 "concurrent reference update");
462
463 heap->try_inject_alloc_failure();
464 op_updaterefs();
465 }
466
467 void ShenandoahConcurrentGC::entry_cleanup_complete() {
468 ShenandoahHeap* const heap = ShenandoahHeap::heap();
469 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
470 static const char* msg = "Concurrent cleanup";
471 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
472 EventMark em("%s", msg);
473
474 // This phase does not use workers, no need for setup
475 heap->try_inject_alloc_failure();
476 op_cleanup_complete();
477 }
478
479 void ShenandoahConcurrentGC::op_reset() {
480 ShenandoahHeap* const heap = ShenandoahHeap::heap();
481 if (ShenandoahPacing) {
482 heap->pacer()->setup_for_reset();
483 }
484
485 heap->prepare_gc();
486 }
487
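// Region closure run at init mark: capture top-at-mark-start (TAMS) for each active region so that
// objects allocated above TAMS during the concurrent cycle are treated as implicitly live.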
488 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
489 private:
490 ShenandoahMarkingContext* const _ctx;
491 public:
492 ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
493
494 void heap_region_do(ShenandoahHeapRegion* r) {
495 assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
496 if (r->is_active()) {
497 // Check if region needs updating its TAMS. We have updated it already during concurrent
498 // reset, so it is very likely we don't need to do another write here.
499 if (_ctx->top_at_mark_start(r) != r->top()) {
500 _ctx->capture_top_at_mark_start(r);
501 }
502 } else {
503 assert(_ctx->top_at_mark_start(r) == r->top(),
504 "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
505 }
506 }
507
508 bool is_thread_safe() { return true; }
509 };
510
511 void ShenandoahConcurrentGC::start_mark() {
512 _mark.start_mark();
513 }
514
515 void ShenandoahConcurrentGC::op_init_mark() {
516 ShenandoahHeap* const heap = ShenandoahHeap::heap();
517 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
518 assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
519
520 assert(heap->marking_context()->is_bitmap_clear(), "need clear marking bitmap");
521 assert(!heap->marking_context()->is_complete(), "should not be complete");
522 assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
523
524 if (ShenandoahVerify) {
525 heap->verifier()->verify_before_concmark();
526 }
527
528 if (VerifyBeforeGC) {
529 Universe::verify();
530 }
531
532 heap->set_concurrent_mark_in_progress(true);
533
534 start_mark();
535
536 {
537 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
538 ShenandoahInitMarkUpdateRegionStateClosure cl;
539 heap->parallel_heap_region_iterate(&cl);
540 }
541
542 // Weak reference processing
543 ShenandoahReferenceProcessor* rp = heap->ref_processor();
544 rp->reset_thread_locals();
545 rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());
546
547 // Make above changes visible to worker threads
548 OrderAccess::fence();
549
550 // Arm nmethods for concurrent mark
551 ShenandoahCodeRoots::arm_nmethods_for_mark();
552
553 ShenandoahStackWatermark::change_epoch_id();
554 if (ShenandoahPacing) {
555 heap->pacer()->setup_for_mark();
556 }
557 }
558
559 void ShenandoahConcurrentGC::op_mark_roots() {
560 _mark.mark_concurrent_roots();
561 }
562
563 void ShenandoahConcurrentGC::op_mark() {
564 _mark.concurrent_mark();
565 }
566
567 void ShenandoahConcurrentGC::op_final_mark() {
568 ShenandoahHeap* const heap = ShenandoahHeap::heap();
569 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
570 assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
571
572 if (ShenandoahVerify) {
573 heap->verifier()->verify_roots_no_forwarded();
574 }
575
576 if (!heap->cancelled_gc()) {
577 _mark.finish_mark();
578 assert(!heap->cancelled_gc(), "STW mark cannot OOM");
579
580 // Notify JVMTI that the tagmap table will need cleaning.
581 JvmtiTagMap::set_needs_cleaning();
582
583 heap->prepare_regions_and_collection_set(true /*concurrent*/);
584
585 // Has to be done after cset selection
586 heap->prepare_concurrent_roots();
587
588 if (!heap->collection_set()->is_empty()) {
589 if (ShenandoahVerify) {
590 heap->verifier()->verify_before_evacuation();
591 }
592
593 heap->set_evacuation_in_progress(true);
594 // From here on, we need to update references.
595 heap->set_has_forwarded_objects(true);
596
597 // Verify before arming for concurrent processing.
598 // Otherwise, verification can trigger stack processing.
599 if (ShenandoahVerify) {
600 heap->verifier()->verify_during_evacuation();
601 }
602
603 // Arm nmethods/stack for concurrent processing
604 ShenandoahCodeRoots::arm_nmethods_for_evac();
605 ShenandoahStackWatermark::change_epoch_id();
606
607 if (ShenandoahPacing) {
608 heap->pacer()->setup_for_evac();
609 }
610 } else {
611 if (ShenandoahVerify) {
612 heap->verifier()->verify_after_concmark();
613 }
614
615 if (VerifyAfterGC) {
616 Universe::verify();
617 }
618 }
619 }
620 }
621
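// Per-thread closure: finishes stack watermark processing for a Java thread, evacuating/updating
// the oops reachable from its stack via the supplied oop closure.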
622 class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
623 private:
624 OopClosure* const _oops;
625
626 public:
627 ShenandoahConcurrentEvacThreadClosure(OopClosure* oops);
628 void do_thread(Thread* thread);
629 };
630
631 ShenandoahConcurrentEvacThreadClosure::ShenandoahConcurrentEvacThreadClosure(OopClosure* oops) :
632 _oops(oops) {
633 }
634
635 void ShenandoahConcurrentEvacThreadClosure::do_thread(Thread* thread) {
636 JavaThread* const jt = JavaThread::cast(thread);
637 StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc);
638 }
639
640 class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask {
641 private:
642 ShenandoahJavaThreadsIterator _java_threads;
643
644 public:
645 ShenandoahConcurrentEvacUpdateThreadTask(uint n_workers) :
646 WorkerTask("Shenandoah Evacuate/Update Concurrent Thread Roots"),
647 _java_threads(ShenandoahPhaseTimings::conc_thread_roots, n_workers) {
648 }
649
650 void work(uint worker_id) {
651 // The ShenandoahEvacOOMScope has to be set up by ShenandoahContextEvacuateUpdateRootsClosure.
652 // Otherwise, we may deadlock on the watermark lock.
653 ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
654 ShenandoahConcurrentEvacThreadClosure thr_cl(&oops_cl);
655 _java_threads.threads_do(&thr_cl, worker_id);
656 }
657 };
658
659 void ShenandoahConcurrentGC::op_thread_roots() {
660 ShenandoahHeap* const heap = ShenandoahHeap::heap();
661 assert(heap->is_evacuation_in_progress(), "Checked by caller");
662 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
663 ShenandoahConcurrentEvacUpdateThreadTask task(heap->workers()->active_workers());
664 heap->workers()->run_task(&task);
665 }
666
667 void ShenandoahConcurrentGC::op_weak_refs() {
668 ShenandoahHeap* const heap = ShenandoahHeap::heap();
669 assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
670 // Concurrent weak refs processing
671 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
672 if (heap->gc_cause() == GCCause::_wb_breakpoint) {
673 ShenandoahBreakpoint::at_after_reference_processing_started();
674 }
675 heap->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
676 }
677
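// Closure over OopStorage-backed weak roots: clears slots that point to unmarked (dead) objects,
// and while evacuation is in progress evacuates/updates slots that point into the collection set.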
678 class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
679 private:
680 ShenandoahHeap* const _heap;
681 ShenandoahMarkingContext* const _mark_context;
682 bool _evac_in_progress;
683 Thread* const _thread;
684
685 public:
686 ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
687 void do_oop(oop* p);
688 void do_oop(narrowOop* p);
689 };
690
691 ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
692 _heap(ShenandoahHeap::heap()),
693 _mark_context(ShenandoahHeap::heap()->marking_context()),
694 _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
695 _thread(Thread::current()) {
696 }
697
698 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
699 const oop obj = RawAccess<>::oop_load(p);
700 if (!CompressedOops::is_null(obj)) {
701 if (!_mark_context->is_marked(obj)) {
702 shenandoah_assert_correct(p, obj);
703 ShenandoahHeap::atomic_clear_oop(p, obj);
704 } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
705 oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
706 if (resolved == obj) {
707 resolved = _heap->evacuate_object(obj, _thread);
708 }
709 ShenandoahHeap::atomic_update_oop(resolved, p, obj);
710 assert(_heap->cancelled_gc() ||
711 _mark_context->is_marked(resolved) && !_heap->in_collection_set(resolved),
712 "Sanity");
713 }
714 }
715 }
716
717 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
718 ShouldNotReachHere();
719 }
720
721 class ShenandoahIsCLDAliveClosure : public CLDClosure {
722 public:
723 void do_cld(ClassLoaderData* cld) {
724 cld->is_alive();
725 }
726 };
727
728 class ShenandoahIsNMethodAliveClosure: public NMethodClosure {
729 public:
730 void do_nmethod(nmethod* n) {
731 n->is_unloading();
732 }
791 }
792 }
793 };
794
795 void ShenandoahConcurrentGC::op_weak_roots() {
796 ShenandoahHeap* const heap = ShenandoahHeap::heap();
797 assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
798 // Concurrent weak root processing
799 {
800 ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_work);
801 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots_work);
802 ShenandoahConcurrentWeakRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_weak_roots_work);
803 heap->workers()->run_task(&task);
804 }
805
806 // Perform handshake to flush out dead oops
807 {
808 ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_rendezvous);
809 heap->rendezvous_threads();
810 }
811 }
812
813 void ShenandoahConcurrentGC::op_class_unloading() {
814 ShenandoahHeap* const heap = ShenandoahHeap::heap();
815 assert (heap->is_concurrent_weak_root_in_progress() &&
816 heap->unload_classes(),
817 "Checked by caller");
818 heap->do_class_unloading();
819 }
820
821 class ShenandoahEvacUpdateCodeCacheClosure : public NMethodClosure {
822 private:
823 BarrierSetNMethod* const _bs;
824 ShenandoahEvacuateUpdateMetadataClosure _cl;
825
826 public:
827 ShenandoahEvacUpdateCodeCacheClosure() :
828 _bs(BarrierSet::barrier_set()->barrier_set_nmethod()),
829 _cl() {
830 }
887 ShenandoahHeap* const heap = ShenandoahHeap::heap();
888 assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
889 ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
890 heap->workers()->run_task(&task);
891 heap->set_concurrent_strong_root_in_progress(false);
892 }
893
894 void ShenandoahConcurrentGC::op_cleanup_early() {
895 ShenandoahHeap::heap()->free_set()->recycle_trash();
896 }
897
898 void ShenandoahConcurrentGC::op_evacuate() {
899 ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
900 }
901
902 void ShenandoahConcurrentGC::op_init_updaterefs() {
903 ShenandoahHeap* const heap = ShenandoahHeap::heap();
904 heap->set_evacuation_in_progress(false);
905 heap->set_concurrent_weak_root_in_progress(false);
906 heap->prepare_update_heap_references(true /*concurrent*/);
907 heap->set_update_refs_in_progress(true);
908
909 if (ShenandoahPacing) {
910 heap->pacer()->setup_for_updaterefs();
911 }
912 }
913
914 void ShenandoahConcurrentGC::op_updaterefs() {
915 ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
916 }
917
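// Handshake closure: updates references held in a Java thread's oops (stack and thread-local roots)
// after evacuation, using ShenandoahUpdateRefsClosure.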
918 class ShenandoahUpdateThreadClosure : public HandshakeClosure {
919 private:
920 ShenandoahUpdateRefsClosure _cl;
921 public:
922 ShenandoahUpdateThreadClosure();
923 void do_thread(Thread* thread);
924 };
925
926 ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
927 HandshakeClosure("Shenandoah Update Thread Roots") {
928 }
933 ResourceMark rm;
934 jt->oops_do(&_cl, nullptr);
935 }
936 }
937
938 void ShenandoahConcurrentGC::op_update_thread_roots() {
939 ShenandoahUpdateThreadClosure cl;
940 Handshake::execute(&cl);
941 }
942
943 void ShenandoahConcurrentGC::op_final_updaterefs() {
944 ShenandoahHeap* const heap = ShenandoahHeap::heap();
945 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
946 assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");
947
948 heap->finish_concurrent_roots();
949
950 // Clear cancelled GC, if set. On cancellation path, the block before would handle
951 // everything.
952 if (heap->cancelled_gc()) {
953 heap->clear_cancelled_gc();
954 }
955
956 // Has to be done before cset is clear
957 if (ShenandoahVerify) {
958 heap->verifier()->verify_roots_in_to_space();
959 }
960
961 heap->update_heap_region_states(true /*concurrent*/);
962
963 heap->set_update_refs_in_progress(false);
964 heap->set_has_forwarded_objects(false);
965
966 if (ShenandoahVerify) {
967 heap->verifier()->verify_after_updaterefs();
968 }
969
970 if (VerifyAfterGC) {
971 Universe::verify();
972 }
973
974 heap->rebuild_free_set(true /*concurrent*/);
975 }
976
977 void ShenandoahConcurrentGC::op_final_roots() {
978 ShenandoahHeap::heap()->set_concurrent_weak_root_in_progress(false);
979 }
980
981 void ShenandoahConcurrentGC::op_cleanup_complete() {
982 ShenandoahHeap::heap()->free_set()->recycle_trash();
983 }
984
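// If the GC has been cancelled, record the degeneration point from which a degenerated cycle
// should resume and tell the caller to abort the concurrent cycle.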
985 bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
986 if (ShenandoahHeap::heap()->cancelled_gc()) {
987 _degen_point = point;
988 return true;
989 }
990 return false;
991 }
992
993 const char* ShenandoahConcurrentGC::init_mark_event_message() const {
994 ShenandoahHeap* const heap = ShenandoahHeap::heap();
995 assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
996 if (heap->unload_classes()) {
997 return "Pause Init Mark (unload classes)";
998 } else {
999 return "Pause Init Mark";
1000 }
1001 }
1002
1003 const char* ShenandoahConcurrentGC::final_mark_event_message() const {
1004 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1005 assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1006 if (heap->unload_classes()) {
1007 return "Pause Final Mark (unload classes)";
1008 } else {
1009 return "Pause Final Mark";
1010 }
1011 }
1012
1013 const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
1014 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1015 assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1016 if (heap->unload_classes()) {
1017 return "Concurrent marking (unload classes)";
1018 } else {
1019 return "Concurrent marking";
1020 }
1021 }
|
1 /*
2 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
4 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
5 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 *
7 * This code is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 only, as
9 * published by the Free Software Foundation.
10 *
11 * This code is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * version 2 for more details (a copy is included in the LICENSE file that
15 * accompanied this code).
16 *
17 * You should have received a copy of the GNU General Public License version
18 * 2 along with this work; if not, write to the Free Software Foundation,
19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 *
21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
22 * or visit www.oracle.com if you need additional information or have any
23 * questions.
24 *
25 */
26
27 #include "precompiled.hpp"
28
29 #include "gc/shared/barrierSetNMethod.hpp"
30 #include "gc/shared/collectorCounters.hpp"
31 #include "gc/shared/continuationGCSupport.inline.hpp"
32 #include "gc/shenandoah/shenandoahBreakpoint.hpp"
33 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
34 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
35 #include "gc/shenandoah/shenandoahFreeSet.hpp"
36 #include "gc/shenandoah/shenandoahGeneration.hpp"
37 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
38 #include "gc/shenandoah/shenandoahLock.hpp"
39 #include "gc/shenandoah/shenandoahMark.inline.hpp"
40 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
41 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
42 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
43 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
44 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
45 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
46 #include "gc/shenandoah/shenandoahStackWatermark.hpp"
47 #include "gc/shenandoah/shenandoahUtils.hpp"
48 #include "gc/shenandoah/shenandoahVerifier.hpp"
49 #include "gc/shenandoah/shenandoahVMOperations.hpp"
50 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
51 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
52 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
53 #include "memory/allocation.hpp"
54 #include "prims/jvmtiTagMap.hpp"
55 #include "runtime/vmThread.hpp"
56 #include "utilities/events.hpp"
57
58 // Breakpoint support
59 class ShenandoahBreakpointGCScope : public StackObj {
74 }
75 };
76
77 class ShenandoahBreakpointMarkScope : public StackObj {
78 private:
79 const GCCause::Cause _cause;
80 public:
81 ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
82 if (_cause == GCCause::_wb_breakpoint) {
83 ShenandoahBreakpoint::at_after_marking_started();
84 }
85 }
86
87 ~ShenandoahBreakpointMarkScope() {
88 if (_cause == GCCause::_wb_breakpoint) {
89 ShenandoahBreakpoint::at_before_marking_completed();
90 }
91 }
92 };
93
94 ShenandoahConcurrentGC::ShenandoahConcurrentGC(ShenandoahGeneration* generation, bool do_old_gc_bootstrap) :
95 _mark(generation),
96 _generation(generation),
97 _degen_point(ShenandoahDegenPoint::_degenerated_unset),
98 _abbreviated(false),
99 _do_old_gc_bootstrap(do_old_gc_bootstrap) {
100 }
101
102 ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
103 return _degen_point;
104 }
105
106 bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
107 ShenandoahHeap* const heap = ShenandoahHeap::heap();
108
109 ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);
110
111 // Reset for upcoming marking
112 entry_reset();
113
114 // Start initial mark under STW
115 vmop_entry_init_mark();
116
117 {
118 ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);
119
120 // Reset task queue stats here, rather than in mark_concurrent_roots,
121 // because remembered set scan will `push` oops into the queues and
122 // resetting after this happens will lose those counts.
123 TASKQUEUE_STATS_ONLY(_mark.task_queues()->reset_taskqueue_stats());
124
125 // Concurrent remembered set scanning
126 entry_scan_remembered_set();
127
128 // Concurrent mark roots
129 entry_mark_roots();
130 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_roots)) {
131 return false;
132 }
133
134 // Continue concurrent mark
135 entry_mark();
136 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) {
137 return false;
138 }
139 }
140
141 // Complete marking under STW, and start evacuation
142 vmop_entry_final_mark();
143
144 // If the GC was cancelled just before final mark (but after the preceding cancellation check),
145 // then the safepoint operation will do nothing and the concurrent mark will still be in progress.
146 // In this case it is safe (and necessary) to resume the degenerated cycle from the marking phase.
147 //
148 // On the other hand, if the GC is cancelled after final mark (but before this check), then the
149 // final mark safepoint operation will have finished the mark (setting concurrent mark in progress
150 // to false). In this case (final mark has completed), we need control to fall past the next
151 // cancellation check and resume the degenerated cycle from the evacuation phase.
152 if (_generation->is_concurrent_mark_in_progress()) {
153 // If the concurrent mark is still in progress after the final mark safepoint, then the GC has
154 // been cancelled. The degenerated cycle must resume from the marking phase. Without this check,
155 // the non-generational mode may fall all the way to the end of this collect routine without
156 // having done anything (besides mark most of the heap). Without having collected anything, we
157 // can expect an 'out of cycle' degenerated GC which will again mark the entire heap. This is
158 // not optimal.
159 // For the generational mode, we cannot allow this. The generational mode relies on marking
160 // (including the final mark) to rebuild portions of the card table. If the generational mode does
161 // not complete marking after it has swapped the card tables, the root set on subsequent GCs will
162 // be incomplete and heap corruption may follow.
163 bool cancelled = check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark);
164 assert(cancelled, "GC must have been cancelled between concurrent and final mark");
165 return false;
166 }
167
168 // Concurrent stack processing
169 if (heap->is_evacuation_in_progress()) {
170 entry_thread_roots();
171 }
172
173 // Process weak roots that might still point to regions that would be broken by cleanup
174 if (heap->is_concurrent_weak_root_in_progress()) {
175 entry_weak_refs();
176 entry_weak_roots();
177 }
178
179 // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
180 // the space. This would be the last action if there is nothing to evacuate. Note that
181 // we will not age young-gen objects in the case that we skip evacuation.
182 entry_cleanup_early();
183
184 heap->free_set()->log_status_under_lock();
185
186 // Perform concurrent class unloading
187 if (heap->unload_classes() &&
188 heap->is_concurrent_weak_root_in_progress()) {
189 entry_class_unloading();
190 }
191
192 // Processing strong roots
193 // This may be skipped if there is nothing to update/evacuate.
194 // If so, strong_root_in_progress would be unset.
195 if (heap->is_concurrent_strong_root_in_progress()) {
196 entry_strong_roots();
197 }
198
199 // Continue the cycle with evacuation and optional update-refs.
200 // This may be skipped if there is nothing to evacuate.
201 // If so, evac_in_progress would be unset by collection set preparation code.
202 if (heap->is_evacuation_in_progress()) {
203 // Concurrently evacuate
204 entry_evacuate();
205 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) {
206 return false;
207 }
208
209 // Perform update-refs phase.
210 vmop_entry_init_updaterefs();
211 entry_updaterefs();
212 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) {
213 return false;
214 }
215
216 // Concurrent update thread roots
217 entry_update_thread_roots();
218 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) {
219 return false;
220 }
221
222 vmop_entry_final_updaterefs();
223
224 // Update references has freed up the collection set; kick the cleanup to reclaim the space.
225 entry_cleanup_complete();
226 } else {
227 // We chose not to evacuate because we found sufficient immediate garbage.
228 // However, there may still be regions to promote in place, so do that now.
229 if (has_in_place_promotions(heap)) {
230 entry_promote_in_place();
231
232 // If the promote-in-place operation was cancelled, we can have the degenerated
233 // cycle complete the operation. It will see that no evacuations are in progress,
234 // and that there are regions wanting promotion. The risk with not handling the
235 // cancellation would be failing to restore top for these regions and leaving
236 // them unable to serve allocations for the old generation.
237 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) {
238 return false;
239 }
240 }
241
242 // At this point, the cycle is effectively complete. If the cycle has been cancelled here,
243 // the control thread will detect it on its next iteration and run a degenerated young cycle.
244 vmop_entry_final_roots();
245 _abbreviated = true;
246 }
247
248 // We defer generation resizing actions until after cset regions have been recycled. We do this even following an
249 // abbreviated cycle.
250 if (heap->mode()->is_generational()) {
251 ShenandoahGenerationalHeap::heap()->complete_concurrent_cycle();
252 }
253 return true;
254 }
255
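// The vmop_entry_* methods below schedule safepoint VM operations; each one transfers control to the
// corresponding entry_* pause method while the world is stopped.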
256 void ShenandoahConcurrentGC::vmop_entry_init_mark() {
257 ShenandoahHeap* const heap = ShenandoahHeap::heap();
258 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
259 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);
260
261 heap->try_inject_alloc_failure();
262 VM_ShenandoahInitMark op(this);
263 VMThread::execute(&op); // jump to entry_init_mark() under safepoint
264 }
265
266 void ShenandoahConcurrentGC::vmop_entry_final_mark() {
267 ShenandoahHeap* const heap = ShenandoahHeap::heap();
268 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
269 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);
270
271 heap->try_inject_alloc_failure();
272 VM_ShenandoahFinalMarkStartEvac op(this);
333 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs);
334 EventMark em("%s", msg);
335
336 // No workers used in this phase, no setup required
337 op_init_updaterefs();
338 }
339
340 void ShenandoahConcurrentGC::entry_final_updaterefs() {
341 static const char* msg = "Pause Final Update Refs";
342 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
343 EventMark em("%s", msg);
344
345 ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
346 ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
347 "final reference update");
348
349 op_final_updaterefs();
350 }
351
352 void ShenandoahConcurrentGC::entry_final_roots() {
353 const char* msg = final_roots_event_message();
354 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots);
355 EventMark em("%s", msg);
356
357 op_final_roots();
358 }
359
360 void ShenandoahConcurrentGC::entry_reset() {
361 ShenandoahHeap* const heap = ShenandoahHeap::heap();
362 heap->try_inject_alloc_failure();
363
364 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
365 {
366 const char* msg = conc_reset_event_message();
367 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
368 EventMark em("%s", msg);
369
370 ShenandoahWorkerScope scope(heap->workers(),
371 ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
372 msg);
373 op_reset();
374 }
375
376 if (_do_old_gc_bootstrap) {
377 static const char* msg = "Concurrent reset (Old)";
378 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset_old);
379 ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
380 ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
381 msg);
382 EventMark em("%s", msg);
383
384 heap->old_generation()->prepare_gc();
385 }
386 }
387
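// Remembered set scanning is only needed when collecting the young generation; other collections
// skip this phase (note the is_young() check below).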
388 void ShenandoahConcurrentGC::entry_scan_remembered_set() {
389 if (_generation->is_young()) {
390 ShenandoahHeap* const heap = ShenandoahHeap::heap();
391 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
392 const char* msg = "Concurrent remembered set scanning";
393 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::init_scan_rset);
394 EventMark em("%s", msg);
395
396 ShenandoahWorkerScope scope(heap->workers(),
397 ShenandoahWorkerPolicy::calc_workers_for_rs_scanning(),
398 msg);
399
400 heap->try_inject_alloc_failure();
401 _generation->scan_remembered_set(true /* is_concurrent */);
402 }
403 }
404
405 void ShenandoahConcurrentGC::entry_mark_roots() {
406 ShenandoahHeap* const heap = ShenandoahHeap::heap();
407 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
408 const char* msg = "Concurrent marking roots";
409 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
410 EventMark em("%s", msg);
411
412 ShenandoahWorkerScope scope(heap->workers(),
413 ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
414 "concurrent marking roots");
415
416 heap->try_inject_alloc_failure();
417 op_mark_roots();
418 }
419
420 void ShenandoahConcurrentGC::entry_mark() {
421 ShenandoahHeap* const heap = ShenandoahHeap::heap();
422 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
431 heap->try_inject_alloc_failure();
432 op_mark();
433 }
434
435 void ShenandoahConcurrentGC::entry_thread_roots() {
436 ShenandoahHeap* const heap = ShenandoahHeap::heap();
437 static const char* msg = "Concurrent thread roots";
438 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_thread_roots);
439 EventMark em("%s", msg);
440
441 ShenandoahWorkerScope scope(heap->workers(),
442 ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
443 msg);
444
445 heap->try_inject_alloc_failure();
446 op_thread_roots();
447 }
448
449 void ShenandoahConcurrentGC::entry_weak_refs() {
450 ShenandoahHeap* const heap = ShenandoahHeap::heap();
451 const char* msg = conc_weak_refs_event_message();
452 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_refs);
453 EventMark em("%s", msg);
454
455 ShenandoahWorkerScope scope(heap->workers(),
456 ShenandoahWorkerPolicy::calc_workers_for_conc_refs_processing(),
457 "concurrent weak references");
458
459 heap->try_inject_alloc_failure();
460 op_weak_refs();
461 }
462
463 void ShenandoahConcurrentGC::entry_weak_roots() {
464 ShenandoahHeap* const heap = ShenandoahHeap::heap();
465 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
466 const char* msg = conc_weak_roots_event_message();
467 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_roots);
468 EventMark em("%s", msg);
469
470 ShenandoahWorkerScope scope(heap->workers(),
471 ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
472 "concurrent weak root");
473
474 heap->try_inject_alloc_failure();
475 op_weak_roots();
476 }
477
478 void ShenandoahConcurrentGC::entry_class_unloading() {
479 ShenandoahHeap* const heap = ShenandoahHeap::heap();
480 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
481 static const char* msg = "Concurrent class unloading";
482 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_class_unload);
483 EventMark em("%s", msg);
484
485 ShenandoahWorkerScope scope(heap->workers(),
486 ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
493 void ShenandoahConcurrentGC::entry_strong_roots() {
494 ShenandoahHeap* const heap = ShenandoahHeap::heap();
495 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
496 static const char* msg = "Concurrent strong roots";
497 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_strong_roots);
498 EventMark em("%s", msg);
499
500 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_strong_roots);
501
502 ShenandoahWorkerScope scope(heap->workers(),
503 ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
504 "concurrent strong root");
505
506 heap->try_inject_alloc_failure();
507 op_strong_roots();
508 }
509
510 void ShenandoahConcurrentGC::entry_cleanup_early() {
511 ShenandoahHeap* const heap = ShenandoahHeap::heap();
512 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
513 const char* msg = conc_cleanup_event_message();
514 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_early, true /* log_heap_usage */);
515 EventMark em("%s", msg);
516
517 // This phase does not use workers, no need for setup
518 heap->try_inject_alloc_failure();
519 op_cleanup_early();
520 }
521
522 void ShenandoahConcurrentGC::entry_evacuate() {
523 ShenandoahHeap* const heap = ShenandoahHeap::heap();
524 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
525
526 static const char* msg = "Concurrent evacuation";
527 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_evac);
528 EventMark em("%s", msg);
529
530 ShenandoahWorkerScope scope(heap->workers(),
531 ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
532 "concurrent evacuation");
533
534 heap->try_inject_alloc_failure();
535 op_evacuate();
536 }
537
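// Called from collect() on the no-evacuation path, when some regions still want to be promoted in place.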
538 void ShenandoahConcurrentGC::entry_promote_in_place() {
539 shenandoah_assert_generational();
540
541 ShenandoahHeap* const heap = ShenandoahHeap::heap();
542 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
543
544 static const char* msg = "Promote in place";
545 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::promote_in_place);
546 EventMark em("%s", msg);
547
548 ShenandoahWorkerScope scope(heap->workers(),
549 ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
550 "promote in place");
551
552 ShenandoahGenerationalHeap::heap()->promote_regions_in_place(true);
553 }
554
555 void ShenandoahConcurrentGC::entry_update_thread_roots() {
556 ShenandoahHeap* const heap = ShenandoahHeap::heap();
557 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
558
559 static const char* msg = "Concurrent update thread roots";
560 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_thread_roots);
561 EventMark em("%s", msg);
562
563 // No workers used in this phase, no setup required
564 heap->try_inject_alloc_failure();
565 op_update_thread_roots();
566 }
567
568 void ShenandoahConcurrentGC::entry_updaterefs() {
569 ShenandoahHeap* const heap = ShenandoahHeap::heap();
570 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
571 static const char* msg = "Concurrent update references";
572 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs);
573 EventMark em("%s", msg);
574
575 ShenandoahWorkerScope scope(heap->workers(),
576 ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
577 "concurrent reference update");
578
579 heap->try_inject_alloc_failure();
580 op_updaterefs();
581 }
582
583 void ShenandoahConcurrentGC::entry_cleanup_complete() {
584 ShenandoahHeap* const heap = ShenandoahHeap::heap();
585 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
586 const char* msg = conc_cleanup_event_message();
587 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
588 EventMark em("%s", msg);
589
590 // This phase does not use workers, no need for setup
591 heap->try_inject_alloc_failure();
592 op_cleanup_complete();
593 }
594
595 void ShenandoahConcurrentGC::op_reset() {
596 ShenandoahHeap* const heap = ShenandoahHeap::heap();
597 if (ShenandoahPacing) {
598 heap->pacer()->setup_for_reset();
599 }
600 _generation->prepare_gc();
601 }
602
603 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
604 private:
605 ShenandoahMarkingContext* const _ctx;
606 public:
607 ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
608
609 void heap_region_do(ShenandoahHeapRegion* r) {
610 assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
611 if (r->is_active()) {
612 // Check if region needs updating its TAMS. We have updated it already during concurrent
613 // reset, so it is very likely we don't need to do another write here. Since most regions
614 // are not "active", this path is relatively rare.
615 if (_ctx->top_at_mark_start(r) != r->top()) {
616 _ctx->capture_top_at_mark_start(r);
617 }
618 } else {
619 assert(_ctx->top_at_mark_start(r) == r->top(),
620 "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
621 }
622 }
623
624 bool is_thread_safe() { return true; }
625 };
626
627 void ShenandoahConcurrentGC::start_mark() {
628 _mark.start_mark();
629 }
630
631 void ShenandoahConcurrentGC::op_init_mark() {
632 ShenandoahHeap* const heap = ShenandoahHeap::heap();
633 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
634 assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
635
636 assert(_generation->is_bitmap_clear(), "need clear marking bitmap");
637 assert(!_generation->is_mark_complete(), "should not be complete");
638 assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
639
641 if (heap->mode()->is_generational()) {
642 if (_generation->is_young()) {
643 // The current implementation of swap_remembered_set() copies the write-card-table to the read-card-table.
644 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_swap_rset);
645 _generation->swap_remembered_set();
646 }
647
648 if (_generation->is_global()) {
649 heap->old_generation()->cancel_gc();
650 } else if (heap->is_concurrent_old_mark_in_progress()) {
651 // Purge the SATB buffers, transferring any valid, old pointers to the
652 // old generation mark queue. Any pointers in a young region will be
653 // abandoned.
654 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_transfer_satb);
655 heap->old_generation()->transfer_pointers_from_satb();
656 }
657 }
658
659 if (ShenandoahVerify) {
660 heap->verifier()->verify_before_concmark();
661 }
662
663 if (VerifyBeforeGC) {
664 Universe::verify();
665 }
666
667 _generation->set_concurrent_mark_in_progress(true);
668
669 start_mark();
670
671 if (_do_old_gc_bootstrap) {
672 shenandoah_assert_generational();
673 // Update region state for both young and old regions
674 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
675 ShenandoahInitMarkUpdateRegionStateClosure cl;
676 heap->parallel_heap_region_iterate(&cl);
677 heap->old_generation()->ref_processor()->reset_thread_locals();
678 } else {
679 // Update region state for only young regions
680 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
681 ShenandoahInitMarkUpdateRegionStateClosure cl;
682 _generation->parallel_heap_region_iterate(&cl);
683 }
684
685 // Weak reference processing
686 ShenandoahReferenceProcessor* rp = _generation->ref_processor();
687 rp->reset_thread_locals();
688 rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());
689
690 // Make above changes visible to worker threads
691 OrderAccess::fence();
692
693 // Arm nmethods for concurrent mark
694 ShenandoahCodeRoots::arm_nmethods_for_mark();
695
696 ShenandoahStackWatermark::change_epoch_id();
697 if (ShenandoahPacing) {
698 heap->pacer()->setup_for_mark();
699 }
700 }
701
702 void ShenandoahConcurrentGC::op_mark_roots() {
703 _mark.mark_concurrent_roots();
704 }
705
706 void ShenandoahConcurrentGC::op_mark() {
707 _mark.concurrent_mark();
708 }
709
710 void ShenandoahConcurrentGC::op_final_mark() {
711 ShenandoahHeap* const heap = ShenandoahHeap::heap();
712 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
713 assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
714
715 if (ShenandoahVerify) {
716 heap->verifier()->verify_roots_no_forwarded();
717 }
718
719 if (!heap->cancelled_gc()) {
720 _mark.finish_mark();
721 assert(!heap->cancelled_gc(), "STW mark cannot OOM");
722
723 // Notify JVMTI that the tagmap table will need cleaning.
724 JvmtiTagMap::set_needs_cleaning();
725
726 // The collection set is chosen by prepare_regions_and_collection_set(). Additionally, certain parameters have been
727 // established to govern the evacuation efforts that are about to begin. Refer to comments on reserve members in
728 // ShenandoahGeneration and ShenandoahOldGeneration for more detail.
729 _generation->prepare_regions_and_collection_set(true /*concurrent*/);
730
731 // Has to be done after cset selection
732 heap->prepare_concurrent_roots();
733
734 if (!heap->collection_set()->is_empty()) {
735 LogTarget(Debug, gc, cset) lt;
736 if (lt.is_enabled()) {
737 ResourceMark rm;
738 LogStream ls(lt);
739 heap->collection_set()->print_on(&ls);
740 }
741
742 if (ShenandoahVerify) {
743 heap->verifier()->verify_before_evacuation();
744 }
745
746 heap->set_evacuation_in_progress(true);
747 // From here on, we need to update references.
748 heap->set_has_forwarded_objects(true);
749
750 // Arm nmethods/stack for concurrent processing
751 ShenandoahCodeRoots::arm_nmethods_for_evac();
752 ShenandoahStackWatermark::change_epoch_id();
753
754 if (ShenandoahPacing) {
755 heap->pacer()->setup_for_evac();
756 }
757 } else {
758 if (ShenandoahVerify) {
759 if (has_in_place_promotions(heap)) {
760 heap->verifier()->verify_after_concmark_with_promotions();
761 } else {
762 heap->verifier()->verify_after_concmark();
763 }
764 }
765
766 if (VerifyAfterGC) {
767 Universe::verify();
768 }
769 }
770 }
771 }
772
773 bool ShenandoahConcurrentGC::has_in_place_promotions(ShenandoahHeap* heap) {
774 return heap->mode()->is_generational() && heap->old_generation()->has_in_place_promotions();
775 }
776
777 template<bool GENERATIONAL>
778 class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
779 private:
780 OopClosure* const _oops;
781 public:
782 explicit ShenandoahConcurrentEvacThreadClosure(OopClosure* oops) : _oops(oops) {}
783
784 void do_thread(Thread* thread) override {
785 JavaThread* const jt = JavaThread::cast(thread);
786 StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc);
787 if (GENERATIONAL) {
788 ShenandoahThreadLocalData::enable_plab_promotions(thread);
789 }
790 }
791 };
792
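// The GENERATIONAL template parameter lets the generational mode re-enable PLAB promotions on the
// worker thread before it processes thread roots; the non-generational instantiation omits that step.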
793 template<bool GENERATIONAL>
794 class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask {
795 private:
796 ShenandoahJavaThreadsIterator _java_threads;
797
798 public:
799 explicit ShenandoahConcurrentEvacUpdateThreadTask(uint n_workers) :
800 WorkerTask("Shenandoah Evacuate/Update Concurrent Thread Roots"),
801 _java_threads(ShenandoahPhaseTimings::conc_thread_roots, n_workers) {
802 }
803
804 void work(uint worker_id) override {
805 if (GENERATIONAL) {
806 Thread* worker_thread = Thread::current();
807 ShenandoahThreadLocalData::enable_plab_promotions(worker_thread);
808 }
809
810 // ShenandoahEvacOOMScope has to be set up by ShenandoahContextEvacuateUpdateRootsClosure.
811 // Otherwise, it may deadlock with the watermark lock.
812 ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
813 ShenandoahConcurrentEvacThreadClosure<GENERATIONAL> thr_cl(&oops_cl);
814 _java_threads.threads_do(&thr_cl, worker_id);
815 }
816 };
817
818 void ShenandoahConcurrentGC::op_thread_roots() {
819 ShenandoahHeap* const heap = ShenandoahHeap::heap();
820 assert(heap->is_evacuation_in_progress(), "Checked by caller");
821 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
822 if (heap->mode()->is_generational()) {
823 ShenandoahConcurrentEvacUpdateThreadTask<true> task(heap->workers()->active_workers());
824 heap->workers()->run_task(&task);
825 } else {
826 ShenandoahConcurrentEvacUpdateThreadTask<false> task(heap->workers()->active_workers());
827 heap->workers()->run_task(&task);
828 }
829 }
830
831 void ShenandoahConcurrentGC::op_weak_refs() {
832 ShenandoahHeap* const heap = ShenandoahHeap::heap();
833 assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
834 // Concurrent weak refs processing
835 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
836 if (heap->gc_cause() == GCCause::_wb_breakpoint) {
837 ShenandoahBreakpoint::at_after_reference_processing_started();
838 }
839 _generation->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
840 }
841
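// Cleans up OopStorage-backed weak roots: references to objects that did not survive marking
// are cleared, while references into the collection set are evacuated and updated in place.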
842 class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
843 private:
844 ShenandoahHeap* const _heap;
845 ShenandoahMarkingContext* const _mark_context;
846 bool _evac_in_progress;
847 Thread* const _thread;
848
849 public:
850 ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
851 void do_oop(oop* p);
852 void do_oop(narrowOop* p);
853 };
854
855 ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
856 _heap(ShenandoahHeap::heap()),
857 _mark_context(ShenandoahHeap::heap()->marking_context()),
858 _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
859 _thread(Thread::current()) {
860 }
861
862 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
863 const oop obj = RawAccess<>::oop_load(p);
864 if (!CompressedOops::is_null(obj)) {
865 if (!_mark_context->is_marked(obj)) {
866 shenandoah_assert_generations_reconciled();
867 if (_heap->is_in_active_generation(obj)) {
868 // Here we are asserting that an unmarked from-space object is 'correct'. There seems to be a legitimate
869 // use-case for accessing from-space objects during concurrent class unloading. In all modes of Shenandoah,
870 // concurrent class unloading only happens during a global collection.
871 shenandoah_assert_correct(p, obj);
872 ShenandoahHeap::atomic_clear_oop(p, obj);
873 }
874 } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
875 oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
876 if (resolved == obj) {
877 resolved = _heap->evacuate_object(obj, _thread);
878 }
879 shenandoah_assert_not_in_cset_except(p, resolved, _heap->cancelled_gc());
880 ShenandoahHeap::atomic_update_oop(resolved, p, obj);
881 }
882 }
883 }
884
885 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
886 ShouldNotReachHere();
887 }
888
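// These closures are applied only for their side effects: the liveness/unloading queries are
// presumably invoked to compute (and, for nmethods, cache) the state; their results are ignored.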
889 class ShenandoahIsCLDAliveClosure : public CLDClosure {
890 public:
891 void do_cld(ClassLoaderData* cld) {
892 cld->is_alive();
893 }
894 };
895
896 class ShenandoahIsNMethodAliveClosure: public NMethodClosure {
897 public:
898 void do_nmethod(nmethod* n) {
899 n->is_unloading();
900 }
959 }
960 }
961 };
962
963 void ShenandoahConcurrentGC::op_weak_roots() {
964 ShenandoahHeap* const heap = ShenandoahHeap::heap();
965 assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
966 // Concurrent weak root processing
967 {
968 ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_work);
969 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots_work);
970 ShenandoahConcurrentWeakRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_weak_roots_work);
971 heap->workers()->run_task(&task);
972 }
973
974 // Perform handshake to flush out dead oops
975 {
976 ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_rendezvous);
977 heap->rendezvous_threads();
978 }
979 // We can only toggle the concurrent_weak_root_in_progress flag
980 // at a safepoint, so that mutators see a consistent
981 // value. The flag will be cleared at the next safepoint.
982 }
983
984 void ShenandoahConcurrentGC::op_class_unloading() {
985 ShenandoahHeap* const heap = ShenandoahHeap::heap();
986 assert(heap->is_concurrent_weak_root_in_progress() &&
987 heap->unload_classes(),
988 "Checked by caller");
989 heap->do_class_unloading();
990 }
991
992 class ShenandoahEvacUpdateCodeCacheClosure : public NMethodClosure {
993 private:
994 BarrierSetNMethod* const _bs;
995 ShenandoahEvacuateUpdateMetadataClosure _cl;
996
997 public:
998 ShenandoahEvacUpdateCodeCacheClosure() :
999 _bs(BarrierSet::barrier_set()->barrier_set_nmethod()),
1000 _cl() {
1001 }
1058 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1059 assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
1060 ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
1061 heap->workers()->run_task(&task);
1062 heap->set_concurrent_strong_root_in_progress(false);
1063 }
1064
1065 void ShenandoahConcurrentGC::op_cleanup_early() {
1066 ShenandoahHeap::heap()->free_set()->recycle_trash();
1067 }
1068
1069 void ShenandoahConcurrentGC::op_evacuate() {
1070 ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
1071 }
1072
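// Evacuation is complete at this point: flip the phase flags and prepare the heap for the
// concurrent update-references phase that follows.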
1073 void ShenandoahConcurrentGC::op_init_updaterefs() {
1074 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1075 heap->set_evacuation_in_progress(false);
1076 heap->set_concurrent_weak_root_in_progress(false);
1077 heap->prepare_update_heap_references(true /*concurrent*/);
1078 if (ShenandoahVerify) {
1079 heap->verifier()->verify_before_updaterefs();
1080 }
1081
1082 heap->set_update_refs_in_progress(true);
1083 if (ShenandoahPacing) {
1084 heap->pacer()->setup_for_updaterefs();
1085 }
1086 }
1087
1088 void ShenandoahConcurrentGC::op_updaterefs() {
1089 ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
1090 }
1091
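// Handshake closure that updates the references held in a Java thread's root oops once the
// heap references have been updated.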
1092 class ShenandoahUpdateThreadClosure : public HandshakeClosure {
1093 private:
1094 ShenandoahUpdateRefsClosure _cl;
1095 public:
1096 ShenandoahUpdateThreadClosure();
1097 void do_thread(Thread* thread);
1098 };
1099
1100 ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
1101 HandshakeClosure("Shenandoah Update Thread Roots") {
1102 }
1107 ResourceMark rm;
1108 jt->oops_do(&_cl, nullptr);
1109 }
1110 }
1111
1112 void ShenandoahConcurrentGC::op_update_thread_roots() {
1113 ShenandoahUpdateThreadClosure cl;
1114 Handshake::execute(&cl);
1115 }
1116
1117 void ShenandoahConcurrentGC::op_final_updaterefs() {
1118 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1119 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
1120 assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");
1121
1122 heap->finish_concurrent_roots();
1123
1124 // Clear cancelled GC, if set. On the cancellation path, the preceding block would have
1125 // handled everything already.
1126 if (heap->cancelled_gc()) {
1127 heap->clear_cancelled_gc(true /* clear oom handler */);
1128 }
1129
1130 // Has to be done before the collection set is cleared
1131 if (ShenandoahVerify) {
1132 heap->verifier()->verify_roots_in_to_space();
1133 }
1134
1135 // If we are running in generational mode and this is an aging cycle, this will also age active
1136 // regions that haven't been used for allocation.
1137 heap->update_heap_region_states(true /*concurrent*/);
1138
1139 heap->set_update_refs_in_progress(false);
1140 heap->set_has_forwarded_objects(false);
1141
1142 if (heap->mode()->is_generational() && heap->is_concurrent_old_mark_in_progress()) {
1143 // When the SATB barrier is left on to support concurrent old gen mark, it may pick up writes to
1144 // objects in the collection set. After those objects are evacuated, the pointers in the
1145 // SATB are no longer safe. Once we have finished update references, we are guaranteed that
1146 // no more writes to the collection set are possible.
1147 //
1148 // This will transfer any old pointers in _active_ regions from the SATB to the old gen
1149 // mark queues. All other pointers will be discarded. This would also discard any pointers
1150 // in old regions that were included in a mixed evacuation. We aren't using the SATB filter
1151 // methods here because we cannot control when they execute. If the SATB filter runs _after_
1152 // a region has been recycled, we will not be able to detect the bad pointer.
1153 //
1154 // We are not concerned about skipping this step in abbreviated cycles because regions
1155 // with no live objects cannot have been written to and so cannot have entries in the SATB
1156 // buffers.
1157 heap->old_generation()->transfer_pointers_from_satb();
1158
1159 // The aging cycle is only relevant during the evacuation cycle (for individual objects) and during
1160 // final mark (for entire regions). Both of these operations occur before final update refs.
1161 ShenandoahGenerationalHeap::heap()->set_aging_cycle(false);
1162 }
1163
1164 if (ShenandoahVerify) {
1165 heap->verifier()->verify_after_updaterefs();
1166 }
1167
1168 if (VerifyAfterGC) {
1169 Universe::verify();
1170 }
1171
1172 heap->rebuild_free_set(true /*concurrent*/);
1173 }
1174
1175 void ShenandoahConcurrentGC::op_final_roots() {
1176
1177 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1178 heap->set_concurrent_weak_root_in_progress(false);
1179 heap->set_evacuation_in_progress(false);
1180
1181 if (heap->mode()->is_generational()) {
1182 // If the cycle was shortened because enough immediate garbage was found, this could be
1183 // the last GC safepoint before concurrent marking of old gen resumes. We must be sure
1184 // that old mark threads don't see any pointers to garbage in the SATB buffers.
1185 if (heap->is_concurrent_old_mark_in_progress()) {
1186 heap->old_generation()->transfer_pointers_from_satb();
1187 }
1188
1189 if (!_generation->is_old()) {
1190 ShenandoahGenerationalHeap::heap()->update_region_ages(_generation->complete_marking_context());
1191 }
1192 }
1193 }
1194
1195 void ShenandoahConcurrentGC::op_cleanup_complete() {
1196 ShenandoahHeap::heap()->free_set()->recycle_trash();
1197 }
1198
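// If the cycle has been cancelled, remember at which point we gave up so that the follow-up
// degenerated GC can continue from there; returns true when the caller should abort.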
1199 bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
1200 if (ShenandoahHeap::heap()->cancelled_gc()) {
1201 _degen_point = point;
1202 return true;
1203 }
1204 return false;
1205 }
1206
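// The helpers below build the event/log messages for the pauses and concurrent phases of the
// cycle, appending " (unload classes)" when class unloading is enabled for this cycle.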
1207 const char* ShenandoahConcurrentGC::init_mark_event_message() const {
1208 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1209 assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1210 if (heap->unload_classes()) {
1211 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Init Mark", " (unload classes)");
1212 } else {
1213 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Init Mark", "");
1214 }
1215 }
1216
1217 const char* ShenandoahConcurrentGC::final_mark_event_message() const {
1218 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1219 assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
1220 "Should not have forwarded objects during final mark, unless old gen concurrent mark is running");
1221
1222 if (heap->unload_classes()) {
1223 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Mark", " (unload classes)");
1224 } else {
1225 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Mark", "");
1226 }
1227 }
1228
1229 const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
1230 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1231 assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
1232 "Should not have forwarded objects concurrent mark, unless old gen concurrent mark is running");
1233 if (heap->unload_classes()) {
1234 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent marking", " (unload classes)");
1235 } else {
1236 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent marking", "");
1237 }
1238 }
1239
1240 const char* ShenandoahConcurrentGC::conc_reset_event_message() const {
1241 if (ShenandoahHeap::heap()->unload_classes()) {
1242 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent reset", " (unload classes)");
1243 } else {
1244 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent reset", "");
1245 }
1246 }
1247
1248 const char* ShenandoahConcurrentGC::final_roots_event_message() const {
1249 if (ShenandoahHeap::heap()->unload_classes()) {
1250 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Roots", " (unload classes)");
1251 } else {
1252 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Roots", "");
1253 }
1254 }
1255
1256 const char* ShenandoahConcurrentGC::conc_weak_refs_event_message() const {
1257 if (ShenandoahHeap::heap()->unload_classes()) {
1258 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent weak references", " (unload classes)");
1259 } else {
1260 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent weak references", "");
1261 }
1262 }
1263
1264 const char* ShenandoahConcurrentGC::conc_weak_roots_event_message() const {
1265 if (ShenandoahHeap::heap()->unload_classes()) {
1266 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent weak roots", " (unload classes)");
1267 } else {
1268 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent weak roots", "");
1269 }
1270 }
1271
1272 const char* ShenandoahConcurrentGC::conc_cleanup_event_message() const {
1273 if (ShenandoahHeap::heap()->unload_classes()) {
1274 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent cleanup", " (unload classes)");
1275 } else {
1276 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent cleanup", "");
1277 }
1278 }
|