1 /*
2 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27
28 #include "gc/shared/barrierSetNMethod.hpp"
29 #include "gc/shared/collectorCounters.hpp"
30 #include "gc/shared/continuationGCSupport.inline.hpp"
31 #include "gc/shenandoah/shenandoahBreakpoint.hpp"
32 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
33 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
34 #include "gc/shenandoah/shenandoahFreeSet.hpp"
35 #include "gc/shenandoah/shenandoahLock.hpp"
36 #include "gc/shenandoah/shenandoahMark.inline.hpp"
37 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
38 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
39 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
40 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
41 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
42 #include "gc/shenandoah/shenandoahStackWatermark.hpp"
43 #include "gc/shenandoah/shenandoahUtils.hpp"
44 #include "gc/shenandoah/shenandoahVerifier.hpp"
45 #include "gc/shenandoah/shenandoahVMOperations.hpp"
46 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
47 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
48 #include "memory/allocation.hpp"
49 #include "prims/jvmtiTagMap.hpp"
50 #include "runtime/vmThread.hpp"
51 #include "utilities/events.hpp"
52
53 // Breakpoint support
54 class ShenandoahBreakpointGCScope : public StackObj {
55 private:
56 const GCCause::Cause _cause;
57 public:
58 ShenandoahBreakpointGCScope(GCCause::Cause cause) : _cause(cause) {
59 if (cause == GCCause::_wb_breakpoint) {
60 ShenandoahBreakpoint::start_gc();
61 ShenandoahBreakpoint::at_before_gc();
62 }
63 }
64
65 ~ShenandoahBreakpointGCScope() {
66 if (_cause == GCCause::_wb_breakpoint) {
67 ShenandoahBreakpoint::at_after_gc();
68 }
69 }
70 };
71
72 class ShenandoahBreakpointMarkScope : public StackObj {
73 private:
74 const GCCause::Cause _cause;
75 public:
76 ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
77 if (_cause == GCCause::_wb_breakpoint) {
78 ShenandoahBreakpoint::at_after_marking_started();
79 }
80 }
81
82 ~ShenandoahBreakpointMarkScope() {
83 if (_cause == GCCause::_wb_breakpoint) {
84 ShenandoahBreakpoint::at_before_marking_completed();
85 }
86 }
87 };
88
89 ShenandoahConcurrentGC::ShenandoahConcurrentGC() :
90 _mark(),
91 _degen_point(ShenandoahDegenPoint::_degenerated_unset) {
92 }
93
94 ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
95 return _degen_point;
96 }
97
98 void ShenandoahConcurrentGC::cancel() {
99 ShenandoahConcurrentMark::cancel();
100 }
101
102 bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
103 ShenandoahHeap* const heap = ShenandoahHeap::heap();
104 ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);
105
106 // Reset for upcoming marking
107 entry_reset();
108
109 // Start initial mark under STW
110 vmop_entry_init_mark();
111
112 {
113 ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);
114 // Concurrent mark roots
115 entry_mark_roots();
116 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_outside_cycle)) return false;
117
118 // Continue concurrent mark
119 entry_mark();
120 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) return false;
121 }
122
123 // Complete marking under STW, and start evacuation
124 vmop_entry_final_mark();
125
126 // Concurrent stack processing
127 if (heap->is_evacuation_in_progress()) {
128 entry_thread_roots();
129 }
130
131 // Process weak roots that might still point to regions that would be broken by cleanup
132 if (heap->is_concurrent_weak_root_in_progress()) {
133 entry_weak_refs();
134 entry_weak_roots();
135 }
136
137 // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
138 // the space. This would be the last action if there is nothing to evacuate.
139 entry_cleanup_early();
140
141 {
142 ShenandoahHeapLocker locker(heap->lock());
143 heap->free_set()->log_status();
144 }
145
146 // Perform concurrent class unloading
147 if (heap->unload_classes() &&
148 heap->is_concurrent_weak_root_in_progress()) {
149 entry_class_unloading();
150 }
151
152 // Processing strong roots
153 // This may be skipped if there is nothing to update/evacuate.
154 // If so, strong_root_in_progress would be unset.
155 if (heap->is_concurrent_strong_root_in_progress()) {
156 entry_strong_roots();
157 }
158
159 // Continue the cycle with evacuation and optional update-refs.
160 // This may be skipped if there is nothing to evacuate.
161 // If so, evac_in_progress would be unset by collection set preparation code.
162 if (heap->is_evacuation_in_progress()) {
163 // Concurrently evacuate
164 entry_evacuate();
165 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) return false;
166
167 // Perform update-refs phase.
168 vmop_entry_init_updaterefs();
169 entry_updaterefs();
170 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;
171
172 // Concurrent update thread roots
173 entry_update_thread_roots();
174 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;
175
176 vmop_entry_final_updaterefs();
177
178 // Update references freed up collection set, kick the cleanup to reclaim the space.
179 entry_cleanup_complete();
180 } else {
181 vmop_entry_final_roots();
182 }
183
184 return true;
185 }
186
187 void ShenandoahConcurrentGC::vmop_entry_init_mark() {
188 ShenandoahHeap* const heap = ShenandoahHeap::heap();
189 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
190 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);
191
192 heap->try_inject_alloc_failure();
193 VM_ShenandoahInitMark op(this);
194 VMThread::execute(&op); // jump to entry_init_mark() under safepoint
195 }
196
197 void ShenandoahConcurrentGC::vmop_entry_final_mark() {
198 ShenandoahHeap* const heap = ShenandoahHeap::heap();
199 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
200 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);
201
202 heap->try_inject_alloc_failure();
203 VM_ShenandoahFinalMarkStartEvac op(this);
204 VMThread::execute(&op); // jump to entry_final_mark under safepoint
205 }
206
207 void ShenandoahConcurrentGC::vmop_entry_init_updaterefs() {
208 ShenandoahHeap* const heap = ShenandoahHeap::heap();
209 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
210 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_update_refs_gross);
211
212 heap->try_inject_alloc_failure();
213 VM_ShenandoahInitUpdateRefs op(this);
214 VMThread::execute(&op);
215 }
216
217 void ShenandoahConcurrentGC::vmop_entry_final_updaterefs() {
218 ShenandoahHeap* const heap = ShenandoahHeap::heap();
219 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
220 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_gross);
221
222 heap->try_inject_alloc_failure();
223 VM_ShenandoahFinalUpdateRefs op(this);
224 VMThread::execute(&op);
225 }
226
227 void ShenandoahConcurrentGC::vmop_entry_final_roots() {
228 ShenandoahHeap* const heap = ShenandoahHeap::heap();
229 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
230 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_roots_gross);
231
232 // This phase does not use workers, no need for setup
233 heap->try_inject_alloc_failure();
234 VM_ShenandoahFinalRoots op(this);
235 VMThread::execute(&op);
236 }
237
238 void ShenandoahConcurrentGC::entry_init_mark() {
239 const char* msg = init_mark_event_message();
240 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_mark);
241 EventMark em("%s", msg);
242
243 ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
244 ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
245 "init marking");
246
247 op_init_mark();
248 }
249
250 void ShenandoahConcurrentGC::entry_final_mark() {
251 const char* msg = final_mark_event_message();
252 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark);
253 EventMark em("%s", msg);
254
255 ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
256 ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
257 "final marking");
258
259 op_final_mark();
260 }
261
262 void ShenandoahConcurrentGC::entry_init_updaterefs() {
263 static const char* msg = "Pause Init Update Refs";
264 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs);
265 EventMark em("%s", msg);
266
267 // No workers used in this phase, no setup required
268 op_init_updaterefs();
269 }
270
271 void ShenandoahConcurrentGC::entry_final_updaterefs() {
272 static const char* msg = "Pause Final Update Refs";
273 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
274 EventMark em("%s", msg);
275
276 ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
277 ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
278 "final reference update");
279
280 op_final_updaterefs();
281 }
282
283 void ShenandoahConcurrentGC::entry_final_roots() {
284 static const char* msg = "Pause Final Roots";
285 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots);
286 EventMark em("%s", msg);
287
288 op_final_roots();
289 }
290
291 void ShenandoahConcurrentGC::entry_reset() {
292 ShenandoahHeap* const heap = ShenandoahHeap::heap();
293 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
294 static const char* msg = "Concurrent reset";
295 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
296 EventMark em("%s", msg);
297
298 ShenandoahWorkerScope scope(heap->workers(),
299 ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
300 "concurrent reset");
301
302 heap->try_inject_alloc_failure();
303 op_reset();
304 }
305
306 void ShenandoahConcurrentGC::entry_mark_roots() {
307 ShenandoahHeap* const heap = ShenandoahHeap::heap();
308 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
309 const char* msg = "Concurrent marking roots";
310 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
311 EventMark em("%s", msg);
312
313 ShenandoahWorkerScope scope(heap->workers(),
314 ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
315 "concurrent marking roots");
316
317 heap->try_inject_alloc_failure();
318 op_mark_roots();
319 }
320
321 void ShenandoahConcurrentGC::entry_mark() {
322 ShenandoahHeap* const heap = ShenandoahHeap::heap();
323 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
324 const char* msg = conc_mark_event_message();
325 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark);
326 EventMark em("%s", msg);
327
328 ShenandoahWorkerScope scope(heap->workers(),
329 ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
330 "concurrent marking");
331
332 heap->try_inject_alloc_failure();
333 op_mark();
334 }
335
336 void ShenandoahConcurrentGC::entry_thread_roots() {
337 ShenandoahHeap* const heap = ShenandoahHeap::heap();
338 static const char* msg = "Concurrent thread roots";
339 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_thread_roots);
340 EventMark em("%s", msg);
341
342 ShenandoahWorkerScope scope(heap->workers(),
343 ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
344 msg);
345
346 heap->try_inject_alloc_failure();
347 op_thread_roots();
348 }
349
350 void ShenandoahConcurrentGC::entry_weak_refs() {
351 ShenandoahHeap* const heap = ShenandoahHeap::heap();
352 static const char* msg = "Concurrent weak references";
353 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_refs);
354 EventMark em("%s", msg);
355
356 ShenandoahWorkerScope scope(heap->workers(),
357 ShenandoahWorkerPolicy::calc_workers_for_conc_refs_processing(),
358 "concurrent weak references");
359
360 heap->try_inject_alloc_failure();
361 op_weak_refs();
362 }
363
364 void ShenandoahConcurrentGC::entry_weak_roots() {
365 ShenandoahHeap* const heap = ShenandoahHeap::heap();
366 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
367 static const char* msg = "Concurrent weak roots";
368 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_roots);
369 EventMark em("%s", msg);
370
371 ShenandoahWorkerScope scope(heap->workers(),
372 ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
373 "concurrent weak root");
374
375 heap->try_inject_alloc_failure();
376 op_weak_roots();
377 }
378
379 void ShenandoahConcurrentGC::entry_class_unloading() {
380 ShenandoahHeap* const heap = ShenandoahHeap::heap();
381 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
382 static const char* msg = "Concurrent class unloading";
383 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_class_unload);
384 EventMark em("%s", msg);
385
386 ShenandoahWorkerScope scope(heap->workers(),
387 ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
388 "concurrent class unloading");
389
390 heap->try_inject_alloc_failure();
391 op_class_unloading();
392 }
393
394 void ShenandoahConcurrentGC::entry_strong_roots() {
395 ShenandoahHeap* const heap = ShenandoahHeap::heap();
396 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
397 static const char* msg = "Concurrent strong roots";
398 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_strong_roots);
399 EventMark em("%s", msg);
400
401 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_strong_roots);
402
403 ShenandoahWorkerScope scope(heap->workers(),
404 ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
405 "concurrent strong root");
406
407 heap->try_inject_alloc_failure();
408 op_strong_roots();
409 }
410
411 void ShenandoahConcurrentGC::entry_cleanup_early() {
412 ShenandoahHeap* const heap = ShenandoahHeap::heap();
413 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
414 static const char* msg = "Concurrent cleanup";
415 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_early, true /* log_heap_usage */);
416 EventMark em("%s", msg);
417
418 // This phase does not use workers, no need for setup
419 heap->try_inject_alloc_failure();
420 op_cleanup_early();
421 }
422
423 void ShenandoahConcurrentGC::entry_evacuate() {
424 ShenandoahHeap* const heap = ShenandoahHeap::heap();
425 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
426
427 static const char* msg = "Concurrent evacuation";
428 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_evac);
429 EventMark em("%s", msg);
430
431 ShenandoahWorkerScope scope(heap->workers(),
432 ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
433 "concurrent evacuation");
434
435 heap->try_inject_alloc_failure();
436 op_evacuate();
437 }
438
439 void ShenandoahConcurrentGC::entry_update_thread_roots() {
440 ShenandoahHeap* const heap = ShenandoahHeap::heap();
441 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
442
443 static const char* msg = "Concurrent update thread roots";
444 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_thread_roots);
445 EventMark em("%s", msg);
446
447 // No workers used in this phase, no setup required
448 heap->try_inject_alloc_failure();
449 op_update_thread_roots();
450 }
451
452 void ShenandoahConcurrentGC::entry_updaterefs() {
453 ShenandoahHeap* const heap = ShenandoahHeap::heap();
454 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
455 static const char* msg = "Concurrent update references";
456 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs);
457 EventMark em("%s", msg);
458
459 ShenandoahWorkerScope scope(heap->workers(),
460 ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
461 "concurrent reference update");
462
463 heap->try_inject_alloc_failure();
464 op_updaterefs();
465 }
466
467 void ShenandoahConcurrentGC::entry_cleanup_complete() {
468 ShenandoahHeap* const heap = ShenandoahHeap::heap();
469 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
470 static const char* msg = "Concurrent cleanup";
471 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
472 EventMark em("%s", msg);
473
474 // This phase does not use workers, no need for setup
475 heap->try_inject_alloc_failure();
476 op_cleanup_complete();
477 }
478
479 void ShenandoahConcurrentGC::op_reset() {
480 ShenandoahHeap* const heap = ShenandoahHeap::heap();
481 if (ShenandoahPacing) {
482 heap->pacer()->setup_for_reset();
483 }
484
485 heap->prepare_gc();
486 }
487
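// Init-mark region-state closure: captures top-at-mark-start (TAMS) for every active region,
// so that objects allocated above TAMS during the cycle are treated as implicitly live by marking.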
488 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
489 private:
490 ShenandoahMarkingContext* const _ctx;
491 public:
492 ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
493
494 void heap_region_do(ShenandoahHeapRegion* r) {
495 assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
496 if (r->is_active()) {
497 // Check if region needs updating its TAMS. We have updated it already during concurrent
498 // reset, so it is very likely we don't need to do another write here.
499 if (_ctx->top_at_mark_start(r) != r->top()) {
500 _ctx->capture_top_at_mark_start(r);
501 }
502 } else {
503 assert(_ctx->top_at_mark_start(r) == r->top(),
504 "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
505 }
506 }
507
508 bool is_thread_safe() { return true; }
509 };
510
511 void ShenandoahConcurrentGC::start_mark() {
512 _mark.start_mark();
513 }
514
515 void ShenandoahConcurrentGC::op_init_mark() {
516 ShenandoahHeap* const heap = ShenandoahHeap::heap();
517 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
518 assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
519
520 assert(heap->marking_context()->is_bitmap_clear(), "need clear marking bitmap");
521 assert(!heap->marking_context()->is_complete(), "should not be complete");
522 assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
523
524 if (ShenandoahVerify) {
525 heap->verifier()->verify_before_concmark();
526 }
527
528 if (VerifyBeforeGC) {
529 Universe::verify();
530 }
531
532 heap->set_concurrent_mark_in_progress(true);
533
534 start_mark();
535
536 {
537 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
538 ShenandoahInitMarkUpdateRegionStateClosure cl;
539 heap->parallel_heap_region_iterate(&cl);
540 }
541
542 // Weak reference processing
543 ShenandoahReferenceProcessor* rp = heap->ref_processor();
544 rp->reset_thread_locals();
545 rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());
546
547 // Make above changes visible to worker threads
548 OrderAccess::fence();
549
550 // Arm nmethods for concurrent mark
551 ShenandoahCodeRoots::arm_nmethods_for_mark();
552
553 ShenandoahStackWatermark::change_epoch_id();
554 if (ShenandoahPacing) {
555 heap->pacer()->setup_for_mark();
556 }
557 }
558
559 void ShenandoahConcurrentGC::op_mark_roots() {
560 _mark.mark_concurrent_roots();
561 }
562
563 void ShenandoahConcurrentGC::op_mark() {
564 _mark.concurrent_mark();
565 }
566
567 void ShenandoahConcurrentGC::op_final_mark() {
568 ShenandoahHeap* const heap = ShenandoahHeap::heap();
569 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
570 assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
571
572 if (ShenandoahVerify) {
573 heap->verifier()->verify_roots_no_forwarded();
574 }
575
576 if (!heap->cancelled_gc()) {
577 _mark.finish_mark();
578 assert(!heap->cancelled_gc(), "STW mark cannot OOM");
579
580 // Notify JVMTI that the tagmap table will need cleaning.
581 JvmtiTagMap::set_needs_cleaning();
582
583 heap->prepare_regions_and_collection_set(true /*concurrent*/);
584
585 // Has to be done after cset selection
586 heap->prepare_concurrent_roots();
587
588 if (!heap->collection_set()->is_empty()) {
589 if (ShenandoahVerify) {
590 heap->verifier()->verify_before_evacuation();
591 }
592
593 heap->set_evacuation_in_progress(true);
594 // From here on, we need to update references.
595 heap->set_has_forwarded_objects(true);
596
597 // Verify before arming for concurrent processing.
598 // Otherwise, verification can trigger stack processing.
599 if (ShenandoahVerify) {
600 heap->verifier()->verify_during_evacuation();
601 }
602
603 // Arm nmethods/stack for concurrent processing
604 ShenandoahCodeRoots::arm_nmethods_for_evac();
605 ShenandoahStackWatermark::change_epoch_id();
606
607 if (ShenandoahPacing) {
608 heap->pacer()->setup_for_evac();
609 }
610 } else {
611 if (ShenandoahVerify) {
612 heap->verifier()->verify_after_concmark();
613 }
614
615 if (VerifyAfterGC) {
616 Universe::verify();
617 }
618 }
619 }
620 }
621
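// Thread closure used during concurrent stack processing: forces completion of lazy stack-watermark
// processing for a Java thread, applying the given oop closure to its stack-root oops.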
622 class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
623 private:
624 OopClosure* const _oops;
625
626 public:
627 ShenandoahConcurrentEvacThreadClosure(OopClosure* oops);
628 void do_thread(Thread* thread);
629 };
630
631 ShenandoahConcurrentEvacThreadClosure::ShenandoahConcurrentEvacThreadClosure(OopClosure* oops) :
632 _oops(oops) {
633 }
634
635 void ShenandoahConcurrentEvacThreadClosure::do_thread(Thread* thread) {
636 JavaThread* const jt = JavaThread::cast(thread);
637 StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc);
638 }
639
640 class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask {
641 private:
642 ShenandoahJavaThreadsIterator _java_threads;
643
644 public:
645 ShenandoahConcurrentEvacUpdateThreadTask(uint n_workers) :
646 WorkerTask("Shenandoah Evacuate/Update Concurrent Thread Roots"),
647 _java_threads(ShenandoahPhaseTimings::conc_thread_roots, n_workers) {
648 }
649
650 void work(uint worker_id) {
651 // ShenandoahEvacOOMScope has to be set up by ShenandoahContextEvacuateUpdateRootsClosure.
652 // Otherwise, it may deadlock with the watermark lock.
653 ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
654 ShenandoahConcurrentEvacThreadClosure thr_cl(&oops_cl);
655 _java_threads.threads_do(&thr_cl, worker_id);
656 }
657 };
658
659 void ShenandoahConcurrentGC::op_thread_roots() {
660 ShenandoahHeap* const heap = ShenandoahHeap::heap();
661 assert(heap->is_evacuation_in_progress(), "Checked by caller");
662 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
663 ShenandoahConcurrentEvacUpdateThreadTask task(heap->workers()->active_workers());
664 heap->workers()->run_task(&task);
665 }
666
667 void ShenandoahConcurrentGC::op_weak_refs() {
668 ShenandoahHeap* const heap = ShenandoahHeap::heap();
669 assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
670 // Concurrent weak refs processing
671 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
672 if (heap->gc_cause() == GCCause::_wb_breakpoint) {
673 ShenandoahBreakpoint::at_after_reference_processing_started();
674 }
675 heap->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
676 }
677
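// Closure over OopStorage-backed weak roots: unmarked (dead) referents are cleared to null,
// while live referents in the collection set are evacuated and the root slot is updated.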
678 class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
679 private:
680 ShenandoahHeap* const _heap;
681 ShenandoahMarkingContext* const _mark_context;
682 bool _evac_in_progress;
683 Thread* const _thread;
684
685 public:
686 ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
687 void do_oop(oop* p);
688 void do_oop(narrowOop* p);
689 };
690
691 ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
692 _heap(ShenandoahHeap::heap()),
693 _mark_context(ShenandoahHeap::heap()->marking_context()),
694 _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
695 _thread(Thread::current()) {
696 }
697
698 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
699 const oop obj = RawAccess<>::oop_load(p);
700 if (!CompressedOops::is_null(obj)) {
701 if (!_mark_context->is_marked(obj)) {
702 // Note: The obj is dead here. Do not touch it, just clear.
703 ShenandoahHeap::atomic_clear_oop(p, obj);
704 } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
705 oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
706 if (resolved == obj) {
707 resolved = _heap->evacuate_object(obj, _thread);
708 }
709 ShenandoahHeap::atomic_update_oop(resolved, p, obj);
710 assert(_heap->cancelled_gc() ||
711 _mark_context->is_marked(resolved) && !_heap->in_collection_set(resolved),
712 "Sanity");
713 }
714 }
715 }
716
717 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
718 ShouldNotReachHere();
719 }
720
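// The is_alive() call below is made for its barrier side effect on the CLD holder;
// the return value is intentionally ignored.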
721 class ShenandoahIsCLDAliveClosure : public CLDClosure {
722 public:
723 void do_cld(ClassLoaderData* cld) {
724 cld->is_alive();
725 }
726 };
727
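// The is_unloading() call below computes and caches the nmethod's unloading state;
// the return value is intentionally ignored.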
728 class ShenandoahIsNMethodAliveClosure: public NMethodClosure {
729 public:
730 void do_nmethod(nmethod* n) {
731 n->is_unloading();
732 }
752 _nmethod_itr(ShenandoahCodeRoots::table()),
753 _phase(phase) {}
754
755 ~ShenandoahConcurrentWeakRootsEvacUpdateTask() {
756 // Notify runtime data structures of potentially dead oops
757 _vm_roots.report_num_dead();
758 }
759
760 void work(uint worker_id) {
761 ShenandoahConcurrentWorkerSession worker_session(worker_id);
762 ShenandoahSuspendibleThreadSetJoiner sts_join;
763 {
764 ShenandoahEvacOOMScope oom;
765 // jni_roots and weak_roots are OopStorage backed roots, concurrent iteration
766 // may race against OopStorage::release() calls.
767 ShenandoahEvacUpdateCleanupOopStorageRootsClosure cl;
768 _vm_roots.oops_do(&cl, worker_id);
769 }
770
771 // If we are going to perform concurrent class unloading later on, we need to
772 // clean up the weak oops in CLDs and determine each nmethod's unloading state, so that we
773 // can clean up immediate garbage sooner.
774 if (ShenandoahHeap::heap()->unload_classes()) {
775 // Applies ShenandoahIsCLDAlive closure to CLDs, native barrier will either null the
776 // CLD's holder or evacuate it.
777 {
778 ShenandoahIsCLDAliveClosure is_cld_alive;
779 _cld_roots.cld_do(&is_cld_alive, worker_id);
780 }
781
782 // Applies ShenandoahIsNMethodAliveClosure to registered nmethods.
783 // The closure calls nmethod->is_unloading(). The is_unloading
784 // state is cached; therefore, during the concurrent class unloading phase,
785 // we will not touch the metadata of unloading nmethods.
786 {
787 ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
788 ShenandoahIsNMethodAliveClosure is_nmethod_alive;
789 _nmethod_itr.nmethods_do(&is_nmethod_alive);
790 }
791 }
792 }
793 };
794
795 void ShenandoahConcurrentGC::op_weak_roots() {
796 ShenandoahHeap* const heap = ShenandoahHeap::heap();
797 assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
798 // Concurrent weak root processing
799 {
800 ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_work);
801 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots_work);
802 ShenandoahConcurrentWeakRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_weak_roots_work);
803 heap->workers()->run_task(&task);
804 }
805
806 // Perform handshake to flush out dead oops
807 {
808 ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_rendezvous);
809 heap->rendezvous_threads("Shenandoah Concurrent Weak Roots");
810 }
811 }
812
813 void ShenandoahConcurrentGC::op_class_unloading() {
814 ShenandoahHeap* const heap = ShenandoahHeap::heap();
815 assert (heap->is_concurrent_weak_root_in_progress() &&
816 heap->unload_classes(),
817 "Checked by caller");
818 heap->do_class_unloading();
819 }
820
821 class ShenandoahEvacUpdateCodeCacheClosure : public NMethodClosure {
822 private:
823 BarrierSetNMethod* const _bs;
824 ShenandoahEvacuateUpdateMetadataClosure _cl;
825
826 public:
827 ShenandoahEvacUpdateCodeCacheClosure() :
875 }
876
877 // Cannot set up ShenandoahEvacOOMScope here, due to a potential deadlock with nmethod_entry_barrier.
878 if (!ShenandoahHeap::heap()->unload_classes()) {
879 ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
880 ShenandoahEvacUpdateCodeCacheClosure cl;
881 _nmethod_itr.nmethods_do(&cl);
882 }
883 }
884 };
885
886 void ShenandoahConcurrentGC::op_strong_roots() {
887 ShenandoahHeap* const heap = ShenandoahHeap::heap();
888 assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
889 ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
890 heap->workers()->run_task(&task);
891 heap->set_concurrent_strong_root_in_progress(false);
892 }
893
894 void ShenandoahConcurrentGC::op_cleanup_early() {
895 ShenandoahHeap::heap()->free_set()->recycle_trash();
896 }
897
898 void ShenandoahConcurrentGC::op_evacuate() {
899 ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
900 }
901
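// Evacuation is complete at this point: clear the evacuation/weak-root flags and prepare the heap
// for the concurrent update-references phase.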
902 void ShenandoahConcurrentGC::op_init_updaterefs() {
903 ShenandoahHeap* const heap = ShenandoahHeap::heap();
904 heap->set_evacuation_in_progress(false);
905 heap->set_concurrent_weak_root_in_progress(false);
906 heap->prepare_update_heap_references(true /*concurrent*/);
907 heap->set_update_refs_in_progress(true);
908
909 if (ShenandoahPacing) {
910 heap->pacer()->setup_for_updaterefs();
911 }
912 }
913
914 void ShenandoahConcurrentGC::op_updaterefs() {
915 ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
916 }
917
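// Handshake closure that applies ShenandoahUpdateRefsClosure to each Java thread's oops,
// updating stack references to objects that were evacuated.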
918 class ShenandoahUpdateThreadClosure : public HandshakeClosure {
919 private:
920 ShenandoahUpdateRefsClosure _cl;
921 public:
922 ShenandoahUpdateThreadClosure();
923 void do_thread(Thread* thread);
924 };
925
926 ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
927 HandshakeClosure("Shenandoah Update Thread Roots") {
928 }
929
930 void ShenandoahUpdateThreadClosure::do_thread(Thread* thread) {
931 if (thread->is_Java_thread()) {
932 JavaThread* jt = JavaThread::cast(thread);
933 ResourceMark rm;
934 jt->oops_do(&_cl, nullptr);
935 }
936 }
937
938 void ShenandoahConcurrentGC::op_update_thread_roots() {
939 ShenandoahUpdateThreadClosure cl;
940 Handshake::execute(&cl);
941 }
942
943 void ShenandoahConcurrentGC::op_final_updaterefs() {
944 ShenandoahHeap* const heap = ShenandoahHeap::heap();
945 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
946 assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");
947
948 heap->finish_concurrent_roots();
949
950 // Clear cancelled GC, if set. On the cancellation path, the preceding block would have
951 // handled everything.
952 if (heap->cancelled_gc()) {
953 heap->clear_cancelled_gc();
954 }
955
956 // Has to be done before the cset is cleared
957 if (ShenandoahVerify) {
958 heap->verifier()->verify_roots_in_to_space();
959 }
960
961 heap->update_heap_region_states(true /*concurrent*/);
962
963 heap->set_update_refs_in_progress(false);
964 heap->set_has_forwarded_objects(false);
965
966 if (ShenandoahVerify) {
967 heap->verifier()->verify_after_updaterefs();
968 }
969
970 if (VerifyAfterGC) {
971 Universe::verify();
972 }
973
974 heap->rebuild_free_set(true /*concurrent*/);
975 }
976
977 void ShenandoahConcurrentGC::op_final_roots() {
978 ShenandoahHeap::heap()->set_concurrent_weak_root_in_progress(false);
979 }
980
981 void ShenandoahConcurrentGC::op_cleanup_complete() {
982 ShenandoahHeap::heap()->free_set()->recycle_trash();
983 }
984
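// If the GC has been cancelled, record the point from which a degenerated cycle should
// resume and tell the caller to abort the concurrent cycle.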
985 bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
986 if (ShenandoahHeap::heap()->cancelled_gc()) {
987 _degen_point = point;
988 return true;
989 }
990 return false;
991 }
992
993 const char* ShenandoahConcurrentGC::init_mark_event_message() const {
994 ShenandoahHeap* const heap = ShenandoahHeap::heap();
995 assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
996 if (heap->unload_classes()) {
997 return "Pause Init Mark (unload classes)";
998 } else {
999 return "Pause Init Mark";
1000 }
1001 }
1002
1003 const char* ShenandoahConcurrentGC::final_mark_event_message() const {
1004 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1005 assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1006 if (heap->unload_classes()) {
1007 return "Pause Final Mark (unload classes)";
1008 } else {
1009 return "Pause Final Mark";
1010 }
1011 }
1012
1013 const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
1014 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1015 assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1016 if (heap->unload_classes()) {
1017 return "Concurrent marking (unload classes)";
1018 } else {
1019 return "Concurrent marking";
1020 }
1021 }
1 /*
2 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
4 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
5 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 *
7 * This code is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 only, as
9 * published by the Free Software Foundation.
10 *
11 * This code is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * version 2 for more details (a copy is included in the LICENSE file that
15 * accompanied this code).
16 *
17 * You should have received a copy of the GNU General Public License version
18 * 2 along with this work; if not, write to the Free Software Foundation,
19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 *
21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
22 * or visit www.oracle.com if you need additional information or have any
23 * questions.
24 *
25 */
26
27 #include "precompiled.hpp"
28
29 #include "gc/shared/barrierSetNMethod.hpp"
30 #include "gc/shared/collectorCounters.hpp"
31 #include "gc/shared/continuationGCSupport.inline.hpp"
32 #include "gc/shenandoah/shenandoahBreakpoint.hpp"
33 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
34 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
35 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
36 #include "gc/shenandoah/shenandoahFreeSet.hpp"
37 #include "gc/shenandoah/shenandoahGeneration.hpp"
38 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
39 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
40 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
41 #include "gc/shenandoah/shenandoahLock.hpp"
42 #include "gc/shenandoah/shenandoahMark.inline.hpp"
43 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
44 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
45 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
46 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
47 #include "gc/shenandoah/shenandoahStackWatermark.hpp"
48 #include "gc/shenandoah/shenandoahUtils.hpp"
49 #include "gc/shenandoah/shenandoahVerifier.hpp"
50 #include "gc/shenandoah/shenandoahVMOperations.hpp"
51 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
52 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
53 #include "memory/allocation.hpp"
54 #include "prims/jvmtiTagMap.hpp"
55 #include "runtime/vmThread.hpp"
56 #include "utilities/events.hpp"
57
58 // Breakpoint support
59 class ShenandoahBreakpointGCScope : public StackObj {
60 private:
61 const GCCause::Cause _cause;
62 public:
63 ShenandoahBreakpointGCScope(GCCause::Cause cause) : _cause(cause) {
64 if (cause == GCCause::_wb_breakpoint) {
65 ShenandoahBreakpoint::start_gc();
66 ShenandoahBreakpoint::at_before_gc();
67 }
68 }
69
70 ~ShenandoahBreakpointGCScope() {
71 if (_cause == GCCause::_wb_breakpoint) {
72 ShenandoahBreakpoint::at_after_gc();
73 }
74 }
75 };
76
77 class ShenandoahBreakpointMarkScope : public StackObj {
78 private:
79 const GCCause::Cause _cause;
80 public:
81 ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
82 if (_cause == GCCause::_wb_breakpoint) {
83 ShenandoahBreakpoint::at_after_marking_started();
84 }
85 }
86
87 ~ShenandoahBreakpointMarkScope() {
88 if (_cause == GCCause::_wb_breakpoint) {
89 ShenandoahBreakpoint::at_before_marking_completed();
90 }
91 }
92 };
93
94 ShenandoahConcurrentGC::ShenandoahConcurrentGC(ShenandoahGeneration* generation, bool do_old_gc_bootstrap) :
95 _mark(generation),
96 _generation(generation),
97 _degen_point(ShenandoahDegenPoint::_degenerated_unset),
98 _abbreviated(false),
99 _do_old_gc_bootstrap(do_old_gc_bootstrap) {
100 }
101
102 ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
103 return _degen_point;
104 }
105
106 void ShenandoahConcurrentGC::entry_concurrent_update_refs_prepare(ShenandoahHeap* const heap) {
107 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
108 const char* msg = conc_init_update_refs_event_message();
109 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs_prepare);
110 EventMark em("%s", msg);
111
112 // Evacuation is complete, retire gc labs and change gc state
113 heap->concurrent_prepare_for_update_refs();
114 }
115
116 bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
117 ShenandoahHeap* const heap = ShenandoahHeap::heap();
118
119 ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);
120
121 // Reset for upcoming marking
122 entry_reset();
123
124 // Start initial mark under STW
125 vmop_entry_init_mark();
126
127 {
128 ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);
129
130 // Reset task queue stats here, rather than in mark_concurrent_roots,
131 // because remembered set scan will `push` oops into the queues and
132 // resetting after this happens will lose those counts.
133 TASKQUEUE_STATS_ONLY(_mark.task_queues()->reset_taskqueue_stats());
134
135 // Concurrent remembered set scanning
136 entry_scan_remembered_set();
137
138 // Concurrent mark roots
139 entry_mark_roots();
140 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_roots)) {
141 return false;
142 }
143
144 // Continue concurrent mark
145 entry_mark();
146 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) {
147 return false;
148 }
149 }
150
151 // Complete marking under STW, and start evacuation
152 vmop_entry_final_mark();
153
154 // If the GC was cancelled before final mark, nothing happens on the safepoint. We are still
155 // in the marking phase and must resume the degenerated cycle from there. If the GC was cancelled
156 // after final mark, then we've entered the evacuation phase and must resume the degenerated cycle
157 // from that phase.
158 if (_generation->is_concurrent_mark_in_progress()) {
159 bool cancelled = check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark);
160 assert(cancelled, "GC must have been cancelled between concurrent and final mark");
161 return false;
162 }
163
164 assert(heap->is_concurrent_weak_root_in_progress(), "Must be doing weak roots now");
165
166 // Concurrent stack processing
167 if (heap->is_evacuation_in_progress()) {
168 entry_thread_roots();
169 }
170
171 // Process weak roots that might still point to regions that would be broken by cleanup.
172 // We cannot recycle regions because weak roots need to know what is marked in trashed regions.
173 entry_weak_refs();
174 entry_weak_roots();
175
176 // Perform concurrent class unloading before any regions get recycled. Class unloading may
177 // need to inspect unmarked objects in trashed regions.
178 if (heap->unload_classes()) {
179 entry_class_unloading();
180 }
181
182 // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
183 // the space. This would be the last action if there is nothing to evacuate. Note that
184 // we will not age young-gen objects in the case that we skip evacuation.
185 entry_cleanup_early();
186
187 heap->free_set()->log_status_under_lock();
188
189 // Processing strong roots
190 // This may be skipped if there is nothing to update/evacuate.
191 // If so, strong_root_in_progress would be unset.
192 if (heap->is_concurrent_strong_root_in_progress()) {
193 entry_strong_roots();
194 }
195
196 // Continue the cycle with evacuation and optional update-refs.
197 // This may be skipped if there is nothing to evacuate.
198 // If so, evac_in_progress would be unset by collection set preparation code.
199 if (heap->is_evacuation_in_progress()) {
200 // Concurrently evacuate
201 entry_evacuate();
202 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) {
203 return false;
204 }
205
206 entry_concurrent_update_refs_prepare(heap);
207
208 // Perform update-refs phase.
209 if (ShenandoahVerify || ShenandoahPacing) {
210 vmop_entry_init_update_refs();
211 }
212
213 entry_update_refs();
214 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_update_refs)) {
215 return false;
216 }
217
218 // Concurrent update thread roots
219 entry_update_thread_roots();
220 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_update_refs)) {
221 return false;
222 }
223
224 vmop_entry_final_update_refs();
225
226 // Update references freed up collection set, kick the cleanup to reclaim the space.
227 entry_cleanup_complete();
228 } else {
229 if (!entry_final_roots()) {
230 assert(_degen_point != _degenerated_unset, "Need to know where to start degenerated cycle");
231 return false;
232 }
233
234 if (VerifyAfterGC) {
235 vmop_entry_verify_final_roots();
236 }
237 _abbreviated = true;
238 }
239
240 // We defer generation resizing actions until after cset regions have been recycled. We do this even following an
241 // abbreviated cycle.
242 if (heap->mode()->is_generational()) {
243 ShenandoahGenerationalHeap::heap()->complete_concurrent_cycle();
244 }
245
246 // Instead of always resetting immediately before the start of a new GC, we can often reset at the end of the
247 // previous GC. This allows us to start the next GC cycle more quickly after a trigger condition is detected,
248 // reducing the likelihood that GC will degenerate.
249 entry_reset_after_collect();
250
251 return true;
252 }
253
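// Completes an abbreviated cycle (one that found enough immediate garbage to skip evacuation):
// performs any pending in-place promotions and publishes the final gc-state change.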
254 bool ShenandoahConcurrentGC::complete_abbreviated_cycle() {
255 shenandoah_assert_generational();
256
257 ShenandoahGenerationalHeap* const heap = ShenandoahGenerationalHeap::heap();
258
259 // We chose not to evacuate because we found sufficient immediate garbage.
260 // However, there may still be regions to promote in place, so do that now.
261 if (heap->old_generation()->has_in_place_promotions()) {
262 entry_promote_in_place();
263
264 // If the promote-in-place operation was cancelled, we can have the degenerated
265 // cycle complete the operation. It will see that no evacuations are in progress,
266 // and that there are regions wanting promotion. The risk with not handling the
267 // cancellation would be failing to restore top for these regions and leaving
269 // them unable to serve allocations for the old generation. This will leave the weak
269 // roots flag set (the degenerated cycle will unset it).
270 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) {
271 return false;
272 }
273 }
274
275 // At this point, the cycle is effectively complete. If the cycle has been cancelled here,
276 // the control thread will detect it on its next iteration and run a degenerated young cycle.
277 if (!_generation->is_old()) {
278 heap->update_region_ages(_generation->complete_marking_context());
279 }
280
281 if (!heap->is_concurrent_old_mark_in_progress()) {
282 heap->concurrent_final_roots();
283 } else {
284 // Since the cycle was shortened for having enough immediate garbage, this will be
285 // the last phase before concurrent marking of old resumes. We must be sure
286 // that old mark threads don't see any pointers to garbage in the SATB queues. Even
287 // though nothing was evacuated, overwriting unreachable weak roots with null may still
288 // put pointers to regions that become trash in the SATB queues. The following will
289 // piggyback flushing the thread local SATB queues on the same handshake that propagates
290 // the gc state change.
291 ShenandoahSATBMarkQueueSet& satb_queues = ShenandoahBarrierSet::satb_mark_queue_set();
292 ShenandoahFlushSATBHandshakeClosure complete_thread_local_satb_buffers(satb_queues);
293 heap->concurrent_final_roots(&complete_thread_local_satb_buffers);
294 heap->old_generation()->concurrent_transfer_pointers_from_satb();
295 }
296 return true;
297 }
298
299
300 void ShenandoahConcurrentGC::vmop_entry_init_mark() {
301 ShenandoahHeap* const heap = ShenandoahHeap::heap();
302 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
303 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);
304
305 heap->try_inject_alloc_failure();
306 VM_ShenandoahInitMark op(this);
307 VMThread::execute(&op); // jump to entry_init_mark() under safepoint
308 }
309
310 void ShenandoahConcurrentGC::vmop_entry_final_mark() {
311 ShenandoahHeap* const heap = ShenandoahHeap::heap();
312 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
313 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);
314
315 heap->try_inject_alloc_failure();
316 VM_ShenandoahFinalMarkStartEvac op(this);
317 VMThread::execute(&op); // jump to entry_final_mark under safepoint
318 }
319
320 void ShenandoahConcurrentGC::vmop_entry_init_update_refs() {
321 ShenandoahHeap* const heap = ShenandoahHeap::heap();
322 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
323 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_update_refs_gross);
324
325 heap->try_inject_alloc_failure();
326 VM_ShenandoahInitUpdateRefs op(this);
327 VMThread::execute(&op);
328 }
329
330 void ShenandoahConcurrentGC::vmop_entry_final_update_refs() {
331 ShenandoahHeap* const heap = ShenandoahHeap::heap();
332 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
333 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_gross);
334
335 heap->try_inject_alloc_failure();
336 VM_ShenandoahFinalUpdateRefs op(this);
337 VMThread::execute(&op);
338 }
339
340 void ShenandoahConcurrentGC::vmop_entry_verify_final_roots() {
341 ShenandoahHeap* const heap = ShenandoahHeap::heap();
342 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
343 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_roots_gross);
344
345 // This phase does not use workers, no need for setup
346 heap->try_inject_alloc_failure();
347 VM_ShenandoahFinalRoots op(this);
348 VMThread::execute(&op);
349 }
350
351 void ShenandoahConcurrentGC::entry_init_mark() {
352 const char* msg = init_mark_event_message();
353 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_mark);
354 EventMark em("%s", msg);
355
356 ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
357 ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
358 "init marking");
359
360 op_init_mark();
361 }
362
363 void ShenandoahConcurrentGC::entry_final_mark() {
364 const char* msg = final_mark_event_message();
365 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark);
366 EventMark em("%s", msg);
367
368 ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
369 ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
370 "final marking");
371
372 op_final_mark();
373 }
374
375 void ShenandoahConcurrentGC::entry_init_update_refs() {
376 static const char* msg = "Pause Init Update Refs";
377 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs);
378 EventMark em("%s", msg);
379
380 // No workers used in this phase, no setup required
381 op_init_update_refs();
382 }
383
384 void ShenandoahConcurrentGC::entry_final_update_refs() {
385 static const char* msg = "Pause Final Update Refs";
386 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
387 EventMark em("%s", msg);
388
389 ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
390 ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
391 "final reference update");
392
393 op_final_update_refs();
394 }
395
396 void ShenandoahConcurrentGC::entry_verify_final_roots() {
397 const char* msg = verify_final_roots_event_message();
398 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots);
399 EventMark em("%s", msg);
400
401 op_verify_final_roots();
402 }
403
404 void ShenandoahConcurrentGC::entry_reset() {
405 ShenandoahHeap* const heap = ShenandoahHeap::heap();
406 heap->try_inject_alloc_failure();
407
408 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
409 {
410 const char* msg = conc_reset_event_message();
411 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
412 EventMark em("%s", msg);
413
414 ShenandoahWorkerScope scope(heap->workers(),
415 ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
416 msg);
417 op_reset();
418 }
419
420 if (heap->mode()->is_generational()) {
421 heap->old_generation()->card_scan()->mark_read_table_as_clean();
422 }
423 }
424
425 void ShenandoahConcurrentGC::entry_scan_remembered_set() {
426 if (_generation->is_young()) {
427 ShenandoahHeap* const heap = ShenandoahHeap::heap();
428 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
429 const char* msg = "Concurrent remembered set scanning";
430 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::init_scan_rset);
431 EventMark em("%s", msg);
432
433 ShenandoahWorkerScope scope(heap->workers(),
434 ShenandoahWorkerPolicy::calc_workers_for_rs_scanning(),
435 msg);
436
437 heap->try_inject_alloc_failure();
438 _generation->scan_remembered_set(true /* is_concurrent */);
439 }
440 }
441
442 void ShenandoahConcurrentGC::entry_mark_roots() {
443 ShenandoahHeap* const heap = ShenandoahHeap::heap();
444 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
445 const char* msg = "Concurrent marking roots";
446 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
447 EventMark em("%s", msg);
448
449 ShenandoahWorkerScope scope(heap->workers(),
450 ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
451 "concurrent marking roots");
452
453 heap->try_inject_alloc_failure();
454 op_mark_roots();
455 }
456
457 void ShenandoahConcurrentGC::entry_mark() {
458 ShenandoahHeap* const heap = ShenandoahHeap::heap();
459 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
460 const char* msg = conc_mark_event_message();
461 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark);
462 EventMark em("%s", msg);
463
464 ShenandoahWorkerScope scope(heap->workers(),
465 ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
466 "concurrent marking");
467
468 heap->try_inject_alloc_failure();
469 op_mark();
470 }
471
472 void ShenandoahConcurrentGC::entry_thread_roots() {
473 ShenandoahHeap* const heap = ShenandoahHeap::heap();
474 static const char* msg = "Concurrent thread roots";
475 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_thread_roots);
476 EventMark em("%s", msg);
477
478 ShenandoahWorkerScope scope(heap->workers(),
479 ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
480 msg);
481
482 heap->try_inject_alloc_failure();
483 op_thread_roots();
484 }
485
486 void ShenandoahConcurrentGC::entry_weak_refs() {
487 ShenandoahHeap* const heap = ShenandoahHeap::heap();
488 const char* msg = conc_weak_refs_event_message();
489 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_refs);
490 EventMark em("%s", msg);
491
492 ShenandoahWorkerScope scope(heap->workers(),
493 ShenandoahWorkerPolicy::calc_workers_for_conc_refs_processing(),
494 "concurrent weak references");
495
496 heap->try_inject_alloc_failure();
497 op_weak_refs();
498 }
499
500 void ShenandoahConcurrentGC::entry_weak_roots() {
501 ShenandoahHeap* const heap = ShenandoahHeap::heap();
502 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
503 const char* msg = conc_weak_roots_event_message();
504 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_roots);
505 EventMark em("%s", msg);
506
507 ShenandoahWorkerScope scope(heap->workers(),
508 ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
509 "concurrent weak root");
510
511 heap->try_inject_alloc_failure();
512 op_weak_roots();
513 }
514
515 void ShenandoahConcurrentGC::entry_class_unloading() {
516 ShenandoahHeap* const heap = ShenandoahHeap::heap();
517 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
518 static const char* msg = "Concurrent class unloading";
519 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_class_unload);
520 EventMark em("%s", msg);
521
522 ShenandoahWorkerScope scope(heap->workers(),
523 ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
524 "concurrent class unloading");
525
526 heap->try_inject_alloc_failure();
527 op_class_unloading();
528 }
529
530 void ShenandoahConcurrentGC::entry_strong_roots() {
531 ShenandoahHeap* const heap = ShenandoahHeap::heap();
532 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
533 static const char* msg = "Concurrent strong roots";
534 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_strong_roots);
535 EventMark em("%s", msg);
536
537 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_strong_roots);
538
539 ShenandoahWorkerScope scope(heap->workers(),
540 ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
541 "concurrent strong root");
542
543 heap->try_inject_alloc_failure();
544 op_strong_roots();
545 }
546
547 void ShenandoahConcurrentGC::entry_cleanup_early() {
548 ShenandoahHeap* const heap = ShenandoahHeap::heap();
549 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
550 const char* msg = conc_cleanup_event_message();
551 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_early, true /* log_heap_usage */);
552 EventMark em("%s", msg);
553
554 // This phase does not use workers, no need for setup
555 heap->try_inject_alloc_failure();
556 op_cleanup_early();
557 }
558
559 void ShenandoahConcurrentGC::entry_evacuate() {
560 ShenandoahHeap* const heap = ShenandoahHeap::heap();
561 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
562
563 static const char* msg = "Concurrent evacuation";
564 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_evac);
565 EventMark em("%s", msg);
566
567 ShenandoahWorkerScope scope(heap->workers(),
568 ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
569 "concurrent evacuation");
570
571 heap->try_inject_alloc_failure();
572 op_evacuate();
573 }
574
575 void ShenandoahConcurrentGC::entry_promote_in_place() const {
576 shenandoah_assert_generational();
577
578 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::promote_in_place);
579 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::promote_in_place);
580 EventMark em("%s", "Promote in place");
581
582 ShenandoahGenerationalHeap::heap()->promote_regions_in_place(true);
583 }
584
585 void ShenandoahConcurrentGC::entry_update_thread_roots() {
586 ShenandoahHeap* const heap = ShenandoahHeap::heap();
587 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
588
589 static const char* msg = "Concurrent update thread roots";
590 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_thread_roots);
591 EventMark em("%s", msg);
592
593 // No workers used in this phase, no setup required
594 heap->try_inject_alloc_failure();
595 op_update_thread_roots();
596 }
597
598 void ShenandoahConcurrentGC::entry_update_refs() {
599 ShenandoahHeap* const heap = ShenandoahHeap::heap();
600 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
601 static const char* msg = "Concurrent update references";
602 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs);
603 EventMark em("%s", msg);
604
605 ShenandoahWorkerScope scope(heap->workers(),
606 ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
607 "concurrent reference update");
608
609 heap->try_inject_alloc_failure();
610 op_update_refs();
611 }
612
613 void ShenandoahConcurrentGC::entry_cleanup_complete() {
614 ShenandoahHeap* const heap = ShenandoahHeap::heap();
615 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
616 const char* msg = conc_cleanup_event_message();
617 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
618 EventMark em("%s", msg);
619
620   // This phase does not use workers; no setup is needed
621 heap->try_inject_alloc_failure();
622 op_cleanup_complete();
623 }
624
625 void ShenandoahConcurrentGC::entry_reset_after_collect() {
626 ShenandoahHeap* const heap = ShenandoahHeap::heap();
627 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
628 const char* msg = conc_reset_after_collect_event_message();
629 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset_after_collect);
630 EventMark em("%s", msg);
631
632 op_reset_after_collect();
633 }
634
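// Concurrent reset: sets up the pacer for the reset phase (when pacing is enabled) and has the
// appropriate generation prepare for GC. An old GC bootstrap cycle prepares the global generation
// instead, so that the old generation bitmap is also clear for the old cycle that follows.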
635 void ShenandoahConcurrentGC::op_reset() {
636 ShenandoahHeap* const heap = ShenandoahHeap::heap();
637 if (ShenandoahPacing) {
638 heap->pacer()->setup_for_reset();
639 }
640   // If this is an old GC bootstrap cycle, always clear the bitmap for the global generation
641   // to ensure the old generation bitmap is clear for the old GC cycle that follows.
642 if (_do_old_gc_bootstrap) {
643 assert(!heap->is_prepare_for_old_mark_in_progress(), "Cannot reset old without making it parsable");
644 heap->global_generation()->prepare_gc();
645 } else {
646 _generation->prepare_gc();
647 }
648 }
649
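// Heap region closure used at init mark: ensures every active region's top-at-mark-start (TAMS)
// matches its current top; inactive regions are expected to already have a correct TAMS.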
650 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
651 private:
652 ShenandoahMarkingContext* const _ctx;
653 public:
654 ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
655
656 void heap_region_do(ShenandoahHeapRegion* r) {
657 assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
658 if (r->is_active()) {
659       // Check if the region needs its TAMS updated. We have already updated it during concurrent
660       // reset, so it is very likely we do not need another write here. Since most regions
661       // are not "active", this path is relatively rare.
662 if (_ctx->top_at_mark_start(r) != r->top()) {
663 _ctx->capture_top_at_mark_start(r);
664 }
665 } else {
666 assert(_ctx->top_at_mark_start(r) == r->top(),
667 "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
668 }
669 }
670
671 bool is_thread_safe() { return true; }
672 };
673
674 void ShenandoahConcurrentGC::start_mark() {
675 _mark.start_mark();
676 }
677
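// Work done at the init-mark safepoint: asserts the marking preconditions, performs generational
// bookkeeping when applicable (old-gen SATB transfer and card table swap), captures TAMS for the
// regions about to be marked, resets reference-processor state, arms nmethods for concurrent mark,
// and advances the stack watermark epoch.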
678 void ShenandoahConcurrentGC::op_init_mark() {
679 ShenandoahHeap* const heap = ShenandoahHeap::heap();
680 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
681 assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
682
683 assert(_generation->is_bitmap_clear(), "need clear marking bitmap");
684 assert(!_generation->is_mark_complete(), "should not be complete");
685 assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
686
687 if (heap->mode()->is_generational()) {
688
689 if (_generation->is_global()) {
690 heap->old_generation()->cancel_gc();
691 } else if (heap->is_concurrent_old_mark_in_progress()) {
692 // Purge the SATB buffers, transferring any valid, old pointers to the
693 // old generation mark queue. Any pointers in a young region will be
694 // abandoned.
695 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_transfer_satb);
696 heap->old_generation()->transfer_pointers_from_satb();
697 }
698 {
699 // After we swap card table below, the write-table is all clean, and the read table holds
700 // cards dirty prior to the start of GC. Young and bootstrap collection will update
701 // the write card table as a side effect of remembered set scanning. Global collection will
702 // update the card table as a side effect of global marking of old objects.
703 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_swap_rset);
704 _generation->swap_card_tables();
705 }
706 }
707
708 if (ShenandoahVerify) {
709 ShenandoahTimingsTracker v(ShenandoahPhaseTimings::init_mark_verify);
710 heap->verifier()->verify_before_concmark();
711 }
712
713 if (VerifyBeforeGC) {
714 Universe::verify();
715 }
716
717 _generation->set_concurrent_mark_in_progress(true);
718
719 start_mark();
720
721 if (_do_old_gc_bootstrap) {
722 shenandoah_assert_generational();
723 // Update region state for both young and old regions
724 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
725 ShenandoahInitMarkUpdateRegionStateClosure cl;
726 heap->parallel_heap_region_iterate(&cl);
727 heap->old_generation()->ref_processor()->reset_thread_locals();
728 } else {
729 // Update region state for only young regions
730 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
731 ShenandoahInitMarkUpdateRegionStateClosure cl;
732 _generation->parallel_heap_region_iterate(&cl);
733 }
734
735 // Weak reference processing
736 ShenandoahReferenceProcessor* rp = _generation->ref_processor();
737 rp->reset_thread_locals();
738 rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());
739
740 // Make above changes visible to worker threads
741 OrderAccess::fence();
742
743 // Arm nmethods for concurrent mark
744 ShenandoahCodeRoots::arm_nmethods_for_mark();
745
746 ShenandoahStackWatermark::change_epoch_id();
747 if (ShenandoahPacing) {
748 heap->pacer()->setup_for_mark();
749 }
750
751 {
752 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_propagate_gc_state);
753 heap->propagate_gc_state_to_all_threads();
754 }
755 }
756
757 void ShenandoahConcurrentGC::op_mark_roots() {
758 _mark.mark_concurrent_roots();
759 }
760
761 void ShenandoahConcurrentGC::op_mark() {
762 _mark.concurrent_mark();
763 }
764
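// Work done at the final-mark safepoint: finishes concurrent marking (unless the GC was cancelled),
// chooses the collection set and, when it is non-empty, switches the heap into evacuation mode by
// arming nmethods and advancing the stack watermark epoch; with an empty collection set,
// evacuation is skipped.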
765 void ShenandoahConcurrentGC::op_final_mark() {
766 ShenandoahHeap* const heap = ShenandoahHeap::heap();
767 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
768 assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
769
770 if (ShenandoahVerify) {
771 heap->verifier()->verify_roots_no_forwarded();
772 }
773
774 if (!heap->cancelled_gc()) {
775 _mark.finish_mark();
776 assert(!heap->cancelled_gc(), "STW mark cannot OOM");
777
778 // Notify JVMTI that the tagmap table will need cleaning.
779 JvmtiTagMap::set_needs_cleaning();
780
781 // The collection set is chosen by prepare_regions_and_collection_set(). Additionally, certain parameters have been
782 // established to govern the evacuation efforts that are about to begin. Refer to comments on reserve members in
783 // ShenandoahGeneration and ShenandoahOldGeneration for more detail.
784 _generation->prepare_regions_and_collection_set(true /*concurrent*/);
785
786 // Has to be done after cset selection
787 heap->prepare_concurrent_roots();
788
789 if (!heap->collection_set()->is_empty()) {
790 LogTarget(Debug, gc, cset) lt;
791 if (lt.is_enabled()) {
792 ResourceMark rm;
793 LogStream ls(lt);
794 heap->collection_set()->print_on(&ls);
795 }
796
797 if (ShenandoahVerify) {
798 ShenandoahTimingsTracker v(ShenandoahPhaseTimings::final_mark_verify);
799 heap->verifier()->verify_before_evacuation();
800 }
801
802 heap->set_evacuation_in_progress(true);
803 // From here on, we need to update references.
804 heap->set_has_forwarded_objects(true);
805
806 // Arm nmethods/stack for concurrent processing
807 ShenandoahCodeRoots::arm_nmethods_for_evac();
808 ShenandoahStackWatermark::change_epoch_id();
809
810 if (ShenandoahPacing) {
811 heap->pacer()->setup_for_evac();
812 }
813 } else {
814 if (ShenandoahVerify) {
815 ShenandoahTimingsTracker v(ShenandoahPhaseTimings::final_mark_verify);
816 if (has_in_place_promotions(heap)) {
817 heap->verifier()->verify_after_concmark_with_promotions();
818 } else {
819 heap->verifier()->verify_after_concmark();
820 }
821 }
822 }
823 }
824
825 {
826 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_propagate_gc_state);
827 heap->propagate_gc_state_to_all_threads();
828 }
829 }
830
831 bool ShenandoahConcurrentGC::has_in_place_promotions(ShenandoahHeap* heap) {
832 return heap->mode()->is_generational() && heap->old_generation()->has_in_place_promotions();
833 }
834
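// Thread closure used during concurrent evacuation of thread roots: finishes stack-watermark
// processing for each Java thread (evacuating/updating oops on its stack) and, in generational
// mode, enables PLAB promotions for that thread.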
835 template<bool GENERATIONAL>
836 class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
837 private:
838 OopClosure* const _oops;
839 public:
840 explicit ShenandoahConcurrentEvacThreadClosure(OopClosure* oops) : _oops(oops) {}
841
842 void do_thread(Thread* thread) override {
843 JavaThread* const jt = JavaThread::cast(thread);
844 StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc);
845 if (GENERATIONAL) {
846 ShenandoahThreadLocalData::enable_plab_promotions(thread);
847 }
848 }
849 };
850
851 template<bool GENERATIONAL>
852 class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask {
853 private:
854 ShenandoahJavaThreadsIterator _java_threads;
855
856 public:
857 explicit ShenandoahConcurrentEvacUpdateThreadTask(uint n_workers) :
858 WorkerTask("Shenandoah Evacuate/Update Concurrent Thread Roots"),
859 _java_threads(ShenandoahPhaseTimings::conc_thread_roots, n_workers) {
860 }
861
862 void work(uint worker_id) override {
863 if (GENERATIONAL) {
864 Thread* worker_thread = Thread::current();
865 ShenandoahThreadLocalData::enable_plab_promotions(worker_thread);
866 }
867
868     // ShenandoahEvacOOMScope has to be set up by ShenandoahContextEvacuateUpdateRootsClosure;
869     // otherwise, it may deadlock with the watermark lock.
870 ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
871 ShenandoahConcurrentEvacThreadClosure<GENERATIONAL> thr_cl(&oops_cl);
872 _java_threads.threads_do(&thr_cl, worker_id);
873 }
874 };
875
876 void ShenandoahConcurrentGC::op_thread_roots() {
877 ShenandoahHeap* const heap = ShenandoahHeap::heap();
878 assert(heap->is_evacuation_in_progress(), "Checked by caller");
879 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
880 if (heap->mode()->is_generational()) {
881 ShenandoahConcurrentEvacUpdateThreadTask<true> task(heap->workers()->active_workers());
882 heap->workers()->run_task(&task);
883 } else {
884 ShenandoahConcurrentEvacUpdateThreadTask<false> task(heap->workers()->active_workers());
885 heap->workers()->run_task(&task);
886 }
887 }
888
889 void ShenandoahConcurrentGC::op_weak_refs() {
890 ShenandoahHeap* const heap = ShenandoahHeap::heap();
891 assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
892 // Concurrent weak refs processing
893 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
894 if (heap->gc_cause() == GCCause::_wb_breakpoint) {
895 ShenandoahBreakpoint::at_after_reference_processing_started();
896 }
897 _generation->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
898 }
899
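// Closure for OopStorage-backed weak roots: unmarked (dead) objects in the active generation are
// cleared, while marked objects that sit in the collection set are evacuated and the root slot is
// updated to point to the forwarded copy.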
900 class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
901 private:
902 ShenandoahHeap* const _heap;
903 ShenandoahMarkingContext* const _mark_context;
904 bool _evac_in_progress;
905 Thread* const _thread;
906
907 public:
908 ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
909 void do_oop(oop* p);
910 void do_oop(narrowOop* p);
911 };
912
913 ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
914 _heap(ShenandoahHeap::heap()),
915 _mark_context(ShenandoahHeap::heap()->marking_context()),
916 _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
917 _thread(Thread::current()) {
918 }
919
920 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
921 const oop obj = RawAccess<>::oop_load(p);
922 if (!CompressedOops::is_null(obj)) {
923 if (!_mark_context->is_marked(obj)) {
924 shenandoah_assert_generations_reconciled();
925 if (_heap->is_in_active_generation(obj)) {
926 // Note: The obj is dead here. Do not touch it, just clear.
927 ShenandoahHeap::atomic_clear_oop(p, obj);
928 }
929 } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
930 oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
931 if (resolved == obj) {
932 resolved = _heap->evacuate_object(obj, _thread);
933 }
934 shenandoah_assert_not_in_cset_except(p, resolved, _heap->cancelled_gc());
935 ShenandoahHeap::atomic_update_oop(resolved, p, obj);
936 }
937 }
938 }
939
940 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
941 ShouldNotReachHere();
942 }
943
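// The two closures below are invoked purely for their side effects: cld->is_alive() lets the
// native barrier process the CLD holder, and nmethod->is_unloading() computes and caches the
// nmethod's unloading state; the return values are deliberately ignored.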
944 class ShenandoahIsCLDAliveClosure : public CLDClosure {
945 public:
946 void do_cld(ClassLoaderData* cld) {
947 cld->is_alive();
948 }
949 };
950
951 class ShenandoahIsNMethodAliveClosure: public NMethodClosure {
952 public:
953 void do_nmethod(nmethod* n) {
954 n->is_unloading();
955 }
975 _nmethod_itr(ShenandoahCodeRoots::table()),
976 _phase(phase) {}
977
978 ~ShenandoahConcurrentWeakRootsEvacUpdateTask() {
979 // Notify runtime data structures of potentially dead oops
980 _vm_roots.report_num_dead();
981 }
982
983 void work(uint worker_id) {
984 ShenandoahConcurrentWorkerSession worker_session(worker_id);
985 ShenandoahSuspendibleThreadSetJoiner sts_join;
986 {
987 ShenandoahEvacOOMScope oom;
988       // jni_roots and weak_roots are OopStorage-backed roots; concurrent iteration
989       // may race against OopStorage::release() calls.
990 ShenandoahEvacUpdateCleanupOopStorageRootsClosure cl;
991 _vm_roots.oops_do(&cl, worker_id);
992 }
993
994 // If we are going to perform concurrent class unloading later on, we need to
995     // clean up the weak oops in CLDs and determine each nmethod's unloading state, so that we
996 // can clean up immediate garbage sooner.
997 if (ShenandoahHeap::heap()->unload_classes()) {
998     // Applies the ShenandoahIsCLDAliveClosure to CLDs; the native barrier will either null the
999     // CLD's holder or evacuate it.
1000 {
1001 ShenandoahIsCLDAliveClosure is_cld_alive;
1002 _cld_roots.cld_do(&is_cld_alive, worker_id);
1003 }
1004
1005 // Applies ShenandoahIsNMethodAliveClosure to registered nmethods.
1006 // The closure calls nmethod->is_unloading(). The is_unloading
1007      // state is cached; therefore, during the concurrent class unloading phase,
1008      // we will not touch the metadata of unloading nmethods.
1009 {
1010 ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
1011 ShenandoahIsNMethodAliveClosure is_nmethod_alive;
1012 _nmethod_itr.nmethods_do(&is_nmethod_alive);
1013 }
1014 }
1015 }
1016 };
1017
1018 void ShenandoahConcurrentGC::op_weak_roots() {
1019 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1020 assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
1021 {
1022 // Concurrent weak root processing
1023 ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_work);
1024 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots_work);
1025 ShenandoahConcurrentWeakRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_weak_roots_work);
1026 heap->workers()->run_task(&task);
1027 }
1028
1029 {
1030 // It is possible for mutators executing the load reference barrier to have
1031 // loaded an oop through a weak handle that has since been nulled out by
1032 // weak root processing. Handshaking here forces them to complete the
1033 // barrier before the GC cycle continues and does something that would
1034 // change the evaluation of the barrier (for example, resetting the TAMS
1035 // on trashed regions could make an oop appear to be marked _after_ the
1036 // region has been recycled).
1037 ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_rendezvous);
1038 heap->rendezvous_threads("Shenandoah Concurrent Weak Roots");
1039 }
1040 }
1041
1042 void ShenandoahConcurrentGC::op_class_unloading() {
1043 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1044 assert (heap->is_concurrent_weak_root_in_progress() &&
1045 heap->unload_classes(),
1046 "Checked by caller");
1047 heap->do_class_unloading();
1048 }
1049
1050 class ShenandoahEvacUpdateCodeCacheClosure : public NMethodClosure {
1051 private:
1052 BarrierSetNMethod* const _bs;
1053 ShenandoahEvacuateUpdateMetadataClosure _cl;
1054
1055 public:
1056 ShenandoahEvacUpdateCodeCacheClosure() :
1104 }
1105
1106   // Cannot set up ShenandoahEvacOOMScope here, due to potential deadlock with nmethod_entry_barrier.
1107 if (!ShenandoahHeap::heap()->unload_classes()) {
1108 ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
1109 ShenandoahEvacUpdateCodeCacheClosure cl;
1110 _nmethod_itr.nmethods_do(&cl);
1111 }
1112 }
1113 };
1114
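// Concurrent strong root processing: runs the evacuate/update task over the remaining strong roots
// (including code cache roots when classes are not being unloaded) and then clears the
// strong-roots-in-progress flag.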
1115 void ShenandoahConcurrentGC::op_strong_roots() {
1116 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1117 assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
1118 ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
1119 heap->workers()->run_task(&task);
1120 heap->set_concurrent_strong_root_in_progress(false);
1121 }
1122
1123 void ShenandoahConcurrentGC::op_cleanup_early() {
1124 ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
1125 ShenandoahWorkerPolicy::calc_workers_for_conc_cleanup(),
1126 "cleanup early.");
1127 ShenandoahHeap::heap()->recycle_trash();
1128 }
1129
1130 void ShenandoahConcurrentGC::op_evacuate() {
1131 ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
1132 }
1133
1134 void ShenandoahConcurrentGC::op_init_update_refs() {
1135 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1136 if (ShenandoahVerify) {
1137 ShenandoahTimingsTracker v(ShenandoahPhaseTimings::init_update_refs_verify);
1138 heap->verifier()->verify_before_update_refs();
1139 }
1140 if (ShenandoahPacing) {
1141 heap->pacer()->setup_for_update_refs();
1142 }
1143 }
1144
1145 void ShenandoahConcurrentGC::op_update_refs() {
1146 ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
1147 }
1148
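// Handshake closure that walks a Java thread's oops with ShenandoahUpdateRefsClosure, updating
// stale references after evacuation.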
1149 class ShenandoahUpdateThreadClosure : public HandshakeClosure {
1150 private:
1151 ShenandoahUpdateRefsClosure _cl;
1152 public:
1153 ShenandoahUpdateThreadClosure();
1154 void do_thread(Thread* thread);
1155 };
1156
1157 ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
1158 HandshakeClosure("Shenandoah Update Thread Roots") {
1159 }
1160
1161 void ShenandoahUpdateThreadClosure::do_thread(Thread* thread) {
1162 if (thread->is_Java_thread()) {
1163 JavaThread* jt = JavaThread::cast(thread);
1164 ResourceMark rm;
1165 jt->oops_do(&_cl, nullptr);
1166 }
1167 }
1168
1169 void ShenandoahConcurrentGC::op_update_thread_roots() {
1170 ShenandoahUpdateThreadClosure cl;
1171 Handshake::execute(&cl);
1172 }
1173
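// Work done at the final-update-refs safepoint: finishes concurrent root processing, updates heap
// region states, clears the update-refs and has-forwarded-objects flags, transfers remaining SATB
// pointers to the old generation when old marking is still in progress, and rebuilds the free set.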
1174 void ShenandoahConcurrentGC::op_final_update_refs() {
1175 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1176 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
1177 assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");
1178
1179 heap->finish_concurrent_roots();
1180
1181   // Clear cancelled GC, if set. On the cancellation path, the block before would have handled
1182   // everything.
1183 if (heap->cancelled_gc()) {
1184 heap->clear_cancelled_gc(true /* clear oom handler */);
1185 }
1186
1187   // Has to be done before the cset is cleared
1188 if (ShenandoahVerify) {
1189 heap->verifier()->verify_roots_in_to_space();
1190 }
1191
1192 // If we are running in generational mode and this is an aging cycle, this will also age active
1193 // regions that haven't been used for allocation.
1194 heap->update_heap_region_states(true /*concurrent*/);
1195
1196 heap->set_update_refs_in_progress(false);
1197 heap->set_has_forwarded_objects(false);
1198
1199 if (heap->mode()->is_generational() && heap->is_concurrent_old_mark_in_progress()) {
1200 // When the SATB barrier is left on to support concurrent old gen mark, it may pick up writes to
1201 // objects in the collection set. After those objects are evacuated, the pointers in the
1202 // SATB are no longer safe. Once we have finished update references, we are guaranteed that
1203 // no more writes to the collection set are possible.
1204 //
1205 // This will transfer any old pointers in _active_ regions from the SATB to the old gen
1206 // mark queues. All other pointers will be discarded. This would also discard any pointers
1207 // in old regions that were included in a mixed evacuation. We aren't using the SATB filter
1208 // methods here because we cannot control when they execute. If the SATB filter runs _after_
1209 // a region has been recycled, we will not be able to detect the bad pointer.
1210 //
1211 // We are not concerned about skipping this step in abbreviated cycles because regions
1212 // with no live objects cannot have been written to and so cannot have entries in the SATB
1213 // buffers.
1214 heap->old_generation()->transfer_pointers_from_satb();
1215
1216   // The aging cycle is only relevant during the evacuation cycle for individual objects and during final mark for
1217   // entire regions. Both of these operations occur before final update refs.
1218 ShenandoahGenerationalHeap::heap()->set_aging_cycle(false);
1219 }
1220
1221 if (ShenandoahVerify) {
1222 ShenandoahTimingsTracker v(ShenandoahPhaseTimings::final_update_refs_verify);
1223 heap->verifier()->verify_after_update_refs();
1224 }
1225
1226 if (VerifyAfterGC) {
1227 Universe::verify();
1228 }
1229
1230 heap->rebuild_free_set(true /*concurrent*/);
1231
1232 {
1233 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_propagate_gc_state);
1234 heap->propagate_gc_state_to_all_threads();
1235 }
1236 }
1237
1238 bool ShenandoahConcurrentGC::entry_final_roots() {
1239 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1240 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
1241
1243 const char* msg = conc_final_roots_event_message();
1244 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_final_roots);
1245 EventMark em("%s", msg);
1246 ShenandoahWorkerScope scope(heap->workers(),
1247 ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
1248 msg);
1249
1250 if (!heap->mode()->is_generational()) {
1251 heap->concurrent_final_roots();
1252 } else {
1253 if (!complete_abbreviated_cycle()) {
1254 return false;
1255 }
1256 }
1257 return true;
1258 }
1259
1260 void ShenandoahConcurrentGC::op_verify_final_roots() {
1261 if (VerifyAfterGC) {
1262 Universe::verify();
1263 }
1264 }
1265
1266 void ShenandoahConcurrentGC::op_cleanup_complete() {
1267 ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
1268 ShenandoahWorkerPolicy::calc_workers_for_conc_cleanup(),
1269 "cleanup complete.");
1270 ShenandoahHeap::heap()->recycle_trash();
1271 }
1272
1273 void ShenandoahConcurrentGC::op_reset_after_collect() {
1274 ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
1275 ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
1276 "reset after collection.");
1277
1278 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1279 if (heap->mode()->is_generational()) {
1280     // If we are in the midst of an old GC bootstrap or old marking, we want to leave the young generation's
1281     // mark bitmap intact. In particular, reference processing in the old generation may still need to determine
1282     // the reachability of a young generation referent of a Reference object in the old generation.
1283 if (!_do_old_gc_bootstrap && !heap->is_concurrent_old_mark_in_progress()) {
1284 heap->young_generation()->reset_mark_bitmap<false>();
1285 }
1286 } else {
1287 _generation->reset_mark_bitmap<false>();
1288 }
1289 }
1290
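// Checks whether the GC has been cancelled; if so, records the point at which the concurrent cycle
// must degenerate so the caller can abandon the concurrent cycle and let a degenerated GC take over.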
1291 bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
1292 if (ShenandoahHeap::heap()->cancelled_gc()) {
1293 _degen_point = point;
1294 return true;
1295 }
1296 return false;
1297 }
1298
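// The *_event_message() helpers below produce the phase names used for logging and JFR events,
// appending " (unload classes)" when class unloading is enabled for the cycle.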
1299 const char* ShenandoahConcurrentGC::init_mark_event_message() const {
1300 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1301 assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1302 if (heap->unload_classes()) {
1303 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Init Mark", " (unload classes)");
1304 } else {
1305 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Init Mark", "");
1306 }
1307 }
1308
1309 const char* ShenandoahConcurrentGC::final_mark_event_message() const {
1310 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1311 assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
1312 "Should not have forwarded objects during final mark, unless old gen concurrent mark is running");
1313
1314 if (heap->unload_classes()) {
1315 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Mark", " (unload classes)");
1316 } else {
1317 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Mark", "");
1318 }
1319 }
1320
1321 const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
1322 ShenandoahHeap* const heap = ShenandoahHeap::heap();
1323 assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
1324          "Should not have forwarded objects during concurrent mark, unless old gen concurrent mark is running");
1325 if (heap->unload_classes()) {
1326 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent marking", " (unload classes)");
1327 } else {
1328 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent marking", "");
1329 }
1330 }
1331
1332 const char* ShenandoahConcurrentGC::conc_reset_event_message() const {
1333 if (ShenandoahHeap::heap()->unload_classes()) {
1334 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent reset", " (unload classes)");
1335 } else {
1336 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent reset", "");
1337 }
1338 }
1339
1340 const char* ShenandoahConcurrentGC::conc_reset_after_collect_event_message() const {
1341 if (ShenandoahHeap::heap()->unload_classes()) {
1342 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent reset after collect", " (unload classes)");
1343 } else {
1344 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent reset after collect", "");
1345 }
1346 }
1347
1348 const char* ShenandoahConcurrentGC::verify_final_roots_event_message() const {
1349 if (ShenandoahHeap::heap()->unload_classes()) {
1350 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Verify Final Roots", " (unload classes)");
1351 } else {
1352 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Verify Final Roots", "");
1353 }
1354 }
1355
1356 const char* ShenandoahConcurrentGC::conc_final_roots_event_message() const {
1357 if (ShenandoahHeap::heap()->unload_classes()) {
1358 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent Final Roots", " (unload classes)");
1359 } else {
1360 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent Final Roots", "");
1361 }
1362 }
1363
1364 const char* ShenandoahConcurrentGC::conc_weak_refs_event_message() const {
1365 if (ShenandoahHeap::heap()->unload_classes()) {
1366 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent weak references", " (unload classes)");
1367 } else {
1368 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent weak references", "");
1369 }
1370 }
1371
1372 const char* ShenandoahConcurrentGC::conc_weak_roots_event_message() const {
1373 if (ShenandoahHeap::heap()->unload_classes()) {
1374 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent weak roots", " (unload classes)");
1375 } else {
1376 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent weak roots", "");
1377 }
1378 }
1379
1380 const char* ShenandoahConcurrentGC::conc_cleanup_event_message() const {
1381 if (ShenandoahHeap::heap()->unload_classes()) {
1382 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent cleanup", " (unload classes)");
1383 } else {
1384 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent cleanup", "");
1385 }
1386 }
1387
1388 const char* ShenandoahConcurrentGC::conc_init_update_refs_event_message() const {
1389 if (ShenandoahHeap::heap()->unload_classes()) {
1390 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent Init Update Refs", " (unload classes)");
1391 } else {
1392 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent Init Update Refs", "");
1393 }
1394 }
|