/*
 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shenandoah/shenandoahBreakpoint.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "gc/shenandoah/shenandoahLock.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahStackWatermark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "memory/allocation.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/events.hpp"

// Breakpoint support
class ShenandoahBreakpointGCScope : public StackObj {
private:
  const GCCause::Cause _cause;
public:
  ShenandoahBreakpointGCScope(GCCause::Cause cause) : _cause(cause) {
    if (cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::start_gc();
      ShenandoahBreakpoint::at_before_gc();
    }
  }

  ~ShenandoahBreakpointGCScope() {
    if (_cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::at_after_gc();
    }
  }
};

class ShenandoahBreakpointMarkScope : public StackObj {
private:
  const GCCause::Cause _cause;
public:
  ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
    if (_cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::at_after_marking_started();
    }
  }

  ~ShenandoahBreakpointMarkScope() {
    if (_cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::at_before_marking_completed();
    }
  }
};

ShenandoahConcurrentGC::ShenandoahConcurrentGC(ShenandoahGeneration* generation, bool do_old_gc_bootstrap) :
  _mark(generation),
  _generation(generation),
  _degen_point(ShenandoahDegenPoint::_degenerated_unset),
  _abbreviated(false),
  _do_old_gc_bootstrap(do_old_gc_bootstrap) {
}

ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
  return _degen_point;
}

void ShenandoahConcurrentGC::entry_concurrent_update_refs_prepare(ShenandoahHeap* const heap) {
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = conc_init_update_refs_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs_prepare);
  EventMark em("%s", msg);

  // Evacuation is complete; retire GC LABs and change the GC state.
  heap->concurrent_prepare_for_update_refs();
}

bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);

  // Reset for upcoming marking
  entry_reset();

  // Start initial mark under STW
  vmop_entry_init_mark();

  {
    ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);

    // Reset task queue stats here, rather than in mark_concurrent_roots,
    // because remembered set scan will `push` oops into the queues and
    // resetting after this happens will lose those counts.
    TASKQUEUE_STATS_ONLY(_mark.task_queues()->reset_taskqueue_stats());

    // Concurrent remembered set scanning
    entry_scan_remembered_set();

    // Concurrent mark roots
    entry_mark_roots();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_roots)) {
      return false;
    }

    // Continue concurrent mark
    entry_mark();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) {
      return false;
    }
  }

  // Complete marking under STW, and start evacuation
  vmop_entry_final_mark();

  // If the GC was cancelled before final mark, nothing happens on the safepoint. We are still
  // in the marking phase and must resume the degenerated cycle from there. If the GC was cancelled
  // after final mark, then we've entered the evacuation phase and must resume the degenerated cycle
  // from that phase.
  if (_generation->is_concurrent_mark_in_progress()) {
    bool cancelled = check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark);
    assert(cancelled, "GC must have been cancelled between concurrent and final mark");
    return false;
  }

  assert(heap->is_concurrent_weak_root_in_progress(), "Must be doing weak roots now");

  // Concurrent stack processing
  if (heap->is_evacuation_in_progress()) {
    entry_thread_roots();
  }

  // Process weak roots that might still point to regions that would be broken by cleanup.
  // We cannot recycle regions because weak roots need to know what is marked in trashed regions.
  entry_weak_refs();
  entry_weak_roots();

  // Perform concurrent class unloading before any regions get recycled. Class unloading may
  // need to inspect unmarked objects in trashed regions.
  if (heap->unload_classes()) {
    entry_class_unloading();
  }

  // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
  // the space. This would be the last action if there is nothing to evacuate. Note that
  // we will not age young-gen objects in the case that we skip evacuation.
  entry_cleanup_early();

  heap->free_set()->log_status_under_lock();

  // Processing strong roots
  // This may be skipped if there is nothing to update/evacuate.
  // If so, strong_root_in_progress would be unset.
  if (heap->is_concurrent_strong_root_in_progress()) {
    entry_strong_roots();
  }

  // Continue the cycle with evacuation and optional update-refs.
  // This may be skipped if there is nothing to evacuate.
  // If so, evac_in_progress would be unset by collection set preparation code.
  if (heap->is_evacuation_in_progress()) {
    // Concurrently evacuate
    entry_evacuate();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) {
      return false;
    }

    entry_concurrent_update_refs_prepare(heap);

    // Perform update-refs phase.
    if (ShenandoahVerify || ShenandoahPacing) {
      vmop_entry_init_update_refs();
    }

    entry_update_refs();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_update_refs)) {
      return false;
    }

    // Concurrent update thread roots
    entry_update_thread_roots();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_update_refs)) {
      return false;
    }

    vmop_entry_final_update_refs();

    // Update references has freed up the collection set; kick cleanup to reclaim the space.
    entry_cleanup_complete();
  } else {
    if (!entry_final_roots()) {
      assert(_degen_point != _degenerated_unset, "Need to know where to start degenerated cycle");
      return false;
    }

    if (VerifyAfterGC) {
      vmop_entry_verify_final_roots();
    }
    _abbreviated = true;
  }

  // We defer generation resizing actions until after cset regions have been recycled. We do this even following an
  // abbreviated cycle.
  if (heap->mode()->is_generational()) {
    ShenandoahGenerationalHeap::heap()->complete_concurrent_cycle();
  }

  // Instead of always resetting immediately before the start of a new GC, we can often reset at the end of the
  // previous GC. This allows us to start the next GC cycle more quickly after a trigger condition is detected,
  // reducing the likelihood that GC will degenerate.
  entry_reset_after_collect();

  return true;
}

bool ShenandoahConcurrentGC::complete_abbreviated_cycle() {
  shenandoah_assert_generational();

  ShenandoahGenerationalHeap* const heap = ShenandoahGenerationalHeap::heap();

  // We chose not to evacuate because we found sufficient immediate garbage.
  // However, there may still be regions to promote in place, so do that now.
  if (heap->old_generation()->has_in_place_promotions()) {
    entry_promote_in_place();

    // If the promote-in-place operation was cancelled, we can have the degenerated
    // cycle complete the operation. It will see that no evacuations are in progress,
    // and that there are regions wanting promotion. The risk with not handling the
    // cancellation would be failing to restore top for these regions and leaving
    // them unable to serve allocations for the old generation. This will leave the weak
    // roots flag set (the degenerated cycle will unset it).
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) {
      return false;
    }
  }

  // At this point, the cycle is effectively complete. If the cycle has been cancelled here,
  // the control thread will detect it on its next iteration and run a degenerated young cycle.
  if (!_generation->is_old()) {
    heap->update_region_ages(_generation->complete_marking_context());
  }

  if (!heap->is_concurrent_old_mark_in_progress()) {
    heap->concurrent_final_roots();
  } else {
    // Since the cycle was shortened for having enough immediate garbage, this will be
    // the last phase before concurrent marking of old resumes. We must be sure
    // that old mark threads don't see any pointers to garbage in the SATB queues. Even
    // though nothing was evacuated, overwriting unreachable weak roots with null may still
    // put pointers to regions that become trash in the SATB queues. The following will
    // piggyback flushing the thread local SATB queues on the same handshake that propagates
    // the gc state change.
    ShenandoahSATBMarkQueueSet& satb_queues = ShenandoahBarrierSet::satb_mark_queue_set();
    ShenandoahFlushSATBHandshakeClosure complete_thread_local_satb_buffers(satb_queues);
    heap->concurrent_final_roots(&complete_thread_local_satb_buffers);
    heap->old_generation()->concurrent_transfer_pointers_from_satb();
  }
  return true;
}

void ShenandoahConcurrentGC::vmop_entry_init_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahInitMark op(this);
  VMThread::execute(&op); // jump to entry_init_mark() under safepoint
}

void ShenandoahConcurrentGC::vmop_entry_final_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalMarkStartEvac op(this);
  VMThread::execute(&op); // jump to entry_final_mark under safepoint
}

void ShenandoahConcurrentGC::vmop_entry_init_update_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_update_refs_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahInitUpdateRefs op(this);
  VMThread::execute(&op);
}

void ShenandoahConcurrentGC::vmop_entry_final_update_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalUpdateRefs op(this);
  VMThread::execute(&op);
}

void ShenandoahConcurrentGC::vmop_entry_verify_final_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_roots_gross);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalRoots op(this);
  VMThread::execute(&op);
}

void ShenandoahConcurrentGC::entry_init_mark() {
  const char* msg = init_mark_event_message();
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
                              "init marking");

  op_init_mark();
}

void ShenandoahConcurrentGC::entry_final_mark() {
  const char* msg = final_mark_event_message();
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
                              "final marking");

  op_final_mark();
}

void ShenandoahConcurrentGC::entry_init_update_refs() {
  static const char* msg = "Pause Init Update Refs";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs);
  EventMark em("%s", msg);

  // No workers used in this phase, no setup required
  op_init_update_refs();
}

void ShenandoahConcurrentGC::entry_final_update_refs() {
  static const char* msg = "Pause Final Update Refs";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
                              "final reference update");

  op_final_update_refs();
}

void ShenandoahConcurrentGC::entry_verify_final_roots() {
  const char* msg = verify_final_roots_event_message();
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots);
  EventMark em("%s", msg);

  op_verify_final_roots();
}

void ShenandoahConcurrentGC::entry_reset() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->try_inject_alloc_failure();

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  {
    const char* msg = conc_reset_event_message();
    ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
    EventMark em("%s", msg);

    ShenandoahWorkerScope scope(heap->workers(),
                                ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
                                msg);
    op_reset();
  }
}

void ShenandoahConcurrentGC::entry_scan_remembered_set() {
  if (_generation->is_young()) {
    ShenandoahHeap* const heap = ShenandoahHeap::heap();
    TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
    const char* msg = "Concurrent remembered set scanning";
    ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::init_scan_rset);
    EventMark em("%s", msg);

    ShenandoahWorkerScope scope(heap->workers(),
                                ShenandoahWorkerPolicy::calc_workers_for_rs_scanning(),
                                msg);

    heap->try_inject_alloc_failure();
    _generation->scan_remembered_set(true /* is_concurrent */);
  }
}

void ShenandoahConcurrentGC::entry_mark_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = "Concurrent marking roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent marking roots");

  heap->try_inject_alloc_failure();
  op_mark_roots();
}

void ShenandoahConcurrentGC::entry_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = conc_mark_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent marking");

  heap->try_inject_alloc_failure();
  op_mark();
}

void ShenandoahConcurrentGC::entry_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  static const char* msg = "Concurrent thread roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_thread_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              msg);

  heap->try_inject_alloc_failure();
  op_thread_roots();
}

void ShenandoahConcurrentGC::entry_weak_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  const char* msg = conc_weak_refs_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_refs_processing(),
                              "concurrent weak references");

  heap->try_inject_alloc_failure();
  op_weak_refs();
}

void ShenandoahConcurrentGC::entry_weak_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = conc_weak_roots_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent weak root");

  heap->try_inject_alloc_failure();
  op_weak_roots();
}

void ShenandoahConcurrentGC::entry_class_unloading() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent class unloading";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_class_unload);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent class unloading");

  heap->try_inject_alloc_failure();
  op_class_unloading();
}

void ShenandoahConcurrentGC::entry_strong_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent strong roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_strong_roots);
  EventMark em("%s", msg);

  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_strong_roots);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent strong root");

  heap->try_inject_alloc_failure();
  op_strong_roots();
}

void ShenandoahConcurrentGC::entry_cleanup_early() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = conc_cleanup_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_early, true /* log_heap_usage */);
  EventMark em("%s", msg);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  op_cleanup_early();
}

void ShenandoahConcurrentGC::entry_evacuate() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  static const char* msg = "Concurrent evacuation";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_evac);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
                              "concurrent evacuation");

  heap->try_inject_alloc_failure();
  op_evacuate();
}

void ShenandoahConcurrentGC::entry_promote_in_place() const {
  shenandoah_assert_generational();

  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::promote_in_place);
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::promote_in_place);
  EventMark em("%s", "Promote in place");

  ShenandoahGenerationalHeap::heap()->promote_regions_in_place(true);
}

void ShenandoahConcurrentGC::entry_update_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  static const char* msg = "Concurrent update thread roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_thread_roots);
  EventMark em("%s", msg);

  // No workers used in this phase, no setup required
  heap->try_inject_alloc_failure();
  op_update_thread_roots();
}

void ShenandoahConcurrentGC::entry_update_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent update references";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
                              "concurrent reference update");

  heap->try_inject_alloc_failure();
  op_update_refs();
}

void ShenandoahConcurrentGC::entry_cleanup_complete() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = conc_cleanup_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
  EventMark em("%s", msg);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  op_cleanup_complete();
}

void ShenandoahConcurrentGC::entry_reset_after_collect() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = conc_reset_after_collect_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset_after_collect);
  EventMark em("%s", msg);

  op_reset_after_collect();
}

void ShenandoahConcurrentGC::op_reset() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_reset();
  }
  // If this is an old GC bootstrap cycle, always clear the bitmap for the global generation
  // to ensure the old generation bitmap is clear for the old GC cycle that follows.
  if (_do_old_gc_bootstrap) {
    assert(!heap->is_prepare_for_old_mark_in_progress(), "Cannot reset old without making it parsable");
    heap->global_generation()->prepare_gc();
  } else {
    _generation->prepare_gc();
  }

  if (heap->mode()->is_generational()) {
    heap->old_generation()->card_scan()->mark_read_table_as_clean();
  }
}

class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
public:
  ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
    if (r->is_active()) {
      // Check if region needs updating its TAMS. We have updated it already during concurrent
      // reset, so it is very likely we don't need to do another write here. Since most regions
      // are not "active", this path is relatively rare.
      if (_ctx->top_at_mark_start(r) != r->top()) {
        _ctx->capture_top_at_mark_start(r);
      }
    } else {
      assert(_ctx->top_at_mark_start(r) == r->top(),
             "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
    }
  }

  bool is_thread_safe() { return true; }
};

void ShenandoahConcurrentGC::start_mark() {
  _mark.start_mark();
}

void ShenandoahConcurrentGC::op_init_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");

  assert(_generation->is_bitmap_clear(), "need clear marking bitmap");
  assert(!_generation->is_mark_complete(), "should not be complete");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");

  if (heap->mode()->is_generational()) {

    if (_generation->is_global()) {
      heap->old_generation()->cancel_gc();
    } else if (heap->is_concurrent_old_mark_in_progress()) {
      // Purge the SATB buffers, transferring any valid, old pointers to the
      // old generation mark queue. Any pointers in a young region will be
      // abandoned.
      ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_transfer_satb);
      heap->old_generation()->transfer_pointers_from_satb();
    }
    {
      // After we swap card table below, the write-table is all clean, and the read table holds
      // cards dirty prior to the start of GC. Young and bootstrap collection will update
      // the write card table as a side effect of remembered set scanning. Global collection will
      // update the card table as a side effect of global marking of old objects.
      ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_swap_rset);
      _generation->swap_card_tables();
    }
  }

  if (ShenandoahVerify) {
    ShenandoahTimingsTracker v(ShenandoahPhaseTimings::init_mark_verify);
    heap->verifier()->verify_before_concmark();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  _generation->set_concurrent_mark_in_progress(true);

  start_mark();

  if (_do_old_gc_bootstrap) {
    shenandoah_assert_generational();
    // Update region state for both young and old regions
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
    ShenandoahInitMarkUpdateRegionStateClosure cl;
    heap->parallel_heap_region_iterate(&cl);
    heap->old_generation()->ref_processor()->reset_thread_locals();
  } else {
    // Update region state for only young regions
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
    ShenandoahInitMarkUpdateRegionStateClosure cl;
    _generation->parallel_heap_region_iterate(&cl);
  }

  // Weak reference processing
  ShenandoahReferenceProcessor* rp = _generation->ref_processor();
  rp->reset_thread_locals();
  rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());

  // Make above changes visible to worker threads
  OrderAccess::fence();

  // Arm nmethods for concurrent mark
  ShenandoahCodeRoots::arm_nmethods_for_mark();

  ShenandoahStackWatermark::change_epoch_id();
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_mark();
  }

  {
    ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_propagate_gc_state);
    heap->propagate_gc_state_to_all_threads();
  }
}

void ShenandoahConcurrentGC::op_mark_roots() {
  _mark.mark_concurrent_roots();
}

void ShenandoahConcurrentGC::op_mark() {
  _mark.concurrent_mark();
}

void ShenandoahConcurrentGC::op_final_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");

  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_no_forwarded();
  }

  if (!heap->cancelled_gc()) {
    _mark.finish_mark();
    assert(!heap->cancelled_gc(), "STW mark cannot OOM");

    // Notify JVMTI that the tagmap table will need cleaning.
    JvmtiTagMap::set_needs_cleaning();

    // The collection set is chosen by prepare_regions_and_collection_set(). Additionally, certain parameters have been
    // established to govern the evacuation efforts that are about to begin. Refer to comments on reserve members in
    // ShenandoahGeneration and ShenandoahOldGeneration for more detail.
    _generation->prepare_regions_and_collection_set(true /*concurrent*/);

    // Has to be done after cset selection
    heap->prepare_concurrent_roots();

    if (!heap->collection_set()->is_empty()) {
      LogTarget(Debug, gc, cset) lt;
      if (lt.is_enabled()) {
        ResourceMark rm;
        LogStream ls(lt);
        heap->collection_set()->print_on(&ls);
      }

      if (ShenandoahVerify) {
        ShenandoahTimingsTracker v(ShenandoahPhaseTimings::final_mark_verify);
        heap->verifier()->verify_before_evacuation();
      }

      heap->set_evacuation_in_progress(true);
      // From here on, we need to update references.
      heap->set_has_forwarded_objects(true);

      // Arm nmethods/stack for concurrent processing
      ShenandoahCodeRoots::arm_nmethods_for_evac();
      ShenandoahStackWatermark::change_epoch_id();

      if (ShenandoahPacing) {
        heap->pacer()->setup_for_evac();
      }
    } else {
      if (ShenandoahVerify) {
        ShenandoahTimingsTracker v(ShenandoahPhaseTimings::final_mark_verify);
        if (has_in_place_promotions(heap)) {
          heap->verifier()->verify_after_concmark_with_promotions();
        } else {
          heap->verifier()->verify_after_concmark();
        }
      }
    }
  }

  {
    ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_propagate_gc_state);
    heap->propagate_gc_state_to_all_threads();
  }
}

bool ShenandoahConcurrentGC::has_in_place_promotions(ShenandoahHeap* heap) {
  return heap->mode()->is_generational() && heap->old_generation()->has_in_place_promotions();
}

class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
private:
  OopClosure* const _oops;
public:
  explicit ShenandoahConcurrentEvacThreadClosure(OopClosure* oops) : _oops(oops) {}

  void do_thread(Thread* thread) override {
    JavaThread* const jt = JavaThread::cast(thread);
    StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc);
  }
};

class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask {
private:
  ShenandoahJavaThreadsIterator _java_threads;

public:
  explicit ShenandoahConcurrentEvacUpdateThreadTask(uint n_workers) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Thread Roots"),
    _java_threads(ShenandoahPhaseTimings::conc_thread_roots, n_workers) {
  }

  void work(uint worker_id) override {
    // The ShenandoahEvacOOMScope has to be set up by ShenandoahContextEvacuateUpdateRootsClosure.
    // Otherwise, it may deadlock with the watermark lock.
    ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
    ShenandoahConcurrentEvacThreadClosure thr_cl(&oops_cl);
    _java_threads.threads_do(&thr_cl, worker_id);
  }
};

void ShenandoahConcurrentGC::op_thread_roots() {
  const ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_evacuation_in_progress(), "Checked by caller");
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
  ShenandoahConcurrentEvacUpdateThreadTask task(heap->workers()->active_workers());
  heap->workers()->run_task(&task);
}

void ShenandoahConcurrentGC::op_weak_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
  // Concurrent weak refs processing
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
  if (heap->gc_cause() == GCCause::_wb_breakpoint) {
    ShenandoahBreakpoint::at_after_reference_processing_started();
  }
  _generation->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
}

class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _mark_context;
  bool _evac_in_progress;
  Thread* const _thread;

public:
  ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
  void do_oop(oop* p);
  void do_oop(narrowOop* p);
};

ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
  _heap(ShenandoahHeap::heap()),
  _mark_context(ShenandoahHeap::heap()->marking_context()),
  _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
  _thread(Thread::current()) {
}

void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
  const oop obj = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(obj)) {
    if (!_mark_context->is_marked(obj)) {
      shenandoah_assert_generations_reconciled();
      if (_heap->is_in_active_generation(obj)) {
        // Note: The obj is dead here. Do not touch it, just clear.
        ShenandoahHeap::atomic_clear_oop(p, obj);
      }
    } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
      oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      if (resolved == obj) {
        resolved = _heap->evacuate_object(obj, _thread);
      }
      shenandoah_assert_not_in_cset_except(p, resolved, _heap->cancelled_gc());
      ShenandoahHeap::atomic_update_oop(resolved, p, obj);
    }
  }
}

void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
  ShouldNotReachHere();
}

class ShenandoahIsCLDAliveClosure : public CLDClosure {
public:
  void do_cld(ClassLoaderData* cld) {
    cld->is_alive();
  }
};

class ShenandoahIsNMethodAliveClosure : public NMethodClosure {
public:
  void do_nmethod(nmethod* n) {
    n->is_unloading();
  }
};

// This task not only evacuates/updates marked weak roots, but also nulls out
// dead weak roots.
class ShenandoahConcurrentWeakRootsEvacUpdateTask : public WorkerTask {
private:
  ShenandoahVMWeakRoots<true /*concurrent*/> _vm_roots;

  // Roots related to concurrent class unloading
  ShenandoahClassLoaderDataRoots<true /* concurrent */>
    _cld_roots;
  ShenandoahConcurrentNMethodIterator _nmethod_itr;
  ShenandoahPhaseTimings::Phase _phase;

public:
  ShenandoahConcurrentWeakRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Weak Roots"),
    _vm_roots(phase),
    _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers(), false /*heap iteration*/),
    _nmethod_itr(ShenandoahCodeRoots::table()),
    _phase(phase) {}

  ~ShenandoahConcurrentWeakRootsEvacUpdateTask() {
    // Notify runtime data structures of potentially dead oops
    _vm_roots.report_num_dead();
  }

  void work(uint worker_id) {
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    ShenandoahSuspendibleThreadSetJoiner sts_join;
    {
      ShenandoahEvacOOMScope oom;
      // jni_roots and weak_roots are OopStorage backed roots, concurrent iteration
      // may race against OopStorage::release() calls.
      ShenandoahEvacUpdateCleanupOopStorageRootsClosure cl;
      _vm_roots.oops_do(&cl, worker_id);
    }

    // If we are going to perform concurrent class unloading later on, we need to
    // clean up the weak oops in CLD and determine nmethod's unloading state, so that we
    // can clean up immediate garbage sooner.
    if (ShenandoahHeap::heap()->unload_classes()) {
      // Applies ShenandoahIsCLDAlive closure to CLDs, native barrier will either null the
      // CLD's holder or evacuate it.
      {
        ShenandoahIsCLDAliveClosure is_cld_alive;
        _cld_roots.cld_do(&is_cld_alive, worker_id);
      }

      // Applies ShenandoahIsNMethodAliveClosure to registered nmethods.
      // The closure calls nmethod->is_unloading(). The is_unloading
      // state is cached, therefore, during concurrent class unloading phase,
      // we will not touch the metadata of unloading nmethods
      {
        ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
        ShenandoahIsNMethodAliveClosure is_nmethod_alive;
        _nmethod_itr.nmethods_do(&is_nmethod_alive);
      }
    }
  }
};

void ShenandoahConcurrentGC::op_weak_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
  {
    // Concurrent weak root processing
    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_work);
    ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots_work);
    ShenandoahConcurrentWeakRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_weak_roots_work);
    heap->workers()->run_task(&task);
  }

  {
    // It is possible for mutators executing the load reference barrier to have
    // loaded an oop through a weak handle that has since been nulled out by
    // weak root processing. Handshaking here forces them to complete the
    // barrier before the GC cycle continues and does something that would
    // change the evaluation of the barrier (for example, resetting the TAMS
    // on trashed regions could make an oop appear to be marked _after_ the
    // region has been recycled).
    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_rendezvous);
    heap->rendezvous_threads("Shenandoah Concurrent Weak Roots");
  }
}

void ShenandoahConcurrentGC::op_class_unloading() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress() &&
         heap->unload_classes(),
         "Checked by caller");
  heap->do_class_unloading();
}

class ShenandoahEvacUpdateCodeCacheClosure : public NMethodClosure {
private:
  BarrierSetNMethod* const _bs;
  ShenandoahEvacuateUpdateMetadataClosure _cl;

public:
  ShenandoahEvacUpdateCodeCacheClosure() :
    _bs(BarrierSet::barrier_set()->barrier_set_nmethod()),
    _cl() {
  }

  void do_nmethod(nmethod* n) {
    ShenandoahNMethod* data = ShenandoahNMethod::gc_data(n);
    ShenandoahReentrantLocker locker(data->lock());
    // Setup EvacOOM scope below reentrant lock to avoid deadlock with
    // nmethod_entry_barrier
    ShenandoahEvacOOMScope oom;
    data->oops_do(&_cl, true /*fix relocation*/);
    _bs->disarm(n);
  }
};

class ShenandoahConcurrentRootsEvacUpdateTask : public WorkerTask {
private:
  ShenandoahPhaseTimings::Phase _phase;
  ShenandoahVMRoots<true /*concurrent*/> _vm_roots;
  ShenandoahClassLoaderDataRoots<true /*concurrent*/>
    _cld_roots;
  ShenandoahConcurrentNMethodIterator _nmethod_itr;

public:
  ShenandoahConcurrentRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Strong Roots"),
    _phase(phase),
    _vm_roots(phase),
    _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers(), false /*heap iteration*/),
    _nmethod_itr(ShenandoahCodeRoots::table()) {}

  void work(uint worker_id) {
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    {
      ShenandoahEvacOOMScope oom;
      {
        // vm_roots and weak_roots are OopStorage backed roots, concurrent iteration
        // may race against OopStorage::release() calls.
        ShenandoahContextEvacuateUpdateRootsClosure cl;
        _vm_roots.oops_do<ShenandoahContextEvacuateUpdateRootsClosure>(&cl, worker_id);
      }

      {
        ShenandoahEvacuateUpdateMetadataClosure cl;
        CLDToOopClosure clds(&cl, ClassLoaderData::_claim_strong);
        _cld_roots.cld_do(&clds, worker_id);
      }
    }

    // Cannot setup ShenandoahEvacOOMScope here, due to potential deadlock with nmethod_entry_barrier.
    if (!ShenandoahHeap::heap()->unload_classes()) {
      ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
      ShenandoahEvacUpdateCodeCacheClosure cl;
      _nmethod_itr.nmethods_do(&cl);
    }
  }
};

void ShenandoahConcurrentGC::op_strong_roots() {
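  // Evacuate/update the remaining strong roots (VM roots, CLDs and, when classes are not being
  // unloaded, the code cache) concurrently, then drop the strong-root flag.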
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
  ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
  heap->workers()->run_task(&task);
  heap->set_concurrent_strong_root_in_progress(false);
}

void ShenandoahConcurrentGC::op_cleanup_early() {
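  // Recycle regions that became trash at final mark (immediate garbage), making them
  // available for allocation again.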
  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_cleanup(),
                              "cleanup early.");
  ShenandoahHeap::heap()->recycle_trash();
}

void ShenandoahConcurrentGC::op_evacuate() {
  ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
}

void ShenandoahConcurrentGC::op_init_update_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
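  // This pause only verifies the heap and primes the pacer; the actual reference updating
  // happens concurrently in op_update_refs(). It is skipped entirely unless ShenandoahVerify
  // or ShenandoahPacing is enabled (see collect()).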
  if (ShenandoahVerify) {
    ShenandoahTimingsTracker v(ShenandoahPhaseTimings::init_update_refs_verify);
    heap->verifier()->verify_before_update_refs();
  }
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_update_refs();
  }
}

void ShenandoahConcurrentGC::op_update_refs() {
  ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
}

class ShenandoahUpdateThreadClosure : public HandshakeClosure {
private:
  ShenandoahUpdateRefsClosure _cl;
public:
  ShenandoahUpdateThreadClosure();
  void do_thread(Thread* thread);
};

ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
  HandshakeClosure("Shenandoah Update Thread Roots") {
}

void ShenandoahUpdateThreadClosure::do_thread(Thread* thread) {
  if (thread->is_Java_thread()) {
    JavaThread* jt = JavaThread::cast(thread);
    ResourceMark rm;
    jt->oops_do(&_cl, nullptr);
  }
}

void ShenandoahConcurrentGC::op_update_thread_roots() {
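  // Update thread-root oops via a handshake with each Java thread; this runs
  // ShenandoahUpdateThreadClosure over every thread's stack roots without a global safepoint.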
  ShenandoahUpdateThreadClosure cl;
  Handshake::execute(&cl);
}

void ShenandoahConcurrentGC::op_final_update_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
  assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");

  heap->finish_concurrent_roots();

  // Clear cancelled GC, if set. On cancellation path, the block before would handle
  // everything.
  if (heap->cancelled_gc()) {
    heap->clear_cancelled_gc(true /* clear oom handler */);
  }

  // Has to be done before cset is clear
  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_in_to_space();
  }

  // If we are running in generational mode and this is an aging cycle, this will also age active
  // regions that haven't been used for allocation.
  heap->update_heap_region_states(true /*concurrent*/);

  heap->set_update_refs_in_progress(false);
  heap->set_has_forwarded_objects(false);

  if (heap->mode()->is_generational() && heap->is_concurrent_old_mark_in_progress()) {
    // When the SATB barrier is left on to support concurrent old gen mark, it may pick up writes to
    // objects in the collection set. After those objects are evacuated, the pointers in the
    // SATB are no longer safe. Once we have finished update references, we are guaranteed that
    // no more writes to the collection set are possible.
    //
    // This will transfer any old pointers in _active_ regions from the SATB to the old gen
    // mark queues. All other pointers will be discarded. This would also discard any pointers
    // in old regions that were included in a mixed evacuation. We aren't using the SATB filter
    // methods here because we cannot control when they execute. If the SATB filter runs _after_
    // a region has been recycled, we will not be able to detect the bad pointer.
    //
    // We are not concerned about skipping this step in abbreviated cycles because regions
    // with no live objects cannot have been written to and so cannot have entries in the SATB
    // buffers.
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_transfer_satb);
    heap->old_generation()->transfer_pointers_from_satb();

    // Aging_cycle is only relevant during evacuation cycle for individual objects and during final mark for
    // entire regions. Both of these relevant operations occur before final update refs.
    ShenandoahGenerationalHeap::heap()->set_aging_cycle(false);
  }

  if (ShenandoahVerify) {
    ShenandoahTimingsTracker v(ShenandoahPhaseTimings::final_update_refs_verify);
    heap->verifier()->verify_after_update_refs();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  heap->rebuild_free_set(true /*concurrent*/);

  {
    ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_propagate_gc_state);
    heap->propagate_gc_state_to_all_threads();
  }
}

bool ShenandoahConcurrentGC::entry_final_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  const char* msg = conc_final_roots_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_final_roots);
  EventMark em("%s", msg);
  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
                              msg);

  if (!heap->mode()->is_generational()) {
    heap->concurrent_final_roots();
  } else {
    if (!complete_abbreviated_cycle()) {
      return false;
    }
  }
  return true;
}

void ShenandoahConcurrentGC::op_verify_final_roots() {
  if (VerifyAfterGC) {
    Universe::verify();
  }
}

void ShenandoahConcurrentGC::op_cleanup_complete() {
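  // Recycle the collection set regions that update-refs has just turned into trash.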
  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_cleanup(),
                              "cleanup complete.");
  ShenandoahHeap::heap()->recycle_trash();
}

void ShenandoahConcurrentGC::op_reset_after_collect() {
  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
                              "reset after collection.");

  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (heap->mode()->is_generational()) {
    // If we are in the midst of an old gc bootstrap or an old marking, we want to leave the mark bit map of
    // the young generation intact. In particular, reference processing in the old generation may potentially
    // need the reachability of a young generation referent of a Reference object in the old generation.
    if (!_do_old_gc_bootstrap && !heap->is_concurrent_old_mark_in_progress()) {
      heap->young_generation()->reset_mark_bitmap<false>();
    }
  } else {
    _generation->reset_mark_bitmap<false>();
  }
}

bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
  if (ShenandoahHeap::heap()->cancelled_gc()) {
    _degen_point = point;
    return true;
  }
  return false;
}

const char* ShenandoahConcurrentGC::init_mark_event_message() const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
  if (heap->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Init Mark", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Init Mark", "");
  }
}

const char* ShenandoahConcurrentGC::final_mark_event_message() const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
         "Should not have forwarded objects during final mark, unless old gen concurrent mark is running");

  if (heap->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Mark", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Mark", "");
  }
}

const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
         "Should not have forwarded objects during concurrent mark, unless old gen concurrent mark is running");
  if (heap->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent marking", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent marking", "");
  }
}

const char* ShenandoahConcurrentGC::conc_reset_event_message() const {
  if (ShenandoahHeap::heap()->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent reset", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent reset", "");
  }
}

const char* ShenandoahConcurrentGC::conc_reset_after_collect_event_message() const {
  if (ShenandoahHeap::heap()->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent reset after collect", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent reset after collect", "");
  }
}

const char* ShenandoahConcurrentGC::verify_final_roots_event_message() const {
  if (ShenandoahHeap::heap()->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Verify Final Roots", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Verify Final Roots", "");
  }
}

const char* ShenandoahConcurrentGC::conc_final_roots_event_message() const {
  if (ShenandoahHeap::heap()->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent Final Roots", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent Final Roots", "");
  }
}

const char* ShenandoahConcurrentGC::conc_weak_refs_event_message() const {
  if (ShenandoahHeap::heap()->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent weak references", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent weak references", "");
  }
}

const char* ShenandoahConcurrentGC::conc_weak_roots_event_message() const {
  if (ShenandoahHeap::heap()->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent weak roots", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent weak roots", "");
  }
}

const char* ShenandoahConcurrentGC::conc_cleanup_event_message() const {
  if (ShenandoahHeap::heap()->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent cleanup", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent cleanup", "");
  }
}

const char* ShenandoahConcurrentGC::conc_init_update_refs_event_message() const {
  if (ShenandoahHeap::heap()->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent Init Update Refs", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent Init Update Refs", "");
  }
}