/*
 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shenandoah/shenandoahBreakpoint.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahLock.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahStackWatermark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "memory/allocation.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/events.hpp"

// Breakpoint support
class ShenandoahBreakpointGCScope : public StackObj {
private:
  const GCCause::Cause _cause;
public:
  ShenandoahBreakpointGCScope(GCCause::Cause cause) : _cause(cause) {
    if (cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::start_gc();
      ShenandoahBreakpoint::at_before_gc();
    }
  }

  ~ShenandoahBreakpointGCScope() {
    if (_cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::at_after_gc();
    }
  }
};

class ShenandoahBreakpointMarkScope : public StackObj {
private:
  const GCCause::Cause _cause;
public:
  ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
    if (_cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::at_after_marking_started();
    }
  }

  ~ShenandoahBreakpointMarkScope() {
    if (_cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::at_before_marking_completed();
    }
  }
};

ShenandoahConcurrentGC::ShenandoahConcurrentGC() :
  _mark(),
  _degen_point(ShenandoahDegenPoint::_degenerated_unset) {
}
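// The degeneration point records how far the concurrent cycle got before it was
// cancelled. When collect() bails out, the control thread reads this point back
// via degen_point() and continues the cycle from there under a stop-the-world
// degenerated GC (see check_cancellation_and_abort() near the end of this file).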
ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
  return _degen_point;
}

void ShenandoahConcurrentGC::cancel() {
  ShenandoahConcurrentMark::cancel();
}

bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);

  // Reset for upcoming marking
  entry_reset();

  // Start initial mark under STW
  vmop_entry_init_mark();

  {
    ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);
    // Concurrent mark roots
    entry_mark_roots();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_outside_cycle)) return false;

    // Continue concurrent mark
    entry_mark();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) return false;
  }

  // Complete marking under STW, and start evacuation
  vmop_entry_final_mark();

  // Concurrent stack processing
  if (heap->is_evacuation_in_progress()) {
    entry_thread_roots();
  }

  // Process weak roots that might still point to regions that would be broken by cleanup
  if (heap->is_concurrent_weak_root_in_progress()) {
    entry_weak_refs();
    entry_weak_roots();
  }

  // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
  // the space. This would be the last action if there is nothing to evacuate.
  entry_cleanup_early();

  {
    ShenandoahHeapLocker locker(heap->lock());
    heap->free_set()->log_status();
  }

  // Perform concurrent class unloading
  if (heap->unload_classes() &&
      heap->is_concurrent_weak_root_in_progress()) {
    entry_class_unloading();
  }

  // Process strong roots.
  // This may be skipped if there is nothing to update/evacuate.
  // If so, strong_root_in_progress would be unset.
  if (heap->is_concurrent_strong_root_in_progress()) {
    entry_strong_roots();
  }

  // Continue the cycle with evacuation and optional update-refs.
  // This may be skipped if there is nothing to evacuate.
  // If so, evac_in_progress would be unset by collection set preparation code.
  if (heap->is_evacuation_in_progress()) {
    // Concurrently evacuate
    entry_evacuate();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) return false;

    // Perform update-refs phase.
    vmop_entry_init_updaterefs();
    entry_updaterefs();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;

    // Concurrent update thread roots
    entry_update_thread_roots();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;

    vmop_entry_final_updaterefs();

    // Update-refs has freed up the collection set; kick cleanup to reclaim the space.
    entry_cleanup_complete();
  } else {
    vmop_entry_final_roots();
  }

  return true;
}
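// The entry protocol below is layered. The vmop_entry_* methods run on the
// control thread: they account "gross" pause time (including the time it takes
// to reach the safepoint) and submit a VM operation. The VM operation calls the
// matching entry_* method at the safepoint, which accounts "net" pause time,
// sets up workers and the JFR event, and delegates to op_* for the actual work.
// Concurrent phases follow the same entry_*/op_* pattern, minus the safepoint:
//
//   vmop_entry_init_mark()       // control thread: gross timing, VM op submit
//     VM_ShenandoahInitMark      // safepoint is reached
//       entry_init_mark()        // net timing, worker setup
//         op_init_mark()         // the actual work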
void ShenandoahConcurrentGC::vmop_entry_init_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahInitMark op(this);
  VMThread::execute(&op); // jump to entry_init_mark() under safepoint
}

void ShenandoahConcurrentGC::vmop_entry_final_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalMarkStartEvac op(this);
  VMThread::execute(&op); // jump to entry_final_mark() under safepoint
}

void ShenandoahConcurrentGC::vmop_entry_init_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_update_refs_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahInitUpdateRefs op(this);
  VMThread::execute(&op);
}

void ShenandoahConcurrentGC::vmop_entry_final_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalUpdateRefs op(this);
  VMThread::execute(&op);
}

void ShenandoahConcurrentGC::vmop_entry_final_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_roots_gross);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalRoots op(this);
  VMThread::execute(&op);
}

void ShenandoahConcurrentGC::entry_init_mark() {
  const char* msg = init_mark_event_message();
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
                              "init marking");

  op_init_mark();
}

void ShenandoahConcurrentGC::entry_final_mark() {
  const char* msg = final_mark_event_message();
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
                              "final marking");

  op_final_mark();
}

void ShenandoahConcurrentGC::entry_init_updaterefs() {
  static const char* msg = "Pause Init Update Refs";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs);
  EventMark em("%s", msg);

  // No workers used in this phase, no setup required
  op_init_updaterefs();
}
void ShenandoahConcurrentGC::entry_final_updaterefs() {
  static const char* msg = "Pause Final Update Refs";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
                              "final reference update");

  op_final_updaterefs();
}

void ShenandoahConcurrentGC::entry_final_roots() {
  static const char* msg = "Pause Final Roots";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots);
  EventMark em("%s", msg);

  op_final_roots();
}

void ShenandoahConcurrentGC::entry_reset() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent reset";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
                              "concurrent reset");

  heap->try_inject_alloc_failure();
  op_reset();
}

void ShenandoahConcurrentGC::entry_mark_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = "Concurrent marking roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent marking roots");

  heap->try_inject_alloc_failure();
  op_mark_roots();
}

void ShenandoahConcurrentGC::entry_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = conc_mark_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent marking");

  heap->try_inject_alloc_failure();
  op_mark();
}

void ShenandoahConcurrentGC::entry_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  static const char* msg = "Concurrent thread roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_thread_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              msg);

  heap->try_inject_alloc_failure();
  op_thread_roots();
}

void ShenandoahConcurrentGC::entry_weak_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  static const char* msg = "Concurrent weak references";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_refs_processing(),
                              "concurrent weak references");

  heap->try_inject_alloc_failure();
  op_weak_refs();
}
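// Note the distinction between the two weak phases: "weak references" drives the
// ShenandoahReferenceProcessor over java.lang.ref objects discovered during
// marking, while "weak roots" (below) updates or nulls out the VM-internal,
// OopStorage-backed weak roots (e.g. JNI weak handles).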
void ShenandoahConcurrentGC::entry_weak_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent weak roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent weak root");

  heap->try_inject_alloc_failure();
  op_weak_roots();
}

void ShenandoahConcurrentGC::entry_class_unloading() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent class unloading";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_class_unload);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent class unloading");

  heap->try_inject_alloc_failure();
  op_class_unloading();
}

void ShenandoahConcurrentGC::entry_strong_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent strong roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_strong_roots);
  EventMark em("%s", msg);

  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_strong_roots);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent strong root");

  heap->try_inject_alloc_failure();
  op_strong_roots();
}

void ShenandoahConcurrentGC::entry_cleanup_early() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent cleanup";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_early, true /* log_heap_usage */);
  EventMark em("%s", msg);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  op_cleanup_early();
}

void ShenandoahConcurrentGC::entry_evacuate() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  static const char* msg = "Concurrent evacuation";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_evac);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
                              "concurrent evacuation");

  heap->try_inject_alloc_failure();
  op_evacuate();
}

void ShenandoahConcurrentGC::entry_update_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  static const char* msg = "Concurrent update thread roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_thread_roots);
  EventMark em("%s", msg);

  // No workers used in this phase, no setup required
  heap->try_inject_alloc_failure();
  op_update_thread_roots();
}
void ShenandoahConcurrentGC::entry_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent update references";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
                              "concurrent reference update");

  heap->try_inject_alloc_failure();
  op_updaterefs();
}

void ShenandoahConcurrentGC::entry_cleanup_complete() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent cleanup";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
  EventMark em("%s", msg);

  // This phase does not use workers, no setup required
  heap->try_inject_alloc_failure();
  op_cleanup_complete();
}

void ShenandoahConcurrentGC::op_reset() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_reset();
  }

  heap->prepare_gc();
}

class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
public:
  ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
    if (r->is_active()) {
      // Check if the region needs to update its TAMS. We have updated it already during
      // concurrent reset, so it is very likely we do not need another write here.
      if (_ctx->top_at_mark_start(r) != r->top()) {
        _ctx->capture_top_at_mark_start(r);
      }
    } else {
      assert(_ctx->top_at_mark_start(r) == r->top(),
             "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
    }
  }

  bool is_thread_safe() { return true; }
};

void ShenandoahConcurrentGC::start_mark() {
  _mark.start_mark();
}

void ShenandoahConcurrentGC::op_init_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");

  assert(heap->marking_context()->is_bitmap_clear(), "need clear marking bitmap");
  assert(!heap->marking_context()->is_complete(), "should not be complete");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");

  if (ShenandoahVerify) {
    heap->verifier()->verify_before_concmark();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  heap->set_concurrent_mark_in_progress(true);

  start_mark();

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
    ShenandoahInitMarkUpdateRegionStateClosure cl;
    heap->parallel_heap_region_iterate(&cl);
  }

  // Weak reference processing
  ShenandoahReferenceProcessor* rp = heap->ref_processor();
  rp->reset_thread_locals();
  rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());

  // Make above changes visible to worker threads
  OrderAccess::fence();

  // Arm nmethods for concurrent mark
  ShenandoahCodeRoots::arm_nmethods_for_mark();

  ShenandoahStackWatermark::change_epoch_id();
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_mark();
  }
}
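// Both marking steps below delegate to the ShenandoahConcurrentMark instance
// owned by this cycle: roots are scanned first, then marking proceeds while
// mutators keep running, with the SATB (snapshot-at-the-beginning) barrier
// keeping the marking wavefront consistent with concurrent updates.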
void ShenandoahConcurrentGC::op_mark_roots() {
  _mark.mark_concurrent_roots();
}

void ShenandoahConcurrentGC::op_mark() {
  _mark.concurrent_mark();
}

void ShenandoahConcurrentGC::op_final_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");

  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_no_forwarded();
  }

  if (!heap->cancelled_gc()) {
    _mark.finish_mark();
    assert(!heap->cancelled_gc(), "STW mark cannot OOM");

    // Notify JVMTI that the tagmap table will need cleaning.
    JvmtiTagMap::set_needs_cleaning();

    heap->prepare_regions_and_collection_set(true /*concurrent*/);

    // Has to be done after cset selection
    heap->prepare_concurrent_roots();

    if (!heap->collection_set()->is_empty()) {
      if (ShenandoahVerify) {
        heap->verifier()->verify_before_evacuation();
      }

      heap->set_evacuation_in_progress(true);
      // From here on, we need to update references.
      heap->set_has_forwarded_objects(true);

      // Verify before arming for concurrent processing.
      // Otherwise, verification can trigger stack processing.
      if (ShenandoahVerify) {
        heap->verifier()->verify_during_evacuation();
      }

      // Arm nmethods/stack for concurrent processing
      ShenandoahCodeRoots::arm_nmethods_for_evac();
      ShenandoahStackWatermark::change_epoch_id();

      if (ShenandoahPacing) {
        heap->pacer()->setup_for_evac();
      }
    } else {
      if (ShenandoahVerify) {
        heap->verifier()->verify_after_concmark();
      }

      if (VerifyAfterGC) {
        Universe::verify();
      }
    }
  }
}

class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
private:
  OopClosure* const _oops;

public:
  ShenandoahConcurrentEvacThreadClosure(OopClosure* oops);
  void do_thread(Thread* thread);
};

ShenandoahConcurrentEvacThreadClosure::ShenandoahConcurrentEvacThreadClosure(OopClosure* oops) :
  _oops(oops) {
}

void ShenandoahConcurrentEvacThreadClosure::do_thread(Thread* thread) {
  JavaThread* const jt = JavaThread::cast(thread);
  StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc);
}
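// With stack watermarks, only a small portion of each Java thread's stack is
// processed at the preceding safepoint; StackWatermarkSet::finish_processing()
// completes the rest here, cooperating with the running thread, so thread roots
// can be evacuated/updated without a dedicated pause.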
class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask {
private:
  ShenandoahJavaThreadsIterator _java_threads;

public:
  ShenandoahConcurrentEvacUpdateThreadTask(uint n_workers) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Thread Roots"),
    _java_threads(ShenandoahPhaseTimings::conc_thread_roots, n_workers) {
  }

  void work(uint worker_id) {
    // ShenandoahEvacOOMScope has to be setup by ShenandoahContextEvacuateUpdateRootsClosure.
    // Otherwise, may deadlock with watermark lock
    ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
    ShenandoahConcurrentEvacThreadClosure thr_cl(&oops_cl);
    _java_threads.threads_do(&thr_cl, worker_id);
  }
};

void ShenandoahConcurrentGC::op_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_evacuation_in_progress(), "Checked by caller");
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
  ShenandoahConcurrentEvacUpdateThreadTask task(heap->workers()->active_workers());
  heap->workers()->run_task(&task);
}

void ShenandoahConcurrentGC::op_weak_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
  // Concurrent weak refs processing
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
  if (heap->gc_cause() == GCCause::_wb_breakpoint) {
    ShenandoahBreakpoint::at_after_reference_processing_started();
  }
  heap->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
}

class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _mark_context;
  bool _evac_in_progress;
  Thread* const _thread;

public:
  ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
  void do_oop(oop* p);
  void do_oop(narrowOop* p);
};

ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
  _heap(ShenandoahHeap::heap()),
  _mark_context(ShenandoahHeap::heap()->marking_context()),
  _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
  _thread(Thread::current()) {
}

void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
  const oop obj = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(obj)) {
    if (!_mark_context->is_marked(obj)) {
      // Unmarked means dead: clear the slot atomically, racing against mutators.
      shenandoah_assert_correct(p, obj);
      ShenandoahHeap::atomic_clear_oop(p, obj);
    } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
      oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      if (resolved == obj) {
        resolved = _heap->evacuate_object(obj, _thread);
      }
      ShenandoahHeap::atomic_update_oop(resolved, p, obj);
      assert(_heap->cancelled_gc() ||
             _mark_context->is_marked(resolved) && !_heap->in_collection_set(resolved),
             "Sanity");
    }
  }
}

void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
  ShouldNotReachHere();
}

// The two closures below are applied for their side effects only: is_alive() and
// is_unloading() compute and cache the liveness/unloading state, which the
// concurrent class unloading phase later relies on.
class ShenandoahIsCLDAliveClosure : public CLDClosure {
public:
  void do_cld(ClassLoaderData* cld) {
    cld->is_alive();
  }
};

class ShenandoahIsNMethodAliveClosure: public NMethodClosure {
public:
  void do_nmethod(nmethod* n) {
    n->is_unloading();
  }
};
// This task not only evacuates/updates marked weak roots, but also "nulls"
// out dead weak roots.
class ShenandoahConcurrentWeakRootsEvacUpdateTask : public WorkerTask {
private:
  ShenandoahVMWeakRoots<true /*concurrent*/> _vm_roots;

  // Roots related to concurrent class unloading
  ShenandoahClassLoaderDataRoots<true /* concurrent */> _cld_roots;
  ShenandoahConcurrentNMethodIterator _nmethod_itr;
  ShenandoahPhaseTimings::Phase _phase;

public:
  ShenandoahConcurrentWeakRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Weak Roots"),
    _vm_roots(phase),
    _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers(), false /*heap iteration*/),
    _nmethod_itr(ShenandoahCodeRoots::table()),
    _phase(phase) {}

  ~ShenandoahConcurrentWeakRootsEvacUpdateTask() {
    // Notify runtime data structures of potentially dead oops
    _vm_roots.report_num_dead();
  }

  void work(uint worker_id) {
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    ShenandoahSuspendibleThreadSetJoiner sts_join;
    {
      ShenandoahEvacOOMScope oom;
      // jni_roots and weak_roots are OopStorage backed roots, concurrent iteration
      // may race against OopStorage::release() calls.
      ShenandoahEvacUpdateCleanupOopStorageRootsClosure cl;
      _vm_roots.oops_do(&cl, worker_id);
    }

    // If we are going to perform concurrent class unloading later on, we need to
    // clean up the weak oops in CLDs and determine the nmethods' unloading state,
    // so that we can clean up immediate garbage sooner.
    if (ShenandoahHeap::heap()->unload_classes()) {
      // Applies ShenandoahIsCLDAliveClosure to CLDs; the native barrier will either
      // null the CLD's holder or evacuate it.
      {
        ShenandoahIsCLDAliveClosure is_cld_alive;
        _cld_roots.cld_do(&is_cld_alive, worker_id);
      }

      // Applies ShenandoahIsNMethodAliveClosure to registered nmethods.
      // The closure calls nmethod->is_unloading(). The is_unloading
      // state is cached; therefore, during the concurrent class unloading phase,
      // we will not touch the metadata of unloading nmethods.
      {
        ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
        ShenandoahIsNMethodAliveClosure is_nmethod_alive;
        _nmethod_itr.nmethods_do(&is_nmethod_alive);
      }
    }
  }
};
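// The weak-root task above is followed by a handshake with all Java threads
// (rendezvous_threads() below): once every thread has crossed the rendezvous,
// no mutator can still be holding a reference that the task has just nulled out.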
void ShenandoahConcurrentGC::op_weak_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
  // Concurrent weak root processing
  {
    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_work);
    ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots_work);
    ShenandoahConcurrentWeakRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_weak_roots_work);
    heap->workers()->run_task(&task);
  }

  // Perform handshake to flush out dead oops
  {
    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_rendezvous);
    heap->rendezvous_threads();
  }
}

void ShenandoahConcurrentGC::op_class_unloading() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert (heap->is_concurrent_weak_root_in_progress() &&
          heap->unload_classes(),
          "Checked by caller");
  heap->do_class_unloading();
}

class ShenandoahEvacUpdateCodeCacheClosure : public NMethodClosure {
private:
  BarrierSetNMethod* const _bs;
  ShenandoahEvacuateUpdateMetadataClosure _cl;

public:
  ShenandoahEvacUpdateCodeCacheClosure() :
    _bs(BarrierSet::barrier_set()->barrier_set_nmethod()),
    _cl() {
  }

  void do_nmethod(nmethod* n) {
    ShenandoahNMethod* data = ShenandoahNMethod::gc_data(n);
    ShenandoahReentrantLocker locker(data->lock());
    // Setup EvacOOM scope below reentrant lock to avoid deadlock with
    // nmethod_entry_barrier
    ShenandoahEvacOOMScope oom;
    data->oops_do(&_cl, true /*fix relocation*/);
    _bs->disarm(n);
  }
};
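// Concurrent strong root processing. Note that code cache roots are processed
// here only when class unloading is off; when it is on, nmethods are handled by
// the concurrent class unloading path above instead.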
class ShenandoahConcurrentRootsEvacUpdateTask : public WorkerTask {
private:
  ShenandoahPhaseTimings::Phase _phase;
  ShenandoahVMRoots<true /*concurrent*/> _vm_roots;
  ShenandoahClassLoaderDataRoots<true /*concurrent*/> _cld_roots;
  ShenandoahConcurrentNMethodIterator _nmethod_itr;

public:
  ShenandoahConcurrentRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Strong Roots"),
    _phase(phase),
    _vm_roots(phase),
    _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers(), false /*heap iteration*/),
    _nmethod_itr(ShenandoahCodeRoots::table()) {}

  void work(uint worker_id) {
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    {
      ShenandoahEvacOOMScope oom;
      {
        // vm_roots and weak_roots are OopStorage backed roots, concurrent iteration
        // may race against OopStorage::release() calls.
        ShenandoahContextEvacuateUpdateRootsClosure cl;
        _vm_roots.oops_do<ShenandoahContextEvacuateUpdateRootsClosure>(&cl, worker_id);
      }

      {
        ShenandoahEvacuateUpdateMetadataClosure cl;
        CLDToOopClosure clds(&cl, ClassLoaderData::_claim_strong);
        _cld_roots.cld_do(&clds, worker_id);
      }
    }

    // Cannot setup ShenandoahEvacOOMScope here, due to potential deadlock with nmethod_entry_barrier.
    if (!ShenandoahHeap::heap()->unload_classes()) {
      ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
      ShenandoahEvacUpdateCodeCacheClosure cl;
      _nmethod_itr.nmethods_do(&cl);
    }
  }
};

void ShenandoahConcurrentGC::op_strong_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
  ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
  heap->workers()->run_task(&task);
  heap->set_concurrent_strong_root_in_progress(false);
}

void ShenandoahConcurrentGC::op_cleanup_early() {
  ShenandoahHeap::heap()->free_set()->recycle_trash();
}

void ShenandoahConcurrentGC::op_evacuate() {
  ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
}

void ShenandoahConcurrentGC::op_init_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->set_evacuation_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);
  heap->prepare_update_heap_references(true /*concurrent*/);
  heap->set_update_refs_in_progress(true);

  if (ShenandoahPacing) {
    heap->pacer()->setup_for_updaterefs();
  }
}

void ShenandoahConcurrentGC::op_updaterefs() {
  ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
}

class ShenandoahUpdateThreadClosure : public HandshakeClosure {
private:
  ShenandoahUpdateRefsClosure _cl;
public:
  ShenandoahUpdateThreadClosure();
  void do_thread(Thread* thread);
};

ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
  HandshakeClosure("Shenandoah Update Thread Roots") {
}

void ShenandoahUpdateThreadClosure::do_thread(Thread* thread) {
  if (thread->is_Java_thread()) {
    JavaThread* jt = JavaThread::cast(thread);
    ResourceMark rm;
    jt->oops_do(&_cl, nullptr);
  }
}

void ShenandoahConcurrentGC::op_update_thread_roots() {
  ShenandoahUpdateThreadClosure cl;
  Handshake::execute(&cl);
}
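// By the time final update-refs runs, all heap references have been updated, so
// this pause only finishes concurrent root updating, drops the forwarding state,
// optionally verifies, and rebuilds the free set.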
void ShenandoahConcurrentGC::op_final_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
  assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");

  heap->finish_concurrent_roots();

  // Clear cancelled GC, if set. On cancellation path, the block before would handle
  // everything.
  if (heap->cancelled_gc()) {
    heap->clear_cancelled_gc();
  }

  // Has to be done before the cset is cleared
  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_in_to_space();
  }

  heap->update_heap_region_states(true /*concurrent*/);

  heap->set_update_refs_in_progress(false);
  heap->set_has_forwarded_objects(false);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_updaterefs();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  heap->rebuild_free_set(true /*concurrent*/);
}

void ShenandoahConcurrentGC::op_final_roots() {
  ShenandoahHeap::heap()->set_concurrent_weak_root_in_progress(false);
}

void ShenandoahConcurrentGC::op_cleanup_complete() {
  ShenandoahHeap::heap()->free_set()->recycle_trash();
}

bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
  if (ShenandoahHeap::heap()->cancelled_gc()) {
    _degen_point = point;
    return true;
  }
  return false;
}

const char* ShenandoahConcurrentGC::init_mark_event_message() const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
  if (heap->unload_classes()) {
    return "Pause Init Mark (unload classes)";
  } else {
    return "Pause Init Mark";
  }
}

const char* ShenandoahConcurrentGC::final_mark_event_message() const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
  if (heap->unload_classes()) {
    return "Pause Final Mark (unload classes)";
  } else {
    return "Pause Final Mark";
  }
}

const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
  if (heap->unload_classes()) {
    return "Concurrent marking (unload classes)";
  } else {
    return "Concurrent marking";
  }
}