/*
 * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shenandoah/shenandoahBreakpoint.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahLock.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahStackWatermark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "memory/allocation.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/events.hpp"

// Breakpoint support
class ShenandoahBreakpointGCScope : public StackObj {
private:
  const GCCause::Cause _cause;
public:
  ShenandoahBreakpointGCScope(GCCause::Cause cause) : _cause(cause) {
    if (cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::start_gc();
      ShenandoahBreakpoint::at_before_gc();
    }
  }

  ~ShenandoahBreakpointGCScope() {
    if (_cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::at_after_gc();
    }
  }
};

class ShenandoahBreakpointMarkScope : public StackObj {
private:
  const GCCause::Cause _cause;
public:
  ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
    if (_cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::at_after_marking_started();
    }
  }

  ~ShenandoahBreakpointMarkScope() {
    if (_cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::at_before_marking_completed();
    }
  }
};

ShenandoahConcurrentGC::ShenandoahConcurrentGC() :
  _mark(),
  _degen_point(ShenandoahDegenPoint::_degenerated_unset) {
}

ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
  return _degen_point;
}
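
// How this is typically driven (a sketch, not the authoritative caller):
// the regulator thread constructs a ShenandoahConcurrentGC, runs collect(),
// and on a false return falls back to Degenerated GC from the recorded point:
//
//   ShenandoahConcurrentGC gc;
//   if (!gc.collect(cause)) {
//     // Cycle was cancelled; continue under STW from gc.degen_point().
//   }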
void ShenandoahConcurrentGC::cancel() {
  ShenandoahConcurrentMark::cancel();
}

bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);

  // Reset for upcoming marking
  entry_reset();

  // Start initial mark under STW
  vmop_entry_init_mark();

  {
    ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);
    // Concurrent mark roots
    entry_mark_roots();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_outside_cycle)) return false;

    // Continue concurrent mark
    entry_mark();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) return false;
  }

  // Complete marking under STW, and start evacuation
  vmop_entry_final_mark();

  // Concurrent stack processing
  if (heap->is_evacuation_in_progress()) {
    entry_thread_roots();
  }

  // Process weak roots that might still point to regions that would be broken by cleanup
  if (heap->is_concurrent_weak_root_in_progress()) {
    entry_weak_refs();
    entry_weak_roots();
  }

  // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
  // the space. This would be the last action if there is nothing to evacuate.
  entry_cleanup_early();

  {
    ShenandoahHeapLocker locker(heap->lock());
    heap->free_set()->log_status();
  }

  // Perform concurrent class unloading
  if (heap->unload_classes() &&
      heap->is_concurrent_weak_root_in_progress()) {
    entry_class_unloading();
  }

  // Processing strong roots
  // This may be skipped if there is nothing to update/evacuate.
  // If so, strong_root_in_progress would be unset.
  if (heap->is_concurrent_strong_root_in_progress()) {
    entry_strong_roots();
  }

  // Continue the cycle with evacuation and optional update-refs.
  // This may be skipped if there is nothing to evacuate.
  // If so, evac_in_progress would be unset by collection set preparation code.
  if (heap->is_evacuation_in_progress()) {
    // Concurrently evacuate
    entry_evacuate();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) return false;

    // Perform update-refs phase.
    vmop_entry_init_updaterefs();
    entry_updaterefs();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;

    // Concurrent update thread roots
    entry_update_thread_roots();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;

    vmop_entry_final_updaterefs();

    // Update references freed up collection set, kick the cleanup to reclaim the space.
    entry_cleanup_complete();
  } else {
    vmop_entry_final_roots();
  }

  return true;
}
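
// The phases above come in three layers, named by prefix:
//
//   vmop_entry_*  Issues a VM operation; accounts the "gross" pause time,
//                 which includes time to reach the safepoint.
//   entry_*       Runs inside the pause (or concurrently, for the concurrent
//                 phases): accounts "net" phase time, logs the phase message,
//                 and sizes the worker pool for the phase.
//   op_*          Does the actual work.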
void ShenandoahConcurrentGC::vmop_entry_init_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahInitMark op(this);
  VMThread::execute(&op); // jump to entry_init_mark() under safepoint
}

void ShenandoahConcurrentGC::vmop_entry_final_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalMarkStartEvac op(this);
  VMThread::execute(&op); // jump to entry_final_mark under safepoint
}

void ShenandoahConcurrentGC::vmop_entry_init_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_update_refs_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahInitUpdateRefs op(this);
  VMThread::execute(&op);
}

void ShenandoahConcurrentGC::vmop_entry_final_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalUpdateRefs op(this);
  VMThread::execute(&op);
}

void ShenandoahConcurrentGC::vmop_entry_final_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_roots_gross);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalRoots op(this);
  VMThread::execute(&op);
}
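
// Note: try_inject_alloc_failure() above is a testing hook; with the
// appropriate diagnostic flag set, it fakes an allocation failure right
// before a phase, which exercises cancellation and the Degenerated GC fallback.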
void ShenandoahConcurrentGC::entry_init_mark() {
  const char* msg = init_mark_event_message();
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
                              "init marking");

  op_init_mark();
}

void ShenandoahConcurrentGC::entry_final_mark() {
  const char* msg = final_mark_event_message();
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
                              "final marking");

  op_final_mark();
}

void ShenandoahConcurrentGC::entry_init_updaterefs() {
  static const char* msg = "Pause Init Update Refs";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs);
  EventMark em("%s", msg);

  // No workers used in this phase, no setup required
  op_init_updaterefs();
}

void ShenandoahConcurrentGC::entry_final_updaterefs() {
  static const char* msg = "Pause Final Update Refs";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
                              "final reference update");

  op_final_updaterefs();
}

void ShenandoahConcurrentGC::entry_final_roots() {
  static const char* msg = "Pause Final Roots";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots);
  EventMark em("%s", msg);

  op_final_roots();
}
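
// The entry_* functions below are the concurrent counterparts: they run on
// the GC control thread while mutators stay live, so they also publish the
// concurrent collection counters and may inject allocation failures.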
void ShenandoahConcurrentGC::entry_reset() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent reset";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
                              "concurrent reset");

  heap->try_inject_alloc_failure();
  op_reset();
}

void ShenandoahConcurrentGC::entry_mark_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = "Concurrent marking roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent marking roots");

  heap->try_inject_alloc_failure();
  op_mark_roots();
}

void ShenandoahConcurrentGC::entry_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = conc_mark_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent marking");

  heap->try_inject_alloc_failure();
  op_mark();
}

void ShenandoahConcurrentGC::entry_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  static const char* msg = "Concurrent thread roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_thread_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              msg);

  heap->try_inject_alloc_failure();
  op_thread_roots();
}

void ShenandoahConcurrentGC::entry_weak_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  static const char* msg = "Concurrent weak references";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_refs_processing(),
                              "concurrent weak references");

  heap->try_inject_alloc_failure();
  op_weak_refs();
}

void ShenandoahConcurrentGC::entry_weak_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent weak roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent weak root");

  heap->try_inject_alloc_failure();
  op_weak_roots();
}

void ShenandoahConcurrentGC::entry_class_unloading() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent class unloading";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_class_unload);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent class unloading");

  heap->try_inject_alloc_failure();
  op_class_unloading();
}

void ShenandoahConcurrentGC::entry_strong_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent strong roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_strong_roots);
  EventMark em("%s", msg);

  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_strong_roots);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent strong root");

  heap->try_inject_alloc_failure();
  op_strong_roots();
}

void ShenandoahConcurrentGC::entry_cleanup_early() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent cleanup";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_early, true /* log_heap_usage */);
  EventMark em("%s", msg);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  op_cleanup_early();
}

void ShenandoahConcurrentGC::entry_evacuate() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  static const char* msg = "Concurrent evacuation";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_evac);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
                              "concurrent evacuation");

  heap->try_inject_alloc_failure();
  op_evacuate();
}

void ShenandoahConcurrentGC::entry_update_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  static const char* msg = "Concurrent update thread roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_thread_roots);
  EventMark em("%s", msg);

  // No workers used in this phase, no setup required
  heap->try_inject_alloc_failure();
  op_update_thread_roots();
}

void ShenandoahConcurrentGC::entry_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent update references";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
                              "concurrent reference update");

  heap->try_inject_alloc_failure();
  op_updaterefs();
}

void ShenandoahConcurrentGC::entry_cleanup_complete() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent cleanup";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
  EventMark em("%s", msg);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  op_cleanup_complete();
}

void ShenandoahConcurrentGC::op_reset() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_reset();
  }

  heap->prepare_gc();
}
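
// TAMS below is "top-at-mark-start": objects allocated at or above a
// region's TAMS during the marking cycle are treated as implicitly live,
// so they need no mark bitmap updates. Init mark snapshots TAMS at the
// region's current top.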
class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
public:
  ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
    if (r->is_active()) {
      // Check if region needs updating its TAMS. We have updated it already during concurrent
      // reset, so it is very likely we don't need to do another write here.
      if (_ctx->top_at_mark_start(r) != r->top()) {
        _ctx->capture_top_at_mark_start(r);
      }
    } else {
      assert(_ctx->top_at_mark_start(r) == r->top(),
             "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
    }
  }

  bool is_thread_safe() { return true; }
};

void ShenandoahConcurrentGC::start_mark() {
  _mark.start_mark();
}

void ShenandoahConcurrentGC::op_init_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");

  assert(heap->marking_context()->is_bitmap_clear(), "need clear marking bitmap");
  assert(!heap->marking_context()->is_complete(), "should not be complete");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");

  if (ShenandoahVerify) {
    heap->verifier()->verify_before_concmark();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  heap->set_concurrent_mark_in_progress(true);

  start_mark();

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
    ShenandoahInitMarkUpdateRegionStateClosure cl;
    heap->parallel_heap_region_iterate(&cl);
  }

  // Weak reference processing
  ShenandoahReferenceProcessor* rp = heap->ref_processor();
  rp->reset_thread_locals();
  rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());

  // Make above changes visible to worker threads
  OrderAccess::fence();
  // Arm nmethods for concurrent marking. When an nmethod is about to be executed,
  // we need to make sure that all its metadata are marked. The alternative is to
  // remark thread roots at the final mark pause, but that could be a latency killer.
  if (heap->unload_classes()) {
    ShenandoahCodeRoots::arm_nmethods();
  }

  ShenandoahStackWatermark::change_epoch_id();
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_mark();
  }
}
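
// Conceptually, the nmethod entry barrier armed above works like this
// (a sketch; the real logic lives in BarrierSetNMethod and friends):
//
//   on entry to a compiled method:
//     if (nmethod is armed) {
//       take the slow path: keep the nmethod's oops and metadata alive
//       (mark or evacuate them as needed), then disarm
//     }
//
// This lets marking avoid scanning the whole code cache inside the pause.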
void ShenandoahConcurrentGC::op_mark_roots() {
  _mark.mark_concurrent_roots();
}

void ShenandoahConcurrentGC::op_mark() {
  _mark.concurrent_mark();
}

void ShenandoahConcurrentGC::op_final_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");

  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_no_forwarded();
  }

  if (!heap->cancelled_gc()) {
    _mark.finish_mark();
    assert(!heap->cancelled_gc(), "STW mark cannot OOM");

    // Notify JVMTI that the tagmap table will need cleaning.
    JvmtiTagMap::set_needs_cleaning();

    heap->prepare_regions_and_collection_set(true /*concurrent*/);

    // Has to be done after cset selection
    heap->prepare_concurrent_roots();

    if (!heap->collection_set()->is_empty()) {
      if (ShenandoahVerify) {
        heap->verifier()->verify_before_evacuation();
      }

      heap->set_evacuation_in_progress(true);
      // From here on, we need to update references.
      heap->set_has_forwarded_objects(true);

      // Verify before arming for concurrent processing.
      // Otherwise, verification can trigger stack processing.
      if (ShenandoahVerify) {
        heap->verifier()->verify_during_evacuation();
      }

      // Arm nmethods/stack for concurrent processing
      ShenandoahCodeRoots::arm_nmethods();
      ShenandoahStackWatermark::change_epoch_id();

      if (ShenandoahPacing) {
        heap->pacer()->setup_for_evac();
      }
    } else {
      if (ShenandoahVerify) {
        heap->verifier()->verify_after_concmark();
      }

      if (VerifyAfterGC) {
        Universe::verify();
      }
    }
  }
}

class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
private:
  OopClosure* const _oops;

public:
  ShenandoahConcurrentEvacThreadClosure(OopClosure* oops);
  void do_thread(Thread* thread);
};

ShenandoahConcurrentEvacThreadClosure::ShenandoahConcurrentEvacThreadClosure(OopClosure* oops) :
  _oops(oops) {
}

void ShenandoahConcurrentEvacThreadClosure::do_thread(Thread* thread) {
  JavaThread* const jt = JavaThread::cast(thread);
  StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc);
}

class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask {
private:
  ShenandoahJavaThreadsIterator _java_threads;

public:
  ShenandoahConcurrentEvacUpdateThreadTask(uint n_workers) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Thread Roots"),
    _java_threads(ShenandoahPhaseTimings::conc_thread_roots, n_workers) {
  }

  void work(uint worker_id) {
    // ShenandoahEvacOOMScope has to be set up by ShenandoahContextEvacuateUpdateRootsClosure.
    // Otherwise, it may deadlock with the watermark lock.
    ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
    ShenandoahConcurrentEvacThreadClosure thr_cl(&oops_cl);
    _java_threads.threads_do(&thr_cl, worker_id);
  }
};

void ShenandoahConcurrentGC::op_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_evacuation_in_progress(), "Checked by caller");
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
  ShenandoahConcurrentEvacUpdateThreadTask task(heap->workers()->active_workers());
  heap->workers()->run_task(&task);
}
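
// Note on op_thread_roots(): thanks to the stack watermark mechanism, a
// Java thread's frames are fixed up lazily, either by the thread itself when
// it resumes, or by the call to StackWatermarkSet::finish_processing() above,
// whichever happens first. The task merely makes sure every stack is done
// before the cycle moves on.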
void ShenandoahConcurrentGC::op_weak_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
  // Concurrent weak refs processing
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
  if (heap->gc_cause() == GCCause::_wb_breakpoint) {
    ShenandoahBreakpoint::at_after_reference_processing_started();
  }
  heap->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
}

class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _mark_context;
  bool _evac_in_progress;
  Thread* const _thread;

public:
  ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
  void do_oop(oop* p);
  void do_oop(narrowOop* p);
};

ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
  _heap(ShenandoahHeap::heap()),
  _mark_context(ShenandoahHeap::heap()->marking_context()),
  _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
  _thread(Thread::current()) {
}

void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
  const oop obj = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(obj)) {
    if (!_mark_context->is_marked(obj)) {
      shenandoah_assert_correct(p, obj);
      ShenandoahHeap::atomic_clear_oop(p, obj);
    } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
      oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      if (resolved == obj) {
        resolved = _heap->evacuate_object(obj, _thread);
      }
      ShenandoahHeap::atomic_update_oop(resolved, p, obj);
      assert(_heap->cancelled_gc() ||
             _mark_context->is_marked(resolved) && !_heap->in_collection_set(resolved),
             "Sanity");
    }
  }
}

void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
  ShouldNotReachHere();
}

class ShenandoahIsCLDAliveClosure : public CLDClosure {
public:
  void do_cld(ClassLoaderData* cld) {
    cld->is_alive();
  }
};

class ShenandoahIsNMethodAliveClosure : public NMethodClosure {
public:
  void do_nmethod(nmethod* n) {
    n->is_unloading();
  }
};
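
// The two closures above look like no-ops, but the calls have side effects:
// cld->is_alive() and n->is_unloading() compute liveness, and for nmethods
// the is_unloading() result is cached in the nmethod, so later unloading
// work can rely on it without touching dead metadata again.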
// This task not only evacuates/updates marked weak roots, but also nulls
// out dead weak roots.
class ShenandoahConcurrentWeakRootsEvacUpdateTask : public WorkerTask {
private:
  ShenandoahVMWeakRoots<true /*concurrent*/> _vm_roots;

  // Roots related to concurrent class unloading
  ShenandoahClassLoaderDataRoots<true /* concurrent */>
                                      _cld_roots;
  ShenandoahConcurrentNMethodIterator _nmethod_itr;
  ShenandoahPhaseTimings::Phase       _phase;

public:
  ShenandoahConcurrentWeakRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Weak Roots"),
    _vm_roots(phase),
    _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers(), false /*heap iteration*/),
    _nmethod_itr(ShenandoahCodeRoots::table()),
    _phase(phase) {
    if (ShenandoahHeap::heap()->unload_classes()) {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      _nmethod_itr.nmethods_do_begin();
    }
  }

  ~ShenandoahConcurrentWeakRootsEvacUpdateTask() {
    if (ShenandoahHeap::heap()->unload_classes()) {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      _nmethod_itr.nmethods_do_end();
    }
    // Notify runtime data structures of potentially dead oops
    _vm_roots.report_num_dead();
  }

  void work(uint worker_id) {
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    ShenandoahSuspendibleThreadSetJoiner sts_join;
    {
      ShenandoahEvacOOMScope oom;
      // jni_roots and weak_roots are OopStorage backed roots, concurrent iteration
      // may race against OopStorage::release() calls.
      ShenandoahEvacUpdateCleanupOopStorageRootsClosure cl;
      _vm_roots.oops_do(&cl, worker_id);
    }

    // If we are going to perform concurrent class unloading later on, we need to
    // clean up the weak oops in CLDs and determine the nmethods' unloading state,
    // so that we can clean up immediate garbage sooner.
    if (ShenandoahHeap::heap()->unload_classes()) {
      // Applies ShenandoahIsCLDAliveClosure to CLDs; the native barrier will either
      // null the CLD's holder or evacuate it.
      {
        ShenandoahIsCLDAliveClosure is_cld_alive;
        _cld_roots.cld_do(&is_cld_alive, worker_id);
      }

      // Applies ShenandoahIsNMethodAliveClosure to registered nmethods.
      // The closure calls nmethod->is_unloading(). The is_unloading
      // state is cached; therefore, during the concurrent class unloading phase,
      // we will not touch the metadata of unloading nmethods.
      {
        ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
        ShenandoahIsNMethodAliveClosure is_nmethod_alive;
        _nmethod_itr.nmethods_do(&is_nmethod_alive);
      }
    }
  }
};
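
// op_weak_roots() runs the task above, then performs a handshake with all
// Java threads. The handshake makes sure no mutator can still observe a
// stale reference to a dead weak root that it loaded before the root was
// cleared.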
void ShenandoahConcurrentGC::op_weak_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
  // Concurrent weak root processing
  {
    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_work);
    ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots_work);
    ShenandoahConcurrentWeakRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_weak_roots_work);
    heap->workers()->run_task(&task);
  }

  // Perform handshake to flush out dead oops
  {
    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_rendezvous);
    heap->rendezvous_threads();
  }
}

void ShenandoahConcurrentGC::op_class_unloading() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress() &&
         heap->unload_classes(),
         "Checked by caller");
  heap->do_class_unloading();
}

class ShenandoahEvacUpdateCodeCacheClosure : public NMethodClosure {
private:
  BarrierSetNMethod* const _bs;
  ShenandoahEvacuateUpdateMetadataClosure _cl;

public:
  ShenandoahEvacUpdateCodeCacheClosure() :
    _bs(BarrierSet::barrier_set()->barrier_set_nmethod()),
    _cl() {
  }

  void do_nmethod(nmethod* n) {
    ShenandoahNMethod* data = ShenandoahNMethod::gc_data(n);
    ShenandoahReentrantLocker locker(data->lock());
    // Set up the EvacOOM scope below the reentrant lock to avoid deadlock with
    // nmethod_entry_barrier.
    ShenandoahEvacOOMScope oom;
    data->oops_do(&_cl, true /*fix relocation*/);
    _bs->disarm(n);
  }
};

class ShenandoahConcurrentRootsEvacUpdateTask : public WorkerTask {
private:
  ShenandoahPhaseTimings::Phase _phase;
  ShenandoahVMRoots<true /*concurrent*/> _vm_roots;
  ShenandoahClassLoaderDataRoots<true /*concurrent*/>
                                      _cld_roots;
  ShenandoahConcurrentNMethodIterator _nmethod_itr;

public:
  ShenandoahConcurrentRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Strong Roots"),
    _phase(phase),
    _vm_roots(phase),
    _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers(), false /*heap iteration*/),
    _nmethod_itr(ShenandoahCodeRoots::table()) {
    if (!ShenandoahHeap::heap()->unload_classes()) {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      _nmethod_itr.nmethods_do_begin();
    }
  }

  ~ShenandoahConcurrentRootsEvacUpdateTask() {
    if (!ShenandoahHeap::heap()->unload_classes()) {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      _nmethod_itr.nmethods_do_end();
    }
  }

  void work(uint worker_id) {
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    {
      ShenandoahEvacOOMScope oom;
      {
        // vm_roots and weak_roots are OopStorage backed roots, concurrent iteration
        // may race against OopStorage::release() calls.
        ShenandoahContextEvacuateUpdateRootsClosure cl;
        _vm_roots.oops_do<ShenandoahContextEvacuateUpdateRootsClosure>(&cl, worker_id);
      }

      {
        ShenandoahEvacuateUpdateMetadataClosure cl;
        CLDToOopClosure clds(&cl, ClassLoaderData::_claim_strong);
        _cld_roots.cld_do(&clds, worker_id);
      }
    }

    // Cannot set up ShenandoahEvacOOMScope here, due to potential deadlock with nmethod_entry_barrier.
    if (!ShenandoahHeap::heap()->unload_classes()) {
      ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
      ShenandoahEvacUpdateCodeCacheClosure cl;
      _nmethod_itr.nmethods_do(&cl);
    }
  }
};
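
// The strong-roots pass below makes VM roots, CLD oops, and oops embedded in
// compiled code point at their to-space copies, so the collection set can be
// reclaimed; once an nmethod's oops are fixed, its entry barrier is disarmed
// (see ShenandoahEvacUpdateCodeCacheClosure above).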
void ShenandoahConcurrentGC::op_strong_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
  ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
  heap->workers()->run_task(&task);
  heap->set_concurrent_strong_root_in_progress(false);
}

void ShenandoahConcurrentGC::op_cleanup_early() {
  ShenandoahHeap::heap()->free_set()->recycle_trash();
}

void ShenandoahConcurrentGC::op_evacuate() {
  ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
}

void ShenandoahConcurrentGC::op_init_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->set_evacuation_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);
  heap->prepare_update_heap_references(true /*concurrent*/);
  heap->set_update_refs_in_progress(true);

  if (ShenandoahPacing) {
    heap->pacer()->setup_for_updaterefs();
  }
}

void ShenandoahConcurrentGC::op_updaterefs() {
  ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
}
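
// Update-refs in a nutshell: walk reachable oop slots and replace any
// reference to a from-space (collection set) object with its forwardee.
// The closure below applies the same fixup to each Java thread's roots,
// via a handshake rather than a global safepoint.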
class ShenandoahUpdateThreadClosure : public HandshakeClosure {
private:
  ShenandoahUpdateRefsClosure _cl;
public:
  ShenandoahUpdateThreadClosure();
  void do_thread(Thread* thread);
};

ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
  HandshakeClosure("Shenandoah Update Thread Roots") {
}

void ShenandoahUpdateThreadClosure::do_thread(Thread* thread) {
  if (thread->is_Java_thread()) {
    JavaThread* jt = JavaThread::cast(thread);
    ResourceMark rm;
    jt->oops_do(&_cl, nullptr);
  }
}

void ShenandoahConcurrentGC::op_update_thread_roots() {
  ShenandoahUpdateThreadClosure cl;
  Handshake::execute(&cl);
}

void ShenandoahConcurrentGC::op_final_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
  assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");

  heap->finish_concurrent_roots();

  // Clear cancelled GC, if set. On cancellation path, the block before would handle
  // everything.
  if (heap->cancelled_gc()) {
    heap->clear_cancelled_gc();
  }

  // Has to be done before the cset is cleared
  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_in_to_space();
  }

  heap->update_heap_region_states(true /*concurrent*/);

  heap->set_update_refs_in_progress(false);
  heap->set_has_forwarded_objects(false);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_updaterefs();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  heap->rebuild_free_set(true /*concurrent*/);
}

void ShenandoahConcurrentGC::op_final_roots() {
  ShenandoahHeap::heap()->set_concurrent_weak_root_in_progress(false);
}

void ShenandoahConcurrentGC::op_cleanup_complete() {
  ShenandoahHeap::heap()->free_set()->recycle_trash();
}

bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
  if (ShenandoahHeap::heap()->cancelled_gc()) {
    _degen_point = point;
    return true;
  }
  return false;
}

const char* ShenandoahConcurrentGC::init_mark_event_message() const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
  if (heap->unload_classes()) {
    return "Pause Init Mark (unload classes)";
  } else {
    return "Pause Init Mark";
  }
}

const char* ShenandoahConcurrentGC::final_mark_event_message() const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
  if (heap->unload_classes()) {
    return "Pause Final Mark (unload classes)";
  } else {
    return "Pause Final Mark";
  }
}

const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
  if (heap->unload_classes()) {
    return "Concurrent marking (unload classes)";
  } else {
    return "Concurrent marking";
  }
}