1 /* 2 * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

#include "precompiled.hpp"

#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shenandoah/shenandoahBreakpoint.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahLock.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahStackWatermark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "memory/allocation.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/events.hpp"

// Breakpoint support
// RAII scope bracketing an entire GC cycle for the breakpoint test machinery.
// Only active when the cycle was requested with the _wb_breakpoint cause.
class ShenandoahBreakpointGCScope : public StackObj {
private:
  const GCCause::Cause _cause;
public:
  ShenandoahBreakpointGCScope(GCCause::Cause cause) : _cause(cause) {
    if (cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::start_gc();
      ShenandoahBreakpoint::at_before_gc();
    }
  }

  ~ShenandoahBreakpointGCScope() {
    if (_cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::at_after_gc();
    }
  }
};

// RAII scope bracketing the concurrent marking phase for the breakpoint
// test machinery; same _wb_breakpoint gating as the GC scope above.
class ShenandoahBreakpointMarkScope : public StackObj {
private:
  const GCCause::Cause _cause;
public:
  ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
    if (_cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::at_after_marking_started();
    }
  }

  ~ShenandoahBreakpointMarkScope() {
    if (_cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::at_before_marking_completed();
    }
  }
};

ShenandoahConcurrentGC::ShenandoahConcurrentGC() :
  _mark(),
  _degen_point(ShenandoahDegenPoint::_degenerated_unset) {
}

// Returns the point at which this cycle bailed out (or _degenerated_unset);
// consulted by the caller to decide where a degenerated cycle should resume.
ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
  return _degen_point;
}

void ShenandoahConcurrentGC::cancel() {
  ShenandoahConcurrentMark::cancel();
}

// Drives one full concurrent collection cycle: reset, STW init-mark,
// concurrent mark, STW final-mark, then (conditionally) concurrent
// evacuation and update-refs. Returns false when the cycle was cancelled;
// _degen_point then records where a degenerated (STW) cycle must pick up.
bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);

  // Reset for upcoming marking
  entry_reset();

  // Start initial mark under STW
  vmop_entry_init_mark();

  {
    ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);
    // Concurrent mark roots
    entry_mark_roots();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_outside_cycle)) return false;

    // Continue concurrent mark
    entry_mark();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) return false;
  }

  // Complete marking under STW, and start evacuation
  vmop_entry_final_mark();

  // Concurrent stack processing
  if (heap->is_evacuation_in_progress()) {
    entry_thread_roots();
  }

  // Process weak roots that might still point to regions that would be broken by cleanup
  if (heap->is_concurrent_weak_root_in_progress()) {
    entry_weak_refs();
    entry_weak_roots();
  }

  // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
  // the space. This would be the last action if there is nothing to evacuate.
  entry_cleanup_early();

  {
    ShenandoahHeapLocker locker(heap->lock());
    heap->free_set()->log_status();
  }

  // Perform concurrent class unloading
  if (heap->unload_classes() &&
      heap->is_concurrent_weak_root_in_progress()) {
    entry_class_unloading();
  }

  // Processing strong roots
  // This may be skipped if there is nothing to update/evacuate.
  // If so, strong_root_in_progress would be unset.
  if (heap->is_concurrent_strong_root_in_progress()) {
    entry_strong_roots();
  }

  // Continue the cycle with evacuation and optional update-refs.
  // This may be skipped if there is nothing to evacuate.
  // If so, evac_in_progress would be unset by collection set preparation code.
  if (heap->is_evacuation_in_progress()) {
    // Concurrently evacuate
    entry_evacuate();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) return false;

    // Perform update-refs phase.
    vmop_entry_init_updaterefs();
    entry_updaterefs();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;

    // Concurrent update thread roots
    entry_update_thread_roots();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;

    vmop_entry_final_updaterefs();

    // Update references freed up collection set, kick the cleanup to reclaim the space.
    entry_cleanup_complete();
  } else {
    vmop_entry_final_roots();
  }

  return true;
}

// vmop_entry_* methods: account the pause against the STW collection
// counters and the "gross" timing bucket (which includes time-to-safepoint),
// then dispatch a VM operation; the VM thread re-enters this object via the
// matching entry_*() method while at a safepoint.
void ShenandoahConcurrentGC::vmop_entry_init_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahInitMark op(this);
  VMThread::execute(&op); // jump to entry_init_mark() under safepoint
}

void ShenandoahConcurrentGC::vmop_entry_final_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalMarkStartEvac op(this);
  VMThread::execute(&op); // jump to entry_final_mark under safepoint
}

void ShenandoahConcurrentGC::vmop_entry_init_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_update_refs_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahInitUpdateRefs op(this);
  VMThread::execute(&op);
}

void ShenandoahConcurrentGC::vmop_entry_final_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalUpdateRefs op(this);
  VMThread::execute(&op);
}

void ShenandoahConcurrentGC::vmop_entry_final_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats
tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_roots_gross);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalRoots op(this);
  VMThread::execute(&op);
}

// entry_* pause methods: executed by the VM thread inside the corresponding
// VM operation. They record the "net" pause timing, size the worker pool for
// the phase where workers are used, and run the op_*() that does the work.
void ShenandoahConcurrentGC::entry_init_mark() {
  const char* msg = init_mark_event_message();
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
                              "init marking");

  op_init_mark();
}

void ShenandoahConcurrentGC::entry_final_mark() {
  const char* msg = final_mark_event_message();
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
                              "final marking");

  op_final_mark();
}

void ShenandoahConcurrentGC::entry_init_updaterefs() {
  static const char* msg = "Pause Init Update Refs";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs);
  EventMark em("%s", msg);

  // No workers used in this phase, no setup required
  op_init_updaterefs();
}

void ShenandoahConcurrentGC::entry_final_updaterefs() {
  static const char* msg = "Pause Final Update Refs";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
                              "final reference update");

  op_final_updaterefs();
}

void ShenandoahConcurrentGC::entry_final_roots() {
  static const char* msg = "Pause Final Roots";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots);
  EventMark em("%s", msg);

  op_final_roots();
}

// entry_* concurrent methods: run on the GC control thread between
// safepoints. They account time against the concurrent collection counters
// and the concurrent timing bucket, then run the matching op_*().
void ShenandoahConcurrentGC::entry_reset() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent reset";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
                              "concurrent reset");

  heap->try_inject_alloc_failure();
  op_reset();
}

void ShenandoahConcurrentGC::entry_mark_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = "Concurrent marking roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent marking roots");

  heap->try_inject_alloc_failure();
  op_mark_roots();
}

void ShenandoahConcurrentGC::entry_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = conc_mark_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent marking");

  heap->try_inject_alloc_failure();
  op_mark();
}

void ShenandoahConcurrentGC::entry_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  static const char* msg = "Concurrent thread roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_thread_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              msg);

  heap->try_inject_alloc_failure();
  op_thread_roots();
}

void ShenandoahConcurrentGC::entry_weak_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  static const char* msg = "Concurrent weak references";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_refs_processing(),
                              "concurrent weak references");

  heap->try_inject_alloc_failure();
  op_weak_refs();
}

void ShenandoahConcurrentGC::entry_weak_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent weak roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent weak root");

  heap->try_inject_alloc_failure();
  op_weak_roots();
}

void ShenandoahConcurrentGC::entry_class_unloading() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent class unloading";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_class_unload);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent class unloading");

  heap->try_inject_alloc_failure();
  op_class_unloading();
}

void ShenandoahConcurrentGC::entry_strong_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent strong roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_strong_roots);
  EventMark em("%s", msg);

  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_strong_roots);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent strong root");

  heap->try_inject_alloc_failure();
  op_strong_roots();
}

void ShenandoahConcurrentGC::entry_cleanup_early() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent cleanup";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_early, true /* log_heap_usage */);
  EventMark em("%s", msg);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  op_cleanup_early();
}

void ShenandoahConcurrentGC::entry_evacuate() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  static const char* msg = "Concurrent evacuation";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_evac);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
                              "concurrent evacuation");

  heap->try_inject_alloc_failure();
  op_evacuate();
}

void ShenandoahConcurrentGC::entry_update_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  static const char* msg = "Concurrent update thread roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_thread_roots);
  EventMark em("%s", msg);

  // No workers used in this phase, no setup required
  heap->try_inject_alloc_failure();
  op_update_thread_roots();
}

void ShenandoahConcurrentGC::entry_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent update references";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
                              "concurrent reference update");

  heap->try_inject_alloc_failure();
  op_updaterefs();
}

void ShenandoahConcurrentGC::entry_cleanup_complete() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent cleanup";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
  EventMark em("%s", msg);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  op_cleanup_complete();
}

void ShenandoahConcurrentGC::op_reset() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_reset();
  }

  heap->prepare_gc();
}

// Captures top-at-mark-start (TAMS) for each active region right before
// marking begins; asserts inactive regions already have the correct TAMS.
class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
public:
  ShenandoahInitMarkUpdateRegionStateClosure() :
    _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
    if (r->is_active()) {
      // Check if region needs updating its TAMS. We have updated it already during concurrent
      // reset, so it is very likely we don't need to do another write here.
      if (_ctx->top_at_mark_start(r) != r->top()) {
        _ctx->capture_top_at_mark_start(r);
      }
    } else {
      assert(_ctx->top_at_mark_start(r) == r->top(),
             "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
    }
  }

  bool is_thread_safe() { return true; }
};

void ShenandoahConcurrentGC::start_mark() {
  _mark.start_mark();
}

// Init-mark pause: verifies preconditions, flips the heap into
// concurrent-mark mode, snapshots region TAMS, and primes reference
// processing. Runs in the VM thread at a Shenandoah safepoint.
void ShenandoahConcurrentGC::op_init_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");

  assert(heap->marking_context()->is_bitmap_clear(), "need clear marking bitmap");
  assert(!heap->marking_context()->is_complete(), "should not be complete");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");

  if (ShenandoahVerify) {
    heap->verifier()->verify_before_concmark();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  heap->set_concurrent_mark_in_progress(true);

  start_mark();

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
    ShenandoahInitMarkUpdateRegionStateClosure cl;
    heap->parallel_heap_region_iterate(&cl);
  }

  // Weak reference processing
  ShenandoahReferenceProcessor* rp = heap->ref_processor();
  rp->reset_thread_locals();
  rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());

  // Make above changes visible to worker threads
  OrderAccess::fence();

  // Arm nmethods for concurrent mark
  ShenandoahCodeRoots::arm_nmethods_for_mark();

  ShenandoahStackWatermark::change_epoch_id();
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_mark();
  }
}

void ShenandoahConcurrentGC::op_mark_roots() {
  _mark.mark_concurrent_roots();
}

void ShenandoahConcurrentGC::op_mark() {
  _mark.concurrent_mark();
}

// Final-mark pause: finishes marking, selects the collection set, and either
// transitions into evacuation (arming nmethods/stacks) or, when the cset is
// empty, verifies and finishes the cycle. No-op when GC was cancelled.
void ShenandoahConcurrentGC::op_final_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");

  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_no_forwarded();
  }

  if (!heap->cancelled_gc()) {
    _mark.finish_mark();
    assert(!heap->cancelled_gc(), "STW mark cannot OOM");

    // Notify JVMTI that the tagmap table will need cleaning.
    JvmtiTagMap::set_needs_cleaning();

    heap->prepare_regions_and_collection_set(true /*concurrent*/);

    // Has to be done after cset selection
    heap->prepare_concurrent_roots();

    if (!heap->collection_set()->is_empty()) {
      if (ShenandoahVerify) {
        heap->verifier()->verify_before_evacuation();
      }

      heap->set_evacuation_in_progress(true);
      // From here on, we need to update references.
      heap->set_has_forwarded_objects(true);

      // Verify before arming for concurrent processing.
      // Otherwise, verification can trigger stack processing.
      if (ShenandoahVerify) {
        heap->verifier()->verify_during_evacuation();
      }

      // Arm nmethods/stack for concurrent processing
      ShenandoahCodeRoots::arm_nmethods_for_evac();
      ShenandoahStackWatermark::change_epoch_id();

      if (ShenandoahPacing) {
        heap->pacer()->setup_for_evac();
      }
    } else {
      if (ShenandoahVerify) {
        heap->verifier()->verify_after_concmark();
      }

      if (VerifyAfterGC) {
        Universe::verify();
      }
    }
  }
}

// Finishes stack-watermark processing for a single Java thread, applying the
// supplied oop closure to the thread's frames.
class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
private:
  OopClosure* const _oops;

public:
  ShenandoahConcurrentEvacThreadClosure(OopClosure* oops);
  void do_thread(Thread* thread);
};

ShenandoahConcurrentEvacThreadClosure::ShenandoahConcurrentEvacThreadClosure(OopClosure* oops) :
  _oops(oops) {
}

void ShenandoahConcurrentEvacThreadClosure::do_thread(Thread* thread) {
  JavaThread* const jt = JavaThread::cast(thread);
  StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc);
}

// Worker task that walks all Java threads in parallel and evacuates/updates
// the oops reachable from their stacks.
class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask {
private:
  ShenandoahJavaThreadsIterator _java_threads;

public:
  ShenandoahConcurrentEvacUpdateThreadTask(uint n_workers) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Thread Roots"),
    _java_threads(ShenandoahPhaseTimings::conc_thread_roots, n_workers) {
  }

  void work(uint worker_id) {
    // ShenandoahEvacOOMScope has to be setup by ShenandoahContextEvacuateUpdateRootsClosure.
    // Otherwise, may deadlock with watermark lock
    ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
    ShenandoahConcurrentEvacThreadClosure thr_cl(&oops_cl);
    _java_threads.threads_do(&thr_cl, worker_id);
  }
};

void ShenandoahConcurrentGC::op_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_evacuation_in_progress(), "Checked by caller");
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
  ShenandoahConcurrentEvacUpdateThreadTask task(heap->workers()->active_workers());
  heap->workers()->run_task(&task);
}

void ShenandoahConcurrentGC::op_weak_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
  // Concurrent weak refs processing
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
  if (heap->gc_cause() == GCCause::_wb_breakpoint) {
    ShenandoahBreakpoint::at_after_reference_processing_started();
  }
  heap->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
}

// Closure over OopStorage-backed weak roots: clears slots whose referent is
// unmarked (dead), and evacuates/updates marked referents that sit in the
// collection set. Uses atomic oop updates because it can race with mutators.
class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _mark_context;
  bool  _evac_in_progress;
  Thread* const _thread;

public:
  ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
  void do_oop(oop* p);
  void do_oop(narrowOop* p);
};

ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
  _heap(ShenandoahHeap::heap()),
  _mark_context(ShenandoahHeap::heap()->marking_context()),
  _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
  _thread(Thread::current()) {
}

void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
  const oop obj = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(obj)) {
    if (!_mark_context->is_marked(obj)) {
      // Dead referent: null out the slot.
      shenandoah_assert_correct(p, obj);
      ShenandoahHeap::atomic_clear_oop(p, obj);
    } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
      // Live referent in the cset: evacuate if nobody did yet, then update the slot.
      oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      if (resolved == obj) {
        resolved = _heap->evacuate_object(obj, _thread);
      }
      ShenandoahHeap::atomic_update_oop(resolved, p, obj);
      assert(_heap->cancelled_gc() ||
             _mark_context->is_marked(resolved) && !_heap->in_collection_set(resolved),
             "Sanity");
    }
  }
}

void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
  ShouldNotReachHere();
}

// Touches each CLD's aliveness; called for its side effect (see task below).
class ShenandoahIsCLDAliveClosure : public CLDClosure {
public:
  void do_cld(ClassLoaderData* cld) {
    cld->is_alive();
  }
};

// Queries (and thereby caches) each nmethod's is_unloading() state.
class ShenandoahIsNMethodAliveClosure: public NMethodClosure {
public:
  void do_nmethod(nmethod* n) {
    n->is_unloading();
  }
};

// This task not only evacuates/updates marked weak roots, but also "null"
// dead weak roots.
class ShenandoahConcurrentWeakRootsEvacUpdateTask : public WorkerTask {
private:
  ShenandoahVMWeakRoots<true /*concurrent*/> _vm_roots;

  // Roots related to concurrent class unloading
  ShenandoahClassLoaderDataRoots<true /* concurrent */>
                                             _cld_roots;
  ShenandoahConcurrentNMethodIterator        _nmethod_itr;
  ShenandoahPhaseTimings::Phase              _phase;

public:
  ShenandoahConcurrentWeakRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Weak Roots"),
    _vm_roots(phase),
    _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers(), false /*heap iteration*/),
    _nmethod_itr(ShenandoahCodeRoots::table()),
    _phase(phase) {
    if (ShenandoahHeap::heap()->unload_classes()) {
      // nmethod iteration is bracketed under CodeCache_lock (see dtor).
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      _nmethod_itr.nmethods_do_begin();
    }
  }

  ~ShenandoahConcurrentWeakRootsEvacUpdateTask() {
    if (ShenandoahHeap::heap()->unload_classes()) {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      _nmethod_itr.nmethods_do_end();
    }
    // Notify runtime data structures of potentially dead oops
    _vm_roots.report_num_dead();
  }

  void work(uint worker_id) {
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    ShenandoahSuspendibleThreadSetJoiner sts_join;
    {
      ShenandoahEvacOOMScope oom;
      // jni_roots and weak_roots are OopStorage backed roots, concurrent iteration
      // may race against OopStorage::release() calls.
      ShenandoahEvacUpdateCleanupOopStorageRootsClosure cl;
      _vm_roots.oops_do(&cl, worker_id);
    }

    // If we are going to perform concurrent class unloading later on, we need to
    // cleanup the weak oops in CLD and determinate nmethod's unloading state, so that we
    // can cleanup immediate garbage sooner.
    if (ShenandoahHeap::heap()->unload_classes()) {
      // Applies ShenandoahIsCLDAlive closure to CLDs, native barrier will either null the
      // CLD's holder or evacuate it.
      {
        ShenandoahIsCLDAliveClosure is_cld_alive;
        _cld_roots.cld_do(&is_cld_alive, worker_id);
      }

      // Applies ShenandoahIsNMethodAliveClosure to registered nmethods.
      // The closure calls nmethod->is_unloading(). The is_unloading
      // state is cached, therefore, during concurrent class unloading phase,
      // we will not touch the metadata of unloading nmethods
      {
        ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
        ShenandoahIsNMethodAliveClosure is_nmethod_alive;
        _nmethod_itr.nmethods_do(&is_nmethod_alive);
      }
    }
  }
};

void ShenandoahConcurrentGC::op_weak_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
  // Concurrent weak root processing
  {
    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_work);
    ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots_work);
    ShenandoahConcurrentWeakRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_weak_roots_work);
    heap->workers()->run_task(&task);
  }

  // Perform handshake to flush out dead oops
  {
    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_rendezvous);
    heap->rendezvous_threads();
  }
}

void ShenandoahConcurrentGC::op_class_unloading() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert (heap->is_concurrent_weak_root_in_progress() &&
          heap->unload_classes(),
          "Checked by caller");
  heap->do_class_unloading();
}

// Evacuates/updates the oops embedded in an nmethod and disarms its entry
// barrier, all under the nmethod's reentrant GC lock.
class ShenandoahEvacUpdateCodeCacheClosure : public NMethodClosure {
private:
  BarrierSetNMethod* const _bs;
  ShenandoahEvacuateUpdateMetadataClosure _cl;

public:
  ShenandoahEvacUpdateCodeCacheClosure() :
    _bs(BarrierSet::barrier_set()->barrier_set_nmethod()),
    _cl() {
  }

  void do_nmethod(nmethod* n) {
    ShenandoahNMethod* data = ShenandoahNMethod::gc_data(n);
    ShenandoahReentrantLocker locker(data->lock());
    // Setup EvacOOM scope below reentrant lock to avoid deadlock with
    // nmethod_entry_barrier
    ShenandoahEvacOOMScope oom;
    data->oops_do(&_cl, true/*fix relocation*/);
    _bs->disarm(n);
  }
};

// Worker task that evacuates/updates the concurrent strong roots: VM roots,
// CLDs, and (when classes are not being unloaded) code-cache nmethods.
class ShenandoahConcurrentRootsEvacUpdateTask : public WorkerTask {
private:
  ShenandoahPhaseTimings::Phase _phase;
  ShenandoahVMRoots<true /*concurrent*/> _vm_roots;
  ShenandoahClassLoaderDataRoots<true /*concurrent*/>
                                          _cld_roots;
  ShenandoahConcurrentNMethodIterator     _nmethod_itr;

public:
  ShenandoahConcurrentRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Strong Roots"),
    _phase(phase),
    _vm_roots(phase),
    _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers(), false /*heap iteration*/),
    _nmethod_itr(ShenandoahCodeRoots::table()) {
    if (!ShenandoahHeap::heap()->unload_classes()) {
      // nmethod iteration is bracketed under CodeCache_lock (see dtor).
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      _nmethod_itr.nmethods_do_begin();
    }
  }

  ~ShenandoahConcurrentRootsEvacUpdateTask() {
    if (!ShenandoahHeap::heap()->unload_classes()) {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      _nmethod_itr.nmethods_do_end();
    }
  }

  void work(uint worker_id) {
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    {
      ShenandoahEvacOOMScope oom;
      {
        // vm_roots and weak_roots are OopStorage backed roots, concurrent iteration
        // may race against OopStorage::release() calls.
        ShenandoahContextEvacuateUpdateRootsClosure cl;
        _vm_roots.oops_do<ShenandoahContextEvacuateUpdateRootsClosure>(&cl, worker_id);
      }

      {
        ShenandoahEvacuateUpdateMetadataClosure cl;
        CLDToOopClosure clds(&cl, ClassLoaderData::_claim_strong);
        _cld_roots.cld_do(&clds, worker_id);
      }
    }

    // Cannot setup ShenandoahEvacOOMScope here, due to potential deadlock with nmethod_entry_barrier.
    if (!ShenandoahHeap::heap()->unload_classes()) {
      ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
      ShenandoahEvacUpdateCodeCacheClosure cl;
      _nmethod_itr.nmethods_do(&cl);
    }
  }
};

void ShenandoahConcurrentGC::op_strong_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
  ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
  heap->workers()->run_task(&task);
  heap->set_concurrent_strong_root_in_progress(false);
}

void ShenandoahConcurrentGC::op_cleanup_early() {
  ShenandoahHeap::heap()->free_set()->recycle_trash();
}

void ShenandoahConcurrentGC::op_evacuate() {
  ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
}

// Init-update-refs pause: evacuation is done, switch the heap into the
// update-references phase.
void ShenandoahConcurrentGC::op_init_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->set_evacuation_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);
  heap->prepare_update_heap_references(true /*concurrent*/);
  heap->set_update_refs_in_progress(true);

  if (ShenandoahPacing) {
    heap->pacer()->setup_for_updaterefs();
  }
}

void ShenandoahConcurrentGC::op_updaterefs() {
  ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
}

// Handshake closure that updates references held in a Java thread's frames.
class ShenandoahUpdateThreadClosure : public HandshakeClosure {
private:
  ShenandoahUpdateRefsClosure _cl;
public:
942 ShenandoahUpdateThreadClosure(); 943 void do_thread(Thread* thread); 944 }; 945 946 ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() : 947 HandshakeClosure("Shenandoah Update Thread Roots") { 948 } 949 950 void ShenandoahUpdateThreadClosure::do_thread(Thread* thread) { 951 if (thread->is_Java_thread()) { 952 JavaThread* jt = JavaThread::cast(thread); 953 ResourceMark rm; 954 jt->oops_do(&_cl, nullptr); 955 } 956 } 957 958 void ShenandoahConcurrentGC::op_update_thread_roots() { 959 ShenandoahUpdateThreadClosure cl; 960 Handshake::execute(&cl); 961 } 962 963 void ShenandoahConcurrentGC::op_final_updaterefs() { 964 ShenandoahHeap* const heap = ShenandoahHeap::heap(); 965 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint"); 966 assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references"); 967 968 heap->finish_concurrent_roots(); 969 970 // Clear cancelled GC, if set. On cancellation path, the block before would handle 971 // everything. 
972 if (heap->cancelled_gc()) { 973 heap->clear_cancelled_gc(); 974 } 975 976 // Has to be done before cset is clear 977 if (ShenandoahVerify) { 978 heap->verifier()->verify_roots_in_to_space(); 979 } 980 981 heap->update_heap_region_states(true /*concurrent*/); 982 983 heap->set_update_refs_in_progress(false); 984 heap->set_has_forwarded_objects(false); 985 986 if (ShenandoahVerify) { 987 heap->verifier()->verify_after_updaterefs(); 988 } 989 990 if (VerifyAfterGC) { 991 Universe::verify(); 992 } 993 994 heap->rebuild_free_set(true /*concurrent*/); 995 } 996 997 void ShenandoahConcurrentGC::op_final_roots() { 998 ShenandoahHeap::heap()->set_concurrent_weak_root_in_progress(false); 999 } 1000 1001 void ShenandoahConcurrentGC::op_cleanup_complete() { 1002 ShenandoahHeap::heap()->free_set()->recycle_trash(); 1003 } 1004 1005 bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) { 1006 if (ShenandoahHeap::heap()->cancelled_gc()) { 1007 _degen_point = point; 1008 return true; 1009 } 1010 return false; 1011 } 1012 1013 const char* ShenandoahConcurrentGC::init_mark_event_message() const { 1014 ShenandoahHeap* const heap = ShenandoahHeap::heap(); 1015 assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here"); 1016 if (heap->unload_classes()) { 1017 return "Pause Init Mark (unload classes)"; 1018 } else { 1019 return "Pause Init Mark"; 1020 } 1021 } 1022 1023 const char* ShenandoahConcurrentGC::final_mark_event_message() const { 1024 ShenandoahHeap* const heap = ShenandoahHeap::heap(); 1025 assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here"); 1026 if (heap->unload_classes()) { 1027 return "Pause Final Mark (unload classes)"; 1028 } else { 1029 return "Pause Final Mark"; 1030 } 1031 } 1032 1033 const char* ShenandoahConcurrentGC::conc_mark_event_message() const { 1034 ShenandoahHeap* const heap = ShenandoahHeap::heap(); 1035 assert(!heap->has_forwarded_objects(), "Should not have forwarded 
objects here"); 1036 if (heap->unload_classes()) { 1037 return "Concurrent marking (unload classes)"; 1038 } else { 1039 return "Concurrent marking"; 1040 } 1041 }