/*
 * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shenandoah/shenandoahBreakpoint.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahLock.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahStackWatermark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "memory/allocation.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/events.hpp"

// Breakpoint support
class ShenandoahBreakpointGCScope : public StackObj {
private:
  const GCCause::Cause _cause;
public:
  ShenandoahBreakpointGCScope(GCCause::Cause cause) : _cause(cause) {
    if (cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::start_gc();
      ShenandoahBreakpoint::at_before_gc();
    }
  }

  ~ShenandoahBreakpointGCScope() {
    if (_cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::at_after_gc();
    }
  }
};

class ShenandoahBreakpointMarkScope : public StackObj {
private:
  const GCCause::Cause _cause;
public:
  ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
    if (_cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::at_after_marking_started();
    }
  }

  ~ShenandoahBreakpointMarkScope() {
    if (_cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::at_before_marking_completed();
    }
  }
};

ShenandoahConcurrentGC::ShenandoahConcurrentGC() :
  _mark(),
  _degen_point(ShenandoahDegenPoint::_degenerated_unset) {
}

ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
  return _degen_point;
}

void ShenandoahConcurrentGC::cancel() {
  ShenandoahConcurrentMark::cancel();
}

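// Run a complete concurrent cycle. The phases below follow the usual
// Shenandoah shape: reset, init-mark pause, concurrent mark, final-mark
// pause, then (if there is anything to evacuate) concurrent evacuation and
// reference updating, with cleanups in between. Returns false if the cycle
// was cancelled; _degen_point then tells the degenerated GC where to pick up.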
bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);

  // Reset for upcoming marking
  entry_reset();

  // Start initial mark under STW
  vmop_entry_init_mark();

  {
    ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);
    // Concurrent mark roots
    entry_mark_roots();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_outside_cycle)) return false;

    // Continue concurrent mark
    entry_mark();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) return false;
  }

  // Complete marking under STW, and start evacuation
  vmop_entry_final_mark();

  // Concurrent stack processing
  if (heap->is_evacuation_in_progress()) {
    entry_thread_roots();
  }

  // Process weak roots that might still point to regions that would be broken by cleanup
  if (heap->is_concurrent_weak_root_in_progress()) {
    entry_weak_refs();
    entry_weak_roots();
  }

  // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
  // the space. This would be the last action if there is nothing to evacuate.
  entry_cleanup_early();

  {
    ShenandoahHeapLocker locker(heap->lock());
    heap->free_set()->log_status();
  }

  // Perform concurrent class unloading
  if (heap->unload_classes() &&
      heap->is_concurrent_weak_root_in_progress()) {
    entry_class_unloading();
  }

  // Processing strong roots
  // This may be skipped if there is nothing to update/evacuate.
  // If so, strong_root_in_progress would be unset.
  if (heap->is_concurrent_strong_root_in_progress()) {
    entry_strong_roots();
  }

  // Continue the cycle with evacuation and optional update-refs.
  // This may be skipped if there is nothing to evacuate.
  // If so, evac_in_progress would be unset by collection set preparation code.
  if (heap->is_evacuation_in_progress()) {
    // Concurrently evacuate
    entry_evacuate();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) return false;

    // Perform update-refs phase.
    vmop_entry_init_updaterefs();
    entry_updaterefs();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;

    // Concurrent update thread roots
    entry_update_thread_roots();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;

    vmop_entry_final_updaterefs();

    // Update-refs has freed up the collection set; kick the cleanup to reclaim the space.
    entry_cleanup_complete();
  } else {
    vmop_entry_final_roots();
  }

  return true;
}

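// Each STW phase is entered through a vmop_entry_* method below: it accounts
// the gross pause time, optionally injects an allocation failure for testing,
// and submits a VM operation. The operation calls back into the matching
// entry_* method at the safepoint, which sets up workers and per-phase timing
// before doing the actual work in op_*.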
void ShenandoahConcurrentGC::vmop_entry_init_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahInitMark op(this);
  VMThread::execute(&op); // jump to entry_init_mark() under safepoint
}

void ShenandoahConcurrentGC::vmop_entry_final_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalMarkStartEvac op(this);
  VMThread::execute(&op); // jump to entry_final_mark under safepoint
}

void ShenandoahConcurrentGC::vmop_entry_init_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_update_refs_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahInitUpdateRefs op(this);
  VMThread::execute(&op);
}

void ShenandoahConcurrentGC::vmop_entry_final_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalUpdateRefs op(this);
  VMThread::execute(&op);
}

void ShenandoahConcurrentGC::vmop_entry_final_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_roots_gross);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalRoots op(this);
  VMThread::execute(&op);
}

void ShenandoahConcurrentGC::entry_init_mark() {
  const char* msg = init_mark_event_message();
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
                              "init marking");

  op_init_mark();
}

void ShenandoahConcurrentGC::entry_final_mark() {
  const char* msg = final_mark_event_message();
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
                              "final marking");

  op_final_mark();
}

void ShenandoahConcurrentGC::entry_init_updaterefs() {
  static const char* msg = "Pause Init Update Refs";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs);
  EventMark em("%s", msg);

  // No workers used in this phase, no setup required
  op_init_updaterefs();
}

void ShenandoahConcurrentGC::entry_final_updaterefs() {
  static const char* msg = "Pause Final Update Refs";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
                              "final reference update");

  op_final_updaterefs();
}

void ShenandoahConcurrentGC::entry_final_roots() {
  static const char* msg = "Pause Final Roots";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots);
  EventMark em("%s", msg);

  op_final_roots();
}

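// The entry_* methods below wrap the concurrent (non-pause) phases. Each sets
// up concurrent collection counters and per-phase timing, claims a worker
// scope with a policy-computed number of workers where workers are used,
// optionally injects an allocation failure for testing, and then calls into
// the matching op_* method.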
void ShenandoahConcurrentGC::entry_reset() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent reset";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
                              "concurrent reset");

  heap->try_inject_alloc_failure();
  op_reset();
}

void ShenandoahConcurrentGC::entry_mark_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = "Concurrent marking roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent marking roots");

  heap->try_inject_alloc_failure();
  op_mark_roots();
}

void ShenandoahConcurrentGC::entry_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = conc_mark_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent marking");

  heap->try_inject_alloc_failure();
  op_mark();
}

void ShenandoahConcurrentGC::entry_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  static const char* msg = "Concurrent thread roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_thread_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              msg);

  heap->try_inject_alloc_failure();
  op_thread_roots();
}

void ShenandoahConcurrentGC::entry_weak_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  static const char* msg = "Concurrent weak references";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_refs_processing(),
                              "concurrent weak references");

  heap->try_inject_alloc_failure();
  op_weak_refs();
}

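// Note: weak references and weak roots are handled in separate phases.
// entry_weak_refs() above drives java.lang.ref reference processing, while
// entry_weak_roots() below evacuates/updates live OopStorage-backed weak
// roots and clears dead ones. Both run while concurrent weak root processing
// is in progress.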
void ShenandoahConcurrentGC::entry_weak_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent weak roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent weak root");

  heap->try_inject_alloc_failure();
  op_weak_roots();
}

void ShenandoahConcurrentGC::entry_class_unloading() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent class unloading";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_class_unload);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent class unloading");

  heap->try_inject_alloc_failure();
  op_class_unloading();
}

void ShenandoahConcurrentGC::entry_strong_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent strong roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_strong_roots);
  EventMark em("%s", msg);

  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_strong_roots);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent strong root");

  heap->try_inject_alloc_failure();
  op_strong_roots();
}

void ShenandoahConcurrentGC::entry_cleanup_early() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent cleanup";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_early, true /* log_heap_usage */);
  EventMark em("%s", msg);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  op_cleanup_early();
}

void ShenandoahConcurrentGC::entry_evacuate() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  static const char* msg = "Concurrent evacuation";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_evac);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
                              "concurrent evacuation");

  heap->try_inject_alloc_failure();
  op_evacuate();
}

void ShenandoahConcurrentGC::entry_update_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  static const char* msg = "Concurrent update thread roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_thread_roots);
  EventMark em("%s", msg);

  // No workers used in this phase, no setup required
  heap->try_inject_alloc_failure();
  op_update_thread_roots();
}

void ShenandoahConcurrentGC::entry_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent update references";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
                              "concurrent reference update");

  heap->try_inject_alloc_failure();
  op_updaterefs();
}

void ShenandoahConcurrentGC::entry_cleanup_complete() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent cleanup";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
  EventMark em("%s", msg);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  op_cleanup_complete();
}

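// Reset collector state for the new cycle. heap->prepare_gc() resets the
// marking bitmaps and re-captures region TAMS values (see the comment in
// ShenandoahInitMarkUpdateRegionStateClosure below); the pacer is given its
// budget for the reset phase.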
void ShenandoahConcurrentGC::op_reset() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_reset();
  }

  heap->prepare_gc();
}

class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
public:
  ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
    if (r->is_active()) {
      // Check if the region needs its TAMS updated. We have updated it already during
      // concurrent reset, so it is very likely we don't need to do another write here.
      if (_ctx->top_at_mark_start(r) != r->top()) {
        _ctx->capture_top_at_mark_start(r);
      }
    } else {
      assert(_ctx->top_at_mark_start(r) == r->top(),
             "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
    }
  }

  bool is_thread_safe() { return true; }
};

void ShenandoahConcurrentGC::start_mark() {
  _mark.start_mark();
}

void ShenandoahConcurrentGC::op_init_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");

  assert(heap->marking_context()->is_bitmap_clear(), "need clear marking bitmap");
  assert(!heap->marking_context()->is_complete(), "should not be complete");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");

  if (ShenandoahVerify) {
    heap->verifier()->verify_before_concmark();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  heap->set_concurrent_mark_in_progress(true);

  start_mark();

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
    ShenandoahInitMarkUpdateRegionStateClosure cl;
    heap->parallel_heap_region_iterate(&cl);
  }

  // Weak reference processing
  ShenandoahReferenceProcessor* rp = heap->ref_processor();
  rp->reset_thread_locals();
  rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());

  // Make above changes visible to worker threads
  OrderAccess::fence();

  // Arm nmethods for concurrent marking. When an nmethod is about to be executed,
  // we need to make sure that all its metadata are marked. The alternative is to
  // remark thread roots at the final mark pause, but that is a potential latency killer.
  if (heap->unload_classes()) {
    ShenandoahCodeRoots::arm_nmethods();
  }

  ShenandoahStackWatermark::change_epoch_id();
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_mark();
  }
}

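// Concurrent marking proper: op_mark_roots() marks objects reachable from the
// roots that can be scanned concurrently; op_mark() then drains the marking
// queues and SATB buffers until marking terminates (or the cycle is cancelled).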
void ShenandoahConcurrentGC::op_mark_roots() {
  _mark.mark_concurrent_roots();
}

void ShenandoahConcurrentGC::op_mark() {
  _mark.concurrent_mark();
}

void ShenandoahConcurrentGC::op_final_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");

  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_no_forwarded();
  }

  if (!heap->cancelled_gc()) {
    _mark.finish_mark();
    assert(!heap->cancelled_gc(), "STW mark cannot OOM");

    // Notify JVMTI that the tagmap table will need cleaning.
    JvmtiTagMap::set_needs_cleaning();

    heap->prepare_regions_and_collection_set(true /*concurrent*/);

    // Has to be done after cset selection
    heap->prepare_concurrent_roots();

    if (!heap->collection_set()->is_empty()) {
      if (ShenandoahVerify) {
        heap->verifier()->verify_before_evacuation();
      }

      heap->set_evacuation_in_progress(true);
      // From here on, we need to update references.
      heap->set_has_forwarded_objects(true);

      // Verify before arming for concurrent processing.
      // Otherwise, verification can trigger stack processing.
      if (ShenandoahVerify) {
        heap->verifier()->verify_during_evacuation();
      }

      // Arm nmethods/stack for concurrent processing
      ShenandoahCodeRoots::arm_nmethods();
      ShenandoahStackWatermark::change_epoch_id();

      if (ShenandoahPacing) {
        heap->pacer()->setup_for_evac();
      }
    } else {
      if (ShenandoahVerify) {
        heap->verifier()->verify_after_concmark();
      }

      if (VerifyAfterGC) {
        Universe::verify();
      }
    }
  }
}

class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
private:
  OopClosure* const _oops;

public:
  ShenandoahConcurrentEvacThreadClosure(OopClosure* oops);
  void do_thread(Thread* thread);
};

ShenandoahConcurrentEvacThreadClosure::ShenandoahConcurrentEvacThreadClosure(OopClosure* oops) :
  _oops(oops) {
}

void ShenandoahConcurrentEvacThreadClosure::do_thread(Thread* thread) {
  JavaThread* const jt = JavaThread::cast(thread);
  StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc);
}

class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask {
private:
  ShenandoahJavaThreadsIterator _java_threads;

public:
  ShenandoahConcurrentEvacUpdateThreadTask(uint n_workers) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Thread Roots"),
    _java_threads(ShenandoahPhaseTimings::conc_thread_roots, n_workers) {
  }

  void work(uint worker_id) {
    // The ShenandoahEvacOOMScope has to be set up by ShenandoahContextEvacuateUpdateRootsClosure;
    // otherwise, this may deadlock on the watermark lock.
    ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
    ShenandoahConcurrentEvacThreadClosure thr_cl(&oops_cl);
    _java_threads.threads_do(&thr_cl, worker_id);
  }
};

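// Concurrently evacuate/update oops in thread stacks. Stack watermarks make
// this safe: frames below the watermark are finished here by GC workers,
// while mutators process their own frames lazily as they return into them.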
void ShenandoahConcurrentGC::op_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_evacuation_in_progress(), "Checked by caller");
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
  ShenandoahConcurrentEvacUpdateThreadTask task(heap->workers()->active_workers());
  heap->workers()->run_task(&task);
}

void ShenandoahConcurrentGC::op_weak_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
  // Concurrent weak refs processing
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
  if (heap->gc_cause() == GCCause::_wb_breakpoint) {
    ShenandoahBreakpoint::at_after_reference_processing_started();
  }
  heap->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
}

class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _mark_context;
  bool _evac_in_progress;
  Thread* const _thread;

public:
  ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
  void do_oop(oop* p);
  void do_oop(narrowOop* p);
};

ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
  _heap(ShenandoahHeap::heap()),
  _mark_context(ShenandoahHeap::heap()->marking_context()),
  _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
  _thread(Thread::current()) {
}

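// Process a single OopStorage root slot: dead (unmarked) oops are cleared
// atomically; live oops that are in the collection set are evacuated if
// nobody has done so yet, and the slot is updated to point at the forwardee.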
void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
  const oop obj = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(obj)) {
    if (!_mark_context->is_marked(obj)) {
      shenandoah_assert_correct(p, obj);
      ShenandoahHeap::atomic_clear_oop(p, obj);
    } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
      oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      if (resolved == obj) {
        resolved = _heap->evacuate_object(obj, _thread);
      }
      ShenandoahHeap::atomic_update_oop(resolved, p, obj);
      assert(_heap->cancelled_gc() ||
             _mark_context->is_marked(resolved) && !_heap->in_collection_set(resolved),
             "Sanity");
    }
  }
}

void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
  ShouldNotReachHere();
}

class ShenandoahIsCLDAliveClosure : public CLDClosure {
public:
  void do_cld(ClassLoaderData* cld) {
    cld->is_alive();
  }
};

class ShenandoahIsNMethodAliveClosure : public NMethodClosure {
public:
  void do_nmethod(nmethod* n) {
    n->is_unloading();
  }
};

// This task not only evacuates/updates marked weak roots, but also NULLs out
// dead weak roots.
class ShenandoahConcurrentWeakRootsEvacUpdateTask : public WorkerTask {
private:
  ShenandoahVMWeakRoots<true /*concurrent*/> _vm_roots;

  // Roots related to concurrent class unloading
  ShenandoahClassLoaderDataRoots<true /* concurrent */> _cld_roots;
  ShenandoahConcurrentNMethodIterator _nmethod_itr;
  ShenandoahPhaseTimings::Phase _phase;

public:
  ShenandoahConcurrentWeakRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Weak Roots"),
    _vm_roots(phase),
    _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers(), false /*heap iteration*/),
    _nmethod_itr(ShenandoahCodeRoots::table()),
    _phase(phase) {
    if (ShenandoahHeap::heap()->unload_classes()) {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      _nmethod_itr.nmethods_do_begin();
    }
  }

  ~ShenandoahConcurrentWeakRootsEvacUpdateTask() {
    if (ShenandoahHeap::heap()->unload_classes()) {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      _nmethod_itr.nmethods_do_end();
    }
    // Notify runtime data structures of potentially dead oops
    _vm_roots.report_num_dead();
  }

  void work(uint worker_id) {
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    {
      ShenandoahEvacOOMScope oom;
      // jni_roots and weak_roots are OopStorage backed roots, concurrent iteration
      // may race against OopStorage::release() calls.
      ShenandoahEvacUpdateCleanupOopStorageRootsClosure cl;
      _vm_roots.oops_do(&cl, worker_id);
    }

    // If we are going to perform concurrent class unloading later on, we need to
    // clean up the weak oops in CLDs and determine each nmethod's unloading state,
    // so that we can reclaim immediate garbage sooner.
    if (ShenandoahHeap::heap()->unload_classes()) {
      // Applies ShenandoahIsCLDAliveClosure to CLDs; the native barrier will either
      // NULL the CLD's holder or evacuate it.
      {
        ShenandoahIsCLDAliveClosure is_cld_alive;
        _cld_roots.cld_do(&is_cld_alive, worker_id);
      }

      // Applies ShenandoahIsNMethodAliveClosure to registered nmethods.
      // The closure calls nmethod->is_unloading(). The is_unloading
      // state is cached; therefore, during the concurrent class unloading phase,
      // we will not touch the metadata of unloading nmethods.
      {
        ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
        ShenandoahIsNMethodAliveClosure is_nmethod_alive;
        _nmethod_itr.nmethods_do(&is_nmethod_alive);
      }
    }
  }
};

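// Concurrent weak root processing: evacuate/update live weak roots and clear
// dead ones, then rendezvous (handshake) all threads so that no thread can
// still observe a stale dead oop afterwards.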
void ShenandoahConcurrentGC::op_weak_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
  // Concurrent weak root processing
  {
    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_work);
    ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots_work);
    ShenandoahConcurrentWeakRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_weak_roots_work);
    heap->workers()->run_task(&task);
  }

  // Perform handshake to flush out dead oops
  {
    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_rendezvous);
    heap->rendezvous_threads();
  }
}

void ShenandoahConcurrentGC::op_class_unloading() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress() &&
         heap->unload_classes(),
         "Checked by caller");
  heap->do_class_unloading();
}

class ShenandoahEvacUpdateCodeCacheClosure : public NMethodClosure {
private:
  BarrierSetNMethod* const _bs;
  ShenandoahEvacuateUpdateMetadataClosure _cl;

public:
  ShenandoahEvacUpdateCodeCacheClosure() :
    _bs(BarrierSet::barrier_set()->barrier_set_nmethod()),
    _cl() {
  }

  void do_nmethod(nmethod* n) {
    ShenandoahNMethod* data = ShenandoahNMethod::gc_data(n);
    ShenandoahReentrantLocker locker(data->lock());
    // Set up the EvacOOM scope below the reentrant lock to avoid deadlock with
    // nmethod_entry_barrier
    ShenandoahEvacOOMScope oom;
    data->oops_do(&_cl, true /*fix relocation*/);
    _bs->disarm(n);
  }
};

class ShenandoahConcurrentRootsEvacUpdateTask : public WorkerTask {
private:
  ShenandoahPhaseTimings::Phase _phase;
  ShenandoahVMRoots<true /*concurrent*/> _vm_roots;
  ShenandoahClassLoaderDataRoots<true /*concurrent*/> _cld_roots;
  ShenandoahConcurrentNMethodIterator _nmethod_itr;

public:
  ShenandoahConcurrentRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Strong Roots"),
    _phase(phase),
    _vm_roots(phase),
    _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers(), false /*heap iteration*/),
    _nmethod_itr(ShenandoahCodeRoots::table()) {
    if (!ShenandoahHeap::heap()->unload_classes()) {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      _nmethod_itr.nmethods_do_begin();
    }
  }

  ~ShenandoahConcurrentRootsEvacUpdateTask() {
    if (!ShenandoahHeap::heap()->unload_classes()) {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      _nmethod_itr.nmethods_do_end();
    }
  }

  void work(uint worker_id) {
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    {
      ShenandoahEvacOOMScope oom;
      {
        // vm_roots and weak_roots are OopStorage backed roots, concurrent iteration
        // may race against OopStorage::release() calls.
        ShenandoahContextEvacuateUpdateRootsClosure cl;
        _vm_roots.oops_do<ShenandoahContextEvacuateUpdateRootsClosure>(&cl, worker_id);
      }

      {
        ShenandoahEvacuateUpdateMetadataClosure cl;
        CLDToOopClosure clds(&cl, ClassLoaderData::_claim_strong);
        _cld_roots.cld_do(&clds, worker_id);
      }
    }

    // Cannot set up ShenandoahEvacOOMScope here, due to potential deadlock with nmethod_entry_barrier.
    if (!ShenandoahHeap::heap()->unload_classes()) {
      ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
      ShenandoahEvacUpdateCodeCacheClosure cl;
      _nmethod_itr.nmethods_do(&cl);
    }
  }
};

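// Concurrent strong root processing: evacuate/update the remaining strong
// roots (VM roots, CLDs and, when classes are not being unloaded, code cache
// roots), then drop the strong-root-in-progress flag.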
void ShenandoahConcurrentGC::op_strong_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
  ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
  heap->workers()->run_task(&task);
  heap->set_concurrent_strong_root_in_progress(false);
}

void ShenandoahConcurrentGC::op_cleanup_early() {
  ShenandoahHeap::heap()->free_set()->recycle_trash();
}

void ShenandoahConcurrentGC::op_evacuate() {
  ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
}

void ShenandoahConcurrentGC::op_init_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->set_evacuation_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);
  heap->prepare_update_heap_references(true /*concurrent*/);
  heap->set_update_refs_in_progress(true);

  if (ShenandoahPacing) {
    heap->pacer()->setup_for_updaterefs();
  }
}

void ShenandoahConcurrentGC::op_updaterefs() {
  ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
}

class ShenandoahUpdateThreadClosure : public HandshakeClosure {
private:
  ShenandoahUpdateRefsClosure _cl;
public:
  ShenandoahUpdateThreadClosure();
  void do_thread(Thread* thread);
};

ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
  HandshakeClosure("Shenandoah Update Thread Roots") {
}

void ShenandoahUpdateThreadClosure::do_thread(Thread* thread) {
  if (thread->is_Java_thread()) {
    JavaThread* jt = JavaThread::cast(thread);
    ResourceMark rm;
    jt->oops_do(&_cl, NULL);
  }
}

void ShenandoahConcurrentGC::op_update_thread_roots() {
  ShenandoahUpdateThreadClosure cl;
  Handshake::execute(&cl);
}

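// Final update-refs pause: finish any outstanding concurrent root updates,
// clear cancellation and forwarding state, verify (optionally), update region
// states, and rebuild the free set for the next cycle.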
void ShenandoahConcurrentGC::op_final_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
  assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");

  heap->finish_concurrent_roots();

  // Clear cancelled GC, if set. On the cancellation path, the block before
  // would have handled everything.
  if (heap->cancelled_gc()) {
    heap->clear_cancelled_gc();
  }

  // Has to be done before the cset is cleared
  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_in_to_space();
  }

  heap->update_heap_region_states(true /*concurrent*/);

  heap->set_update_refs_in_progress(false);
  heap->set_has_forwarded_objects(false);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_updaterefs();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  heap->rebuild_free_set(true /*concurrent*/);
}

void ShenandoahConcurrentGC::op_final_roots() {
  ShenandoahHeap::heap()->set_concurrent_weak_root_in_progress(false);
}

void ShenandoahConcurrentGC::op_cleanup_complete() {
  ShenandoahHeap::heap()->free_set()->recycle_trash();
}

bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
  if (ShenandoahHeap::heap()->cancelled_gc()) {
    _degen_point = point;
    return true;
  }
  return false;
}

const char* ShenandoahConcurrentGC::init_mark_event_message() const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
  if (heap->unload_classes()) {
    return "Pause Init Mark (unload classes)";
  } else {
    return "Pause Init Mark";
  }
}

const char* ShenandoahConcurrentGC::final_mark_event_message() const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
  if (heap->unload_classes()) {
    return "Pause Final Mark (unload classes)";
  } else {
    return "Pause Final Mark";
  }
}

const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
  if (heap->unload_classes()) {
    return "Concurrent marking (unload classes)";
  } else {
    return "Concurrent marking";
  }
}