/*
 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shenandoah/shenandoahBreakpoint.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "gc/shenandoah/shenandoahLock.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahStackWatermark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "memory/allocation.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/events.hpp"

// Breakpoint support
class ShenandoahBreakpointGCScope : public StackObj {
private:
  const GCCause::Cause _cause;
public:
  ShenandoahBreakpointGCScope(GCCause::Cause cause) : _cause(cause) {
    if (cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::start_gc();
      ShenandoahBreakpoint::at_before_gc();
    }
  }

  ~ShenandoahBreakpointGCScope() {
    if (_cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::at_after_gc();
    }
  }
};

class ShenandoahBreakpointMarkScope : public StackObj {
private:
  const GCCause::Cause _cause;
public:
  ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
    if (_cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::at_after_marking_started();
    }
  }

  ~ShenandoahBreakpointMarkScope() {
    if (_cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::at_before_marking_completed();
    }
  }
};

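// Note: _do_old_gc_bootstrap is only meaningful in generational mode. A bootstrap cycle
// additionally prepares the old generation (see entry_reset() and op_init_mark()) so that
// concurrent marking of the old generation can follow this young cycle.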
ShenandoahConcurrentGC::ShenandoahConcurrentGC(ShenandoahGeneration* generation, bool do_old_gc_bootstrap) :
  _mark(generation),
  _generation(generation),
  _degen_point(ShenandoahDegenPoint::_degenerated_unset),
  _abbreviated(false),
  _do_old_gc_bootstrap(do_old_gc_bootstrap) {
}

ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
  return _degen_point;
}

bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);

  // Reset for upcoming marking
  entry_reset();

  // Start initial mark under STW
  vmop_entry_init_mark();

  {
    ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);

    // Reset task queue stats here, rather than in mark_concurrent_roots,
    // because remembered set scan will `push` oops into the queues and
    // resetting after this happens will lose those counts.
    TASKQUEUE_STATS_ONLY(_mark.task_queues()->reset_taskqueue_stats());

    // Concurrent remembered set scanning
    entry_scan_remembered_set();

    // Concurrent mark roots
    entry_mark_roots();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_roots)) {
      return false;
    }

    // Continue concurrent mark
    entry_mark();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) {
      return false;
    }
  }

  // Complete marking under STW, and start evacuation
  vmop_entry_final_mark();

  // If the GC was cancelled before final mark, nothing happens on the safepoint. We are still
  // in the marking phase and must resume the degenerated cycle from there. If the GC was cancelled
  // after final mark, then we've entered the evacuation phase and must resume the degenerated cycle
  // from that phase.
  if (_generation->is_concurrent_mark_in_progress()) {
    bool cancelled = check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark);
    assert(cancelled, "GC must have been cancelled between concurrent and final mark");
    return false;
  }

  // Concurrent stack processing
  if (heap->is_evacuation_in_progress()) {
    entry_thread_roots();
  }

  // Process weak roots that might still point to regions that would be broken by cleanup
  if (heap->is_concurrent_weak_root_in_progress()) {
    entry_weak_refs();
    entry_weak_roots();
  }

  // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
  // the space. This would be the last action if there is nothing to evacuate. Note that
  // we will not age young-gen objects in the case that we skip evacuation.
  entry_cleanup_early();

  heap->free_set()->log_status_under_lock();

  // Perform concurrent class unloading
  if (heap->unload_classes() &&
      heap->is_concurrent_weak_root_in_progress()) {
    entry_class_unloading();
  }

  // Processing strong roots
  // This may be skipped if there is nothing to update/evacuate.
  // If so, strong_root_in_progress would be unset.
  if (heap->is_concurrent_strong_root_in_progress()) {
    entry_strong_roots();
  }

  // Continue the cycle with evacuation and optional update-refs.
  // This may be skipped if there is nothing to evacuate.
  // If so, evac_in_progress would be unset by collection set preparation code.
  if (heap->is_evacuation_in_progress()) {
    // Concurrently evacuate
    entry_evacuate();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) {
      return false;
    }

    // Perform update-refs phase.
    vmop_entry_init_updaterefs();
    entry_updaterefs();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) {
      return false;
    }

    // Concurrent update thread roots
    entry_update_thread_roots();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) {
      return false;
    }

    vmop_entry_final_updaterefs();

    // Update references has freed up the collection set; kick the cleanup to reclaim the space.
    entry_cleanup_complete();
  } else {
    // We chose not to evacuate because we found sufficient immediate garbage.
    // However, there may still be regions to promote in place, so do that now.
    if (has_in_place_promotions(heap)) {
      entry_promote_in_place();

      // If the promote-in-place operation was cancelled, we can have the degenerated
      // cycle complete the operation. It will see that no evacuations are in progress,
      // and that there are regions wanting promotion. The risk with not handling the
      // cancellation would be failing to restore top for these regions and leaving
      // them unable to serve allocations for the old generation.
      if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) {
        return false;
      }
    }

    // At this point, the cycle is effectively complete. If the cycle has been cancelled here,
    // the control thread will detect it on its next iteration and run a degenerated young cycle.
    vmop_entry_final_roots();
    _abbreviated = true;
  }

  // We defer generation resizing actions until after cset regions have been recycled.
  // We do this even following an abbreviated cycle.
  if (heap->mode()->is_generational()) {
    ShenandoahGenerationalHeap::heap()->complete_concurrent_cycle();
  }
  return true;
}

void ShenandoahConcurrentGC::vmop_entry_init_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahInitMark op(this);
  VMThread::execute(&op); // jump to entry_init_mark() under safepoint
}

void ShenandoahConcurrentGC::vmop_entry_final_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalMarkStartEvac op(this);
  VMThread::execute(&op); // jump to entry_final_mark under safepoint
}

void ShenandoahConcurrentGC::vmop_entry_init_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_update_refs_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahInitUpdateRefs op(this);
  VMThread::execute(&op);
}

void ShenandoahConcurrentGC::vmop_entry_final_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalUpdateRefs op(this);
  VMThread::execute(&op);
}

void ShenandoahConcurrentGC::vmop_entry_final_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_roots_gross);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalRoots op(this);
  VMThread::execute(&op);
}

void ShenandoahConcurrentGC::entry_init_mark() {
  const char* msg = init_mark_event_message();
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
                              "init marking");

  op_init_mark();
}

void ShenandoahConcurrentGC::entry_final_mark() {
  const char* msg = final_mark_event_message();
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
                              "final marking");

  op_final_mark();
}

void ShenandoahConcurrentGC::entry_init_updaterefs() {
  static const char* msg = "Pause Init Update Refs";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs);
  EventMark em("%s", msg);

  // No workers used in this phase, no setup required
  op_init_updaterefs();
}

void ShenandoahConcurrentGC::entry_final_updaterefs() {
  static const char* msg = "Pause Final Update Refs";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
                              "final reference update");

  op_final_updaterefs();
}

void ShenandoahConcurrentGC::entry_final_roots() {
  const char* msg = final_roots_event_message();
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots);
  EventMark em("%s", msg);

  op_final_roots();
}

void ShenandoahConcurrentGC::entry_reset() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->try_inject_alloc_failure();

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  {
    const char* msg = conc_reset_event_message();
    ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
    EventMark em("%s", msg);

    ShenandoahWorkerScope scope(heap->workers(),
                                ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
                                msg);
    op_reset();
  }

  if (_do_old_gc_bootstrap) {
    static const char* msg = "Concurrent reset (Old)";
    ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset_old);
    ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                                ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
                                msg);
    EventMark em("%s", msg);

    heap->old_generation()->prepare_gc();
  }
}

void ShenandoahConcurrentGC::entry_scan_remembered_set() {
  if (_generation->is_young()) {
    ShenandoahHeap* const heap = ShenandoahHeap::heap();
    TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
    const char* msg = "Concurrent remembered set scanning";
    ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::init_scan_rset);
    EventMark em("%s", msg);

    ShenandoahWorkerScope scope(heap->workers(),
                                ShenandoahWorkerPolicy::calc_workers_for_rs_scanning(),
                                msg);

    heap->try_inject_alloc_failure();
    _generation->scan_remembered_set(true /* is_concurrent */);
  }
}

void ShenandoahConcurrentGC::entry_mark_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = "Concurrent marking roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent marking roots");

  heap->try_inject_alloc_failure();
  op_mark_roots();
}

void ShenandoahConcurrentGC::entry_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = conc_mark_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent marking");

  heap->try_inject_alloc_failure();
  op_mark();
}

void ShenandoahConcurrentGC::entry_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  static const char* msg = "Concurrent thread roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_thread_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              msg);

  heap->try_inject_alloc_failure();
  op_thread_roots();
}

void ShenandoahConcurrentGC::entry_weak_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  const char* msg = conc_weak_refs_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_refs_processing(),
                              "concurrent weak references");

  heap->try_inject_alloc_failure();
  op_weak_refs();
}

void ShenandoahConcurrentGC::entry_weak_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = conc_weak_roots_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent weak root");

  heap->try_inject_alloc_failure();
  op_weak_roots();
}

void ShenandoahConcurrentGC::entry_class_unloading() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent class unloading";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_class_unload);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent class unloading");

  heap->try_inject_alloc_failure();
  op_class_unloading();
}

void ShenandoahConcurrentGC::entry_strong_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent strong roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_strong_roots);
  EventMark em("%s", msg);

  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_strong_roots);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent strong root");

  heap->try_inject_alloc_failure();
  op_strong_roots();
}

void ShenandoahConcurrentGC::entry_cleanup_early() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = conc_cleanup_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_early, true /* log_heap_usage */);
  EventMark em("%s", msg);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  op_cleanup_early();
}

void ShenandoahConcurrentGC::entry_evacuate() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  static const char* msg = "Concurrent evacuation";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_evac);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
                              "concurrent evacuation");

  heap->try_inject_alloc_failure();
  op_evacuate();
}

void ShenandoahConcurrentGC::entry_promote_in_place() {
  shenandoah_assert_generational();

  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  static const char* msg = "Promote in place";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::promote_in_place);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
                              "promote in place");

  ShenandoahGenerationalHeap::heap()->promote_regions_in_place(true);
}

void ShenandoahConcurrentGC::entry_update_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  static const char* msg = "Concurrent update thread roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_thread_roots);
  EventMark em("%s", msg);

  // No workers used in this phase, no setup required
  heap->try_inject_alloc_failure();
  op_update_thread_roots();
}

void ShenandoahConcurrentGC::entry_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent update references";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
                              "concurrent reference update");

  heap->try_inject_alloc_failure();
  op_updaterefs();
}

void ShenandoahConcurrentGC::entry_cleanup_complete() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = conc_cleanup_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
  EventMark em("%s", msg);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  op_cleanup_complete();
}

void ShenandoahConcurrentGC::op_reset() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_reset();
  }
  _generation->prepare_gc();
}

class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
public:
  ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
    if (r->is_active()) {
      // Check if the region needs to update its TAMS. We have updated it already during concurrent
      // reset, so it is very likely we don't need to do another write here. Since most regions
      // are not "active", this path is relatively rare.
      if (_ctx->top_at_mark_start(r) != r->top()) {
        _ctx->capture_top_at_mark_start(r);
      }
    } else {
      assert(_ctx->top_at_mark_start(r) == r->top(),
             "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
    }
  }

  bool is_thread_safe() { return true; }
};

void ShenandoahConcurrentGC::start_mark() {
  _mark.start_mark();
}

void ShenandoahConcurrentGC::op_init_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");

  assert(_generation->is_bitmap_clear(), "need clear marking bitmap");
  assert(!_generation->is_mark_complete(), "should not be complete");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");

  if (heap->mode()->is_generational()) {
    if (_generation->is_young()) {
      // The current implementation of swap_remembered_set() copies the write-card-table to the read-card-table.
      ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_swap_rset);
      _generation->swap_remembered_set();
    }

    if (_generation->is_global()) {
      heap->old_generation()->cancel_gc();
    } else if (heap->is_concurrent_old_mark_in_progress()) {
      // Purge the SATB buffers, transferring any valid, old pointers to the
      // old generation mark queue. Any pointers in a young region will be
      // abandoned.
      ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_transfer_satb);
      heap->old_generation()->transfer_pointers_from_satb();
    }
  }

  if (ShenandoahVerify) {
    heap->verifier()->verify_before_concmark();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  _generation->set_concurrent_mark_in_progress(true);

  start_mark();

  if (_do_old_gc_bootstrap) {
    shenandoah_assert_generational();
    // Update region state for both young and old regions
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
    ShenandoahInitMarkUpdateRegionStateClosure cl;
    heap->parallel_heap_region_iterate(&cl);
    heap->old_generation()->ref_processor()->reset_thread_locals();
  } else {
    // Update region state for only young regions
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
    ShenandoahInitMarkUpdateRegionStateClosure cl;
    _generation->parallel_heap_region_iterate(&cl);
  }

  // Weak reference processing
  ShenandoahReferenceProcessor* rp = _generation->ref_processor();
  rp->reset_thread_locals();
  rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());

  // Make above changes visible to worker threads
  OrderAccess::fence();

  // Arm nmethods for concurrent mark
  ShenandoahCodeRoots::arm_nmethods_for_mark();

  ShenandoahStackWatermark::change_epoch_id();
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_mark();
  }
}

void ShenandoahConcurrentGC::op_mark_roots() {
  _mark.mark_concurrent_roots();
}

void ShenandoahConcurrentGC::op_mark() {
  _mark.concurrent_mark();
}

void ShenandoahConcurrentGC::op_final_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");

  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_no_forwarded();
  }

  if (!heap->cancelled_gc()) {
    _mark.finish_mark();
    assert(!heap->cancelled_gc(), "STW mark cannot OOM");

    // Notify JVMTI that the tagmap table will need cleaning.
    JvmtiTagMap::set_needs_cleaning();

    // The collection set is chosen by prepare_regions_and_collection_set(). Additionally, certain parameters have been
    // established to govern the evacuation efforts that are about to begin. Refer to comments on reserve members in
    // ShenandoahGeneration and ShenandoahOldGeneration for more detail.
    _generation->prepare_regions_and_collection_set(true /*concurrent*/);

    // Has to be done after cset selection
    heap->prepare_concurrent_roots();

    if (!heap->collection_set()->is_empty()) {
      LogTarget(Debug, gc, cset) lt;
      if (lt.is_enabled()) {
        ResourceMark rm;
        LogStream ls(lt);
        heap->collection_set()->print_on(&ls);
      }

      if (ShenandoahVerify) {
        heap->verifier()->verify_before_evacuation();
      }

      heap->set_evacuation_in_progress(true);
      // From here on, we need to update references.
      heap->set_has_forwarded_objects(true);

      // Arm nmethods/stack for concurrent processing
      ShenandoahCodeRoots::arm_nmethods_for_evac();
      ShenandoahStackWatermark::change_epoch_id();

      if (ShenandoahPacing) {
        heap->pacer()->setup_for_evac();
      }
    } else {
      if (ShenandoahVerify) {
        if (has_in_place_promotions(heap)) {
          heap->verifier()->verify_after_concmark_with_promotions();
        } else {
          heap->verifier()->verify_after_concmark();
        }
      }

      if (VerifyAfterGC) {
        Universe::verify();
      }
    }
  }
}

bool ShenandoahConcurrentGC::has_in_place_promotions(ShenandoahHeap* heap) {
  return heap->mode()->is_generational() && heap->old_generation()->has_in_place_promotions();
}

template<bool GENERATIONAL>
class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
private:
  OopClosure* const _oops;
public:
  explicit ShenandoahConcurrentEvacThreadClosure(OopClosure* oops) : _oops(oops) {}

  void do_thread(Thread* thread) override {
    JavaThread* const jt = JavaThread::cast(thread);
    StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc);
    if (GENERATIONAL) {
      ShenandoahThreadLocalData::enable_plab_promotions(thread);
    }
  }
};

template<bool GENERATIONAL>
class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask {
private:
  ShenandoahJavaThreadsIterator _java_threads;

public:
  explicit ShenandoahConcurrentEvacUpdateThreadTask(uint n_workers) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Thread Roots"),
    _java_threads(ShenandoahPhaseTimings::conc_thread_roots, n_workers) {
  }

  void work(uint worker_id) override {
    if (GENERATIONAL) {
      Thread* worker_thread = Thread::current();
      ShenandoahThreadLocalData::enable_plab_promotions(worker_thread);
    }

    // ShenandoahEvacOOMScope has to be setup by ShenandoahContextEvacuateUpdateRootsClosure.
    // Otherwise, it may deadlock with the watermark lock.
    ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
    ShenandoahConcurrentEvacThreadClosure<GENERATIONAL> thr_cl(&oops_cl);
    _java_threads.threads_do(&thr_cl, worker_id);
  }
};

void ShenandoahConcurrentGC::op_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_evacuation_in_progress(), "Checked by caller");
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
  if (heap->mode()->is_generational()) {
    ShenandoahConcurrentEvacUpdateThreadTask<true> task(heap->workers()->active_workers());
    heap->workers()->run_task(&task);
  } else {
    ShenandoahConcurrentEvacUpdateThreadTask<false> task(heap->workers()->active_workers());
    heap->workers()->run_task(&task);
  }
}

void ShenandoahConcurrentGC::op_weak_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
  // Concurrent weak refs processing
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
  if (heap->gc_cause() == GCCause::_wb_breakpoint) {
    ShenandoahBreakpoint::at_after_reference_processing_started();
  }
  _generation->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
}

class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _mark_context;
  bool _evac_in_progress;
  Thread* const _thread;

public:
  ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
  void do_oop(oop* p);
  void do_oop(narrowOop* p);
};

ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
  _heap(ShenandoahHeap::heap()),
  _mark_context(ShenandoahHeap::heap()->marking_context()),
  _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
  _thread(Thread::current()) {
}

void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
  const oop obj = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(obj)) {
    if (!_mark_context->is_marked(obj)) {
      shenandoah_assert_generations_reconciled();
      if (_heap->is_in_active_generation(obj)) {
        // Note: The obj is dead here. Do not touch it, just clear.
        ShenandoahHeap::atomic_clear_oop(p, obj);
      }
    } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
      oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      if (resolved == obj) {
        resolved = _heap->evacuate_object(obj, _thread);
      }
      shenandoah_assert_not_in_cset_except(p, resolved, _heap->cancelled_gc());
      ShenandoahHeap::atomic_update_oop(resolved, p, obj);
    }
  }
}

void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
  ShouldNotReachHere();
}

class ShenandoahIsCLDAliveClosure : public CLDClosure {
public:
  void do_cld(ClassLoaderData* cld) {
    cld->is_alive();
  }
};

class ShenandoahIsNMethodAliveClosure: public NMethodClosure {
public:
  void do_nmethod(nmethod* n) {
    n->is_unloading();
  }
};

// This task not only evacuates/updates marked weak roots, but also nulls out
// dead weak roots.
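// OopStorage-backed roots are processed with ShenandoahEvacUpdateCleanupOopStorageRootsClosure
// (defined above); CLD and nmethod liveness is only probed here when class unloading is enabled.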
class ShenandoahConcurrentWeakRootsEvacUpdateTask : public WorkerTask {
private:
  ShenandoahVMWeakRoots<true /*concurrent*/> _vm_roots;

  // Roots related to concurrent class unloading
  ShenandoahClassLoaderDataRoots<true /* concurrent */>
                                       _cld_roots;
  ShenandoahConcurrentNMethodIterator  _nmethod_itr;
  ShenandoahPhaseTimings::Phase        _phase;

public:
  ShenandoahConcurrentWeakRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Weak Roots"),
    _vm_roots(phase),
    _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers(), false /*heap iteration*/),
    _nmethod_itr(ShenandoahCodeRoots::table()),
    _phase(phase) {}

  ~ShenandoahConcurrentWeakRootsEvacUpdateTask() {
    // Notify runtime data structures of potentially dead oops
    _vm_roots.report_num_dead();
  }

  void work(uint worker_id) {
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    ShenandoahSuspendibleThreadSetJoiner sts_join;
    {
      ShenandoahEvacOOMScope oom;
      // jni_roots and weak_roots are OopStorage backed roots, concurrent iteration
      // may race against OopStorage::release() calls.
      ShenandoahEvacUpdateCleanupOopStorageRootsClosure cl;
      _vm_roots.oops_do(&cl, worker_id);
    }

    // If we are going to perform concurrent class unloading later on, we need to
    // clean up the weak oops in CLDs and determine the nmethods' unloading state, so that
    // we can clean up immediate garbage sooner.
    if (ShenandoahHeap::heap()->unload_classes()) {
      // Applies ShenandoahIsCLDAliveClosure to CLDs; the native barrier will either null the
      // CLD's holder or evacuate it.
      {
        ShenandoahIsCLDAliveClosure is_cld_alive;
        _cld_roots.cld_do(&is_cld_alive, worker_id);
      }

      // Applies ShenandoahIsNMethodAliveClosure to registered nmethods.
      // The closure calls nmethod->is_unloading(). The is_unloading
      // state is cached; therefore, during the concurrent class unloading phase,
      // we will not touch the metadata of unloading nmethods.
      {
        ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
        ShenandoahIsNMethodAliveClosure is_nmethod_alive;
        _nmethod_itr.nmethods_do(&is_nmethod_alive);
      }
    }
  }
};

void ShenandoahConcurrentGC::op_weak_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
  // Concurrent weak root processing
  {
    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_work);
    ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots_work);
    ShenandoahConcurrentWeakRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_weak_roots_work);
    heap->workers()->run_task(&task);
  }

  // Perform handshake to flush out dead oops
  {
    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_rendezvous);
    heap->rendezvous_threads("Shenandoah Concurrent Weak Roots");
  }
  // We can only toggle concurrent_weak_root_in_progress flag
  // at a safepoint, so that mutators see a consistent
  // value. The flag will be cleared at the next safepoint.
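  // (op_init_updaterefs() and op_final_roots() clear it at their respective safepoints.)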
}

void ShenandoahConcurrentGC::op_class_unloading() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert (heap->is_concurrent_weak_root_in_progress() &&
          heap->unload_classes(),
          "Checked by caller");
  heap->do_class_unloading();
}

class ShenandoahEvacUpdateCodeCacheClosure : public NMethodClosure {
private:
  BarrierSetNMethod* const _bs;
  ShenandoahEvacuateUpdateMetadataClosure _cl;

public:
  ShenandoahEvacUpdateCodeCacheClosure() :
    _bs(BarrierSet::barrier_set()->barrier_set_nmethod()),
    _cl() {
  }

  void do_nmethod(nmethod* n) {
    ShenandoahNMethod* data = ShenandoahNMethod::gc_data(n);
    ShenandoahReentrantLocker locker(data->lock());
    // Setup EvacOOM scope below reentrant lock to avoid deadlock with
    // nmethod_entry_barrier
    ShenandoahEvacOOMScope oom;
    data->oops_do(&_cl, true /*fix relocation*/);
    _bs->disarm(n);
  }
};

class ShenandoahConcurrentRootsEvacUpdateTask : public WorkerTask {
private:
  ShenandoahPhaseTimings::Phase _phase;
  ShenandoahVMRoots<true /*concurrent*/> _vm_roots;
  ShenandoahClassLoaderDataRoots<true /*concurrent*/>
                                       _cld_roots;
  ShenandoahConcurrentNMethodIterator  _nmethod_itr;

public:
  ShenandoahConcurrentRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Strong Roots"),
    _phase(phase),
    _vm_roots(phase),
    _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers(), false /*heap iteration*/),
    _nmethod_itr(ShenandoahCodeRoots::table()) {}

  void work(uint worker_id) {
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    {
      ShenandoahEvacOOMScope oom;
      {
        // vm_roots and weak_roots are OopStorage backed roots, concurrent iteration
        // may race against OopStorage::release() calls.
        ShenandoahContextEvacuateUpdateRootsClosure cl;
        _vm_roots.oops_do<ShenandoahContextEvacuateUpdateRootsClosure>(&cl, worker_id);
      }

      {
        ShenandoahEvacuateUpdateMetadataClosure cl;
        CLDToOopClosure clds(&cl, ClassLoaderData::_claim_strong);
        _cld_roots.cld_do(&clds, worker_id);
      }
    }

    // Cannot setup ShenandoahEvacOOMScope here, due to potential deadlock with nmethod_entry_barrier.
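    // ShenandoahEvacUpdateCodeCacheClosure (above) instead enters the OOM scope under the
    // per-nmethod lock in do_nmethod().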
    if (!ShenandoahHeap::heap()->unload_classes()) {
      ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
      ShenandoahEvacUpdateCodeCacheClosure cl;
      _nmethod_itr.nmethods_do(&cl);
    }
  }
};

void ShenandoahConcurrentGC::op_strong_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
  ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
  heap->workers()->run_task(&task);
  heap->set_concurrent_strong_root_in_progress(false);
}

void ShenandoahConcurrentGC::op_cleanup_early() {
  ShenandoahHeap::heap()->free_set()->recycle_trash();
}

void ShenandoahConcurrentGC::op_evacuate() {
  ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
}

void ShenandoahConcurrentGC::op_init_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->set_evacuation_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);
  heap->prepare_update_heap_references(true /*concurrent*/);
  heap->set_update_refs_in_progress(true);
  if (ShenandoahVerify) {
    heap->verifier()->verify_before_updaterefs();
  }
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_updaterefs();
  }
}

void ShenandoahConcurrentGC::op_updaterefs() {
  ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
}

class ShenandoahUpdateThreadClosure : public HandshakeClosure {
private:
  // This closure runs when the thread is stopped for handshake, which means
  // we can use a non-concurrent closure here, as long as it only updates
  // locations modified by the thread itself, i.e. stack locations.
  ShenandoahNonConcUpdateRefsClosure _cl;
public:
  ShenandoahUpdateThreadClosure();
  void do_thread(Thread* thread);
};

ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
  HandshakeClosure("Shenandoah Update Thread Roots") {
}

void ShenandoahUpdateThreadClosure::do_thread(Thread* thread) {
  if (thread->is_Java_thread()) {
    JavaThread* jt = JavaThread::cast(thread);
    ResourceMark rm;
    jt->oops_do(&_cl, nullptr);
  }
}

void ShenandoahConcurrentGC::op_update_thread_roots() {
  ShenandoahUpdateThreadClosure cl;
  Handshake::execute(&cl);
}

void ShenandoahConcurrentGC::op_final_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
  assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");

  heap->finish_concurrent_roots();

  // Clear cancelled GC, if set. On cancellation path, the block before would handle
  // everything.
  if (heap->cancelled_gc()) {
    heap->clear_cancelled_gc(true /* clear oom handler */);
  }

  // Has to be done before the cset is cleared
  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_in_to_space();
  }

  // If we are running in generational mode and this is an aging cycle, this will also age active
  // regions that haven't been used for allocation.
  heap->update_heap_region_states(true /*concurrent*/);

  heap->set_update_refs_in_progress(false);
  heap->set_has_forwarded_objects(false);

  if (heap->mode()->is_generational() && heap->is_concurrent_old_mark_in_progress()) {
    // When the SATB barrier is left on to support concurrent old gen mark, it may pick up writes to
    // objects in the collection set. After those objects are evacuated, the pointers in the
    // SATB are no longer safe. Once we have finished update references, we are guaranteed that
    // no more writes to the collection set are possible.
    //
    // This will transfer any old pointers in _active_ regions from the SATB to the old gen
    // mark queues. All other pointers will be discarded. This would also discard any pointers
    // in old regions that were included in a mixed evacuation. We aren't using the SATB filter
    // methods here because we cannot control when they execute. If the SATB filter runs _after_
    // a region has been recycled, we will not be able to detect the bad pointer.
    //
    // We are not concerned about skipping this step in abbreviated cycles because regions
    // with no live objects cannot have been written to and so cannot have entries in the SATB
    // buffers.
    heap->old_generation()->transfer_pointers_from_satb();

    // The aging cycle is only relevant during the evacuation phase for individual objects and during
    // final mark for entire regions. Both of these operations occur before final update refs.
    ShenandoahGenerationalHeap::heap()->set_aging_cycle(false);
  }

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_updaterefs();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  heap->rebuild_free_set(true /*concurrent*/);
}

void ShenandoahConcurrentGC::op_final_roots() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  heap->set_concurrent_weak_root_in_progress(false);
  heap->set_evacuation_in_progress(false);

  if (heap->mode()->is_generational()) {
    // If the cycle was shortened for having enough immediate garbage, this could be
    // the last GC safepoint before concurrent marking of old resumes. We must be sure
    // that old mark threads don't see any pointers to garbage in the SATB buffers.
    if (heap->is_concurrent_old_mark_in_progress()) {
      heap->old_generation()->transfer_pointers_from_satb();
    }

    if (!_generation->is_old()) {
      ShenandoahGenerationalHeap::heap()->update_region_ages(_generation->complete_marking_context());
    }
  }
}

void ShenandoahConcurrentGC::op_cleanup_complete() {
  ShenandoahHeap::heap()->free_set()->recycle_trash();
}

bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
  if (ShenandoahHeap::heap()->cancelled_gc()) {
    _degen_point = point;
    return true;
  }
  return false;
}

const char* ShenandoahConcurrentGC::init_mark_event_message() const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
  if (heap->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Init Mark", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Init Mark", "");
  }
}

const char* ShenandoahConcurrentGC::final_mark_event_message() const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
         "Should not have forwarded objects during final mark, unless old gen concurrent mark is running");

  if (heap->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Mark", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Mark", "");
  }
}

const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
         "Should not have forwarded objects during concurrent mark, unless old gen concurrent mark is running");
  if (heap->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent marking", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent marking", "");
  }
}

const char* ShenandoahConcurrentGC::conc_reset_event_message() const {
  if (ShenandoahHeap::heap()->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent reset", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent reset", "");
  }
}

const char* ShenandoahConcurrentGC::final_roots_event_message() const {
  if (ShenandoahHeap::heap()->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Roots", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Roots", "");
  }
}

const char* ShenandoahConcurrentGC::conc_weak_refs_event_message() const {
  if (ShenandoahHeap::heap()->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent weak references", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent weak references", "");
  }
}

const char* ShenandoahConcurrentGC::conc_weak_roots_event_message() const {
  if (ShenandoahHeap::heap()->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent weak roots", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent weak roots", "");
  }
}

const char* ShenandoahConcurrentGC::conc_cleanup_event_message() const {
  if (ShenandoahHeap::heap()->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent cleanup", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent cleanup", "");
  }
}