1 /* 2 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. 3 * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved. 4 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 5 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 6 * 7 * This code is free software; you can redistribute it and/or modify it 8 * under the terms of the GNU General Public License version 2 only, as 9 * published by the Free Software Foundation. 10 * 11 * This code is distributed in the hope that it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 14 * version 2 for more details (a copy is included in the LICENSE file that 15 * accompanied this code). 16 * 17 * You should have received a copy of the GNU General Public License version 18 * 2 along with this work; if not, write to the Free Software Foundation, 19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 20 * 21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 22 * or visit www.oracle.com if you need additional information or have any 23 * questions. 24 * 25 */ 26 27 #include "precompiled.hpp" 28 29 #include "gc/shared/barrierSetNMethod.hpp" 30 #include "gc/shared/collectorCounters.hpp" 31 #include "gc/shared/continuationGCSupport.inline.hpp" 32 #include "gc/shenandoah/shenandoahBreakpoint.hpp" 33 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp" 34 #include "gc/shenandoah/shenandoahConcurrentGC.hpp" 35 #include "gc/shenandoah/shenandoahFreeSet.hpp" 36 #include "gc/shenandoah/shenandoahGeneration.hpp" 37 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp" 38 #include "gc/shenandoah/shenandoahOldGeneration.hpp" 39 #include "gc/shenandoah/shenandoahYoungGeneration.hpp" 40 #include "gc/shenandoah/shenandoahLock.hpp" 41 #include "gc/shenandoah/shenandoahMark.inline.hpp" 42 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp" 43 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp" 44 #include "gc/shenandoah/shenandoahPhaseTimings.hpp" 45 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp" 46 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp" 47 #include "gc/shenandoah/shenandoahStackWatermark.hpp" 48 #include "gc/shenandoah/shenandoahUtils.hpp" 49 #include "gc/shenandoah/shenandoahVerifier.hpp" 50 #include "gc/shenandoah/shenandoahVMOperations.hpp" 51 #include "gc/shenandoah/shenandoahWorkGroup.hpp" 52 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp" 53 #include "memory/allocation.hpp" 54 #include "prims/jvmtiTagMap.hpp" 55 #include "runtime/vmThread.hpp" 56 #include "utilities/events.hpp" 57 58 // Breakpoint support 59 class ShenandoahBreakpointGCScope : public StackObj { 60 private: 61 const GCCause::Cause _cause; 62 public: 63 ShenandoahBreakpointGCScope(GCCause::Cause cause) : _cause(cause) { 64 if (cause == GCCause::_wb_breakpoint) { 65 ShenandoahBreakpoint::start_gc(); 66 ShenandoahBreakpoint::at_before_gc(); 67 } 68 } 69 70 ~ShenandoahBreakpointGCScope() { 71 if (_cause == GCCause::_wb_breakpoint) { 72 ShenandoahBreakpoint::at_after_gc(); 73 } 74 } 75 }; 76 77 class ShenandoahBreakpointMarkScope : public StackObj { 78 private: 79 const GCCause::Cause _cause; 80 public: 81 ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) { 82 if (_cause == GCCause::_wb_breakpoint) { 83 ShenandoahBreakpoint::at_after_marking_started(); 84 } 85 } 86 87 ~ShenandoahBreakpointMarkScope() { 88 if (_cause == GCCause::_wb_breakpoint) { 89 ShenandoahBreakpoint::at_before_marking_completed(); 90 } 91 } 92 }; 93 94 ShenandoahConcurrentGC::ShenandoahConcurrentGC(ShenandoahGeneration* generation, bool do_old_gc_bootstrap) : 95 _mark(generation), 96 _degen_point(ShenandoahDegenPoint::_degenerated_unset), 97 _abbreviated(false), 98 _do_old_gc_bootstrap(do_old_gc_bootstrap), 99 _generation(generation) { 100 } 101 102 ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const { 103 return _degen_point; 104 } 105 106 bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) { 107 ShenandoahHeap* const heap = ShenandoahHeap::heap(); 108 109 ShenandoahBreakpointGCScope breakpoint_gc_scope(cause); 110 111 // Reset for upcoming marking 112 entry_reset(); 113 114 // Start initial mark under STW 115 vmop_entry_init_mark(); 116 117 { 118 ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause); 119 120 // Reset task queue stats here, rather than in mark_concurrent_roots, 121 // because remembered set scan will `push` oops into the queues and 122 // resetting after this happens will lose those counts. 123 TASKQUEUE_STATS_ONLY(_mark.task_queues()->reset_taskqueue_stats()); 124 125 // Concurrent remembered set scanning 126 entry_scan_remembered_set(); 127 // TODO: When RS scanning yields, we will need a check_cancellation_and_abort() degeneration point here. 128 129 // Concurrent mark roots 130 entry_mark_roots(); 131 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_roots)) { 132 return false; 133 } 134 135 // Continue concurrent mark 136 entry_mark(); 137 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) { 138 return false; 139 } 140 } 141 142 // Complete marking under STW, and start evacuation 143 vmop_entry_final_mark(); 144 145 // If the GC was cancelled before final mark, nothing happens on the safepoint. We are still 146 // in the marking phase and must resume the degenerated cycle from there. If the GC was cancelled 147 // after final mark, then we've entered the evacuation phase and must resume the degenerated cycle 148 // from that phase. 149 if (_generation->is_concurrent_mark_in_progress()) { 150 bool cancelled = check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark); 151 assert(cancelled, "GC must have been cancelled between concurrent and final mark"); 152 return false; 153 } 154 155 // Concurrent stack processing 156 if (heap->is_evacuation_in_progress()) { 157 entry_thread_roots(); 158 } 159 160 // Process weak roots that might still point to regions that would be broken by cleanup 161 if (heap->is_concurrent_weak_root_in_progress()) { 162 entry_weak_refs(); 163 entry_weak_roots(); 164 } 165 166 // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim 167 // the space. This would be the last action if there is nothing to evacuate. Note that 168 // we will not age young-gen objects in the case that we skip evacuation. 169 entry_cleanup_early(); 170 171 heap->free_set()->log_status_under_lock(); 172 173 // Perform concurrent class unloading 174 if (heap->unload_classes() && 175 heap->is_concurrent_weak_root_in_progress()) { 176 entry_class_unloading(); 177 } 178 179 // Processing strong roots 180 // This may be skipped if there is nothing to update/evacuate. 181 // If so, strong_root_in_progress would be unset. 182 if (heap->is_concurrent_strong_root_in_progress()) { 183 entry_strong_roots(); 184 } 185 186 // Continue the cycle with evacuation and optional update-refs. 187 // This may be skipped if there is nothing to evacuate. 188 // If so, evac_in_progress would be unset by collection set preparation code. 189 if (heap->is_evacuation_in_progress()) { 190 // Concurrently evacuate 191 entry_evacuate(); 192 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) { 193 return false; 194 } 195 } 196 197 if (heap->has_forwarded_objects()) { 198 // Perform update-refs phase. 199 vmop_entry_init_updaterefs(); 200 entry_updaterefs(); 201 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) { 202 return false; 203 } 204 205 // Concurrent update thread roots 206 entry_update_thread_roots(); 207 if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) { 208 return false; 209 } 210 211 vmop_entry_final_updaterefs(); 212 213 // Update references freed up collection set, kick the cleanup to reclaim the space. 214 entry_cleanup_complete(); 215 } else { 216 // We chose not to evacuate because we found sufficient immediate garbage. Note that we 217 // do not check for cancellation here because, at this point, the cycle is effectively 218 // complete. If the cycle has been cancelled here, the control thread will detect it 219 // on its next iteration and run a degenerated young cycle. 220 vmop_entry_final_roots(); 221 _abbreviated = true; 222 } 223 224 // We defer generation resizing actions until after cset regions have been recycled. We do this even following an 225 // abbreviated cycle. 226 if (heap->mode()->is_generational()) { 227 ShenandoahGenerationalHeap::heap()->complete_concurrent_cycle(); 228 } 229 return true; 230 } 231 232 void ShenandoahConcurrentGC::vmop_entry_init_mark() { 233 ShenandoahHeap* const heap = ShenandoahHeap::heap(); 234 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters()); 235 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross); 236 237 heap->try_inject_alloc_failure(); 238 VM_ShenandoahInitMark op(this); 239 VMThread::execute(&op); // jump to entry_init_mark() under safepoint 240 } 241 242 void ShenandoahConcurrentGC::vmop_entry_final_mark() { 243 ShenandoahHeap* const heap = ShenandoahHeap::heap(); 244 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters()); 245 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross); 246 247 heap->try_inject_alloc_failure(); 248 VM_ShenandoahFinalMarkStartEvac op(this); 249 VMThread::execute(&op); // jump to entry_final_mark under safepoint 250 } 251 252 void ShenandoahConcurrentGC::vmop_entry_init_updaterefs() { 253 ShenandoahHeap* const heap = ShenandoahHeap::heap(); 254 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters()); 255 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_update_refs_gross); 256 257 heap->try_inject_alloc_failure(); 258 VM_ShenandoahInitUpdateRefs op(this); 259 VMThread::execute(&op); 260 } 261 262 void ShenandoahConcurrentGC::vmop_entry_final_updaterefs() { 263 ShenandoahHeap* const heap = ShenandoahHeap::heap(); 264 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters()); 265 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_gross); 266 267 heap->try_inject_alloc_failure(); 268 VM_ShenandoahFinalUpdateRefs op(this); 269 VMThread::execute(&op); 270 } 271 272 void ShenandoahConcurrentGC::vmop_entry_final_roots() { 273 ShenandoahHeap* const heap = ShenandoahHeap::heap(); 274 TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters()); 275 ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_roots_gross); 276 277 // This phase does not use workers, no need for setup 278 heap->try_inject_alloc_failure(); 279 VM_ShenandoahFinalRoots op(this); 280 VMThread::execute(&op); 281 } 282 283 void ShenandoahConcurrentGC::entry_init_mark() { 284 const char* msg = init_mark_event_message(); 285 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_mark); 286 EventMark em("%s", msg); 287 288 ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(), 289 ShenandoahWorkerPolicy::calc_workers_for_init_marking(), 290 "init marking"); 291 292 op_init_mark(); 293 } 294 295 void ShenandoahConcurrentGC::entry_final_mark() { 296 const char* msg = final_mark_event_message(); 297 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark); 298 EventMark em("%s", msg); 299 300 ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(), 301 ShenandoahWorkerPolicy::calc_workers_for_final_marking(), 302 "final marking"); 303 304 op_final_mark(); 305 } 306 307 void ShenandoahConcurrentGC::entry_init_updaterefs() { 308 static const char* msg = "Pause Init Update Refs"; 309 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs); 310 EventMark em("%s", msg); 311 312 // No workers used in this phase, no setup required 313 op_init_updaterefs(); 314 } 315 316 void ShenandoahConcurrentGC::entry_final_updaterefs() { 317 static const char* msg = "Pause Final Update Refs"; 318 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs); 319 EventMark em("%s", msg); 320 321 ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(), 322 ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(), 323 "final reference update"); 324 325 op_final_updaterefs(); 326 } 327 328 void ShenandoahConcurrentGC::entry_final_roots() { 329 static const char* msg = "Pause Final Roots"; 330 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots); 331 EventMark em("%s", msg); 332 333 op_final_roots(); 334 } 335 336 void ShenandoahConcurrentGC::entry_reset() { 337 ShenandoahHeap* const heap = ShenandoahHeap::heap(); 338 heap->try_inject_alloc_failure(); 339 340 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters()); 341 { 342 static const char* msg = "Concurrent reset"; 343 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset); 344 EventMark em("%s", msg); 345 346 ShenandoahWorkerScope scope(heap->workers(), 347 ShenandoahWorkerPolicy::calc_workers_for_conc_reset(), 348 msg); 349 op_reset(); 350 } 351 352 if (_do_old_gc_bootstrap) { 353 static const char* msg = "Concurrent reset (OLD)"; 354 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset_old); 355 ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(), 356 ShenandoahWorkerPolicy::calc_workers_for_conc_reset(), 357 msg); 358 EventMark em("%s", msg); 359 360 heap->old_generation()->prepare_gc(); 361 } 362 } 363 364 void ShenandoahConcurrentGC::entry_scan_remembered_set() { 365 if (_generation->is_young()) { 366 ShenandoahHeap* const heap = ShenandoahHeap::heap(); 367 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters()); 368 const char* msg = "Concurrent remembered set scanning"; 369 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::init_scan_rset); 370 EventMark em("%s", msg); 371 372 ShenandoahWorkerScope scope(heap->workers(), 373 ShenandoahWorkerPolicy::calc_workers_for_rs_scanning(), 374 msg); 375 376 heap->try_inject_alloc_failure(); 377 _generation->scan_remembered_set(true /* is_concurrent */); 378 } 379 } 380 381 void ShenandoahConcurrentGC::entry_mark_roots() { 382 ShenandoahHeap* const heap = ShenandoahHeap::heap(); 383 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters()); 384 const char* msg = "Concurrent marking roots"; 385 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots); 386 EventMark em("%s", msg); 387 388 ShenandoahWorkerScope scope(heap->workers(), 389 ShenandoahWorkerPolicy::calc_workers_for_conc_marking(), 390 "concurrent marking roots"); 391 392 heap->try_inject_alloc_failure(); 393 op_mark_roots(); 394 } 395 396 void ShenandoahConcurrentGC::entry_mark() { 397 ShenandoahHeap* const heap = ShenandoahHeap::heap(); 398 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters()); 399 const char* msg = conc_mark_event_message(); 400 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark); 401 EventMark em("%s", msg); 402 403 ShenandoahWorkerScope scope(heap->workers(), 404 ShenandoahWorkerPolicy::calc_workers_for_conc_marking(), 405 "concurrent marking"); 406 407 heap->try_inject_alloc_failure(); 408 op_mark(); 409 } 410 411 void ShenandoahConcurrentGC::entry_thread_roots() { 412 ShenandoahHeap* const heap = ShenandoahHeap::heap(); 413 static const char* msg = "Concurrent thread roots"; 414 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_thread_roots); 415 EventMark em("%s", msg); 416 417 ShenandoahWorkerScope scope(heap->workers(), 418 ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(), 419 msg); 420 421 heap->try_inject_alloc_failure(); 422 op_thread_roots(); 423 } 424 425 void ShenandoahConcurrentGC::entry_weak_refs() { 426 ShenandoahHeap* const heap = ShenandoahHeap::heap(); 427 static const char* msg = "Concurrent weak references"; 428 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_refs); 429 EventMark em("%s", msg); 430 431 ShenandoahWorkerScope scope(heap->workers(), 432 ShenandoahWorkerPolicy::calc_workers_for_conc_refs_processing(), 433 "concurrent weak references"); 434 435 heap->try_inject_alloc_failure(); 436 op_weak_refs(); 437 } 438 439 void ShenandoahConcurrentGC::entry_weak_roots() { 440 ShenandoahHeap* const heap = ShenandoahHeap::heap(); 441 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters()); 442 static const char* msg = "Concurrent weak roots"; 443 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_roots); 444 EventMark em("%s", msg); 445 446 ShenandoahWorkerScope scope(heap->workers(), 447 ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(), 448 "concurrent weak root"); 449 450 heap->try_inject_alloc_failure(); 451 op_weak_roots(); 452 } 453 454 void ShenandoahConcurrentGC::entry_class_unloading() { 455 ShenandoahHeap* const heap = ShenandoahHeap::heap(); 456 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters()); 457 static const char* msg = "Concurrent class unloading"; 458 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_class_unload); 459 EventMark em("%s", msg); 460 461 ShenandoahWorkerScope scope(heap->workers(), 462 ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(), 463 "concurrent class unloading"); 464 465 heap->try_inject_alloc_failure(); 466 op_class_unloading(); 467 } 468 469 void ShenandoahConcurrentGC::entry_strong_roots() { 470 ShenandoahHeap* const heap = ShenandoahHeap::heap(); 471 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters()); 472 static const char* msg = "Concurrent strong roots"; 473 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_strong_roots); 474 EventMark em("%s", msg); 475 476 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_strong_roots); 477 478 ShenandoahWorkerScope scope(heap->workers(), 479 ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(), 480 "concurrent strong root"); 481 482 heap->try_inject_alloc_failure(); 483 op_strong_roots(); 484 } 485 486 void ShenandoahConcurrentGC::entry_cleanup_early() { 487 ShenandoahHeap* const heap = ShenandoahHeap::heap(); 488 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters()); 489 static const char* msg = "Concurrent cleanup"; 490 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_early, true /* log_heap_usage */); 491 EventMark em("%s", msg); 492 493 // This phase does not use workers, no need for setup 494 heap->try_inject_alloc_failure(); 495 op_cleanup_early(); 496 } 497 498 void ShenandoahConcurrentGC::entry_evacuate() { 499 ShenandoahHeap* const heap = ShenandoahHeap::heap(); 500 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters()); 501 502 static const char* msg = "Concurrent evacuation"; 503 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_evac); 504 EventMark em("%s", msg); 505 506 ShenandoahWorkerScope scope(heap->workers(), 507 ShenandoahWorkerPolicy::calc_workers_for_conc_evac(), 508 "concurrent evacuation"); 509 510 heap->try_inject_alloc_failure(); 511 op_evacuate(); 512 } 513 514 void ShenandoahConcurrentGC::entry_update_thread_roots() { 515 ShenandoahHeap* const heap = ShenandoahHeap::heap(); 516 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters()); 517 518 static const char* msg = "Concurrent update thread roots"; 519 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_thread_roots); 520 EventMark em("%s", msg); 521 522 // No workers used in this phase, no setup required 523 heap->try_inject_alloc_failure(); 524 op_update_thread_roots(); 525 } 526 527 void ShenandoahConcurrentGC::entry_updaterefs() { 528 ShenandoahHeap* const heap = ShenandoahHeap::heap(); 529 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters()); 530 static const char* msg = "Concurrent update references"; 531 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs); 532 EventMark em("%s", msg); 533 534 ShenandoahWorkerScope scope(heap->workers(), 535 ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(), 536 "concurrent reference update"); 537 538 heap->try_inject_alloc_failure(); 539 op_updaterefs(); 540 } 541 542 void ShenandoahConcurrentGC::entry_cleanup_complete() { 543 ShenandoahHeap* const heap = ShenandoahHeap::heap(); 544 TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters()); 545 static const char* msg = "Concurrent cleanup"; 546 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */); 547 EventMark em("%s", msg); 548 549 // This phase does not use workers, no need for setup 550 heap->try_inject_alloc_failure(); 551 op_cleanup_complete(); 552 } 553 554 void ShenandoahConcurrentGC::op_reset() { 555 ShenandoahHeap* const heap = ShenandoahHeap::heap(); 556 if (ShenandoahPacing) { 557 heap->pacer()->setup_for_reset(); 558 } 559 _generation->prepare_gc(); 560 } 561 562 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure { 563 private: 564 ShenandoahMarkingContext* const _ctx; 565 public: 566 ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {} 567 568 void heap_region_do(ShenandoahHeapRegion* r) { 569 assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index()); 570 if (r->is_active()) { 571 // Check if region needs updating its TAMS. We have updated it already during concurrent 572 // reset, so it is very likely we don't need to do another write here. Since most regions 573 // are not "active", this path is relatively rare. 574 if (_ctx->top_at_mark_start(r) != r->top()) { 575 _ctx->capture_top_at_mark_start(r); 576 } 577 } else { 578 assert(_ctx->top_at_mark_start(r) == r->top(), 579 "Region " SIZE_FORMAT " should already have correct TAMS", r->index()); 580 } 581 } 582 583 bool is_thread_safe() { return true; } 584 }; 585 586 void ShenandoahConcurrentGC::start_mark() { 587 _mark.start_mark(); 588 } 589 590 void ShenandoahConcurrentGC::op_init_mark() { 591 ShenandoahHeap* const heap = ShenandoahHeap::heap(); 592 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint"); 593 assert(Thread::current()->is_VM_thread(), "can only do this in VMThread"); 594 595 assert(_generation->is_bitmap_clear(), "need clear marking bitmap"); 596 assert(!_generation->is_mark_complete(), "should not be complete"); 597 assert(!heap->has_forwarded_objects(), "No forwarded objects on this path"); 598 599 600 if (heap->mode()->is_generational()) { 601 if (_generation->is_young() || (_generation->is_global() && ShenandoahVerify)) { 602 // The current implementation of swap_remembered_set() copies the write-card-table 603 // to the read-card-table. The remembered sets are also swapped for GLOBAL collections 604 // so that the verifier works with the correct copy of the card table when verifying. 605 // TODO: This path should not really depend on ShenandoahVerify. 606 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_swap_rset); 607 _generation->swap_remembered_set(); 608 } 609 610 if (_generation->is_global()) { 611 heap->old_generation()->cancel_gc(); 612 } else if (heap->is_concurrent_old_mark_in_progress()) { 613 // Purge the SATB buffers, transferring any valid, old pointers to the 614 // old generation mark queue. Any pointers in a young region will be 615 // abandoned. 616 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_transfer_satb); 617 heap->old_generation()->transfer_pointers_from_satb(); 618 } 619 } 620 621 if (ShenandoahVerify) { 622 heap->verifier()->verify_before_concmark(); 623 } 624 625 if (VerifyBeforeGC) { 626 Universe::verify(); 627 } 628 629 _generation->set_concurrent_mark_in_progress(true); 630 631 start_mark(); 632 633 if (_do_old_gc_bootstrap) { 634 shenandoah_assert_generational(); 635 // Update region state for both young and old regions 636 // TODO: We should be able to pull this out of the safepoint for the bootstrap 637 // cycle. The top of an old region will only move when a GC cycle evacuates 638 // objects into it. When we start an old cycle, we know that nothing can touch 639 // the top of old regions. 640 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states); 641 ShenandoahInitMarkUpdateRegionStateClosure cl; 642 heap->parallel_heap_region_iterate(&cl); 643 heap->old_generation()->ref_processor()->reset_thread_locals(); 644 } else { 645 // Update region state for only young regions 646 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states); 647 ShenandoahInitMarkUpdateRegionStateClosure cl; 648 _generation->parallel_heap_region_iterate(&cl); 649 } 650 651 // Weak reference processing 652 ShenandoahReferenceProcessor* rp = _generation->ref_processor(); 653 rp->reset_thread_locals(); 654 rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs()); 655 656 // Make above changes visible to worker threads 657 OrderAccess::fence(); 658 659 // Arm nmethods for concurrent mark 660 ShenandoahCodeRoots::arm_nmethods_for_mark(); 661 662 ShenandoahStackWatermark::change_epoch_id(); 663 if (ShenandoahPacing) { 664 heap->pacer()->setup_for_mark(); 665 } 666 } 667 668 void ShenandoahConcurrentGC::op_mark_roots() { 669 _mark.mark_concurrent_roots(); 670 } 671 672 void ShenandoahConcurrentGC::op_mark() { 673 _mark.concurrent_mark(); 674 } 675 676 void ShenandoahConcurrentGC::op_final_mark() { 677 ShenandoahHeap* const heap = ShenandoahHeap::heap(); 678 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint"); 679 assert(!heap->has_forwarded_objects(), "No forwarded objects on this path"); 680 681 if (ShenandoahVerify) { 682 heap->verifier()->verify_roots_no_forwarded(); 683 } 684 685 if (!heap->cancelled_gc()) { 686 _mark.finish_mark(); 687 assert(!heap->cancelled_gc(), "STW mark cannot OOM"); 688 689 // Notify JVMTI that the tagmap table will need cleaning. 690 JvmtiTagMap::set_needs_cleaning(); 691 692 // The collection set is chosen by prepare_regions_and_collection_set(). 693 // 694 // TODO: Under severe memory overload conditions that can be checked here, we may want to limit 695 // the inclusion of old-gen candidates within the collection set. This would allow us to prioritize efforts on 696 // evacuating young-gen, This remediation is most appropriate when old-gen availability is very high (so there 697 // are negligible negative impacts from delaying completion of old-gen evacuation) and when young-gen collections 698 // are "under duress" (as signalled by very low availability of memory within young-gen, indicating that/ young-gen 699 // collections are not triggering frequently enough). 700 _generation->prepare_regions_and_collection_set(true /*concurrent*/); 701 702 // Upon return from prepare_regions_and_collection_set(), certain parameters have been established to govern the 703 // evacuation efforts that are about to begin. In particular: 704 // 705 // heap->get_promoted_reserve() represents the amount of memory within old-gen's available memory that has 706 // been set aside to hold objects promoted from young-gen memory. This represents an estimated percentage 707 // of the live young-gen memory within the collection set. If there is more data ready to be promoted than 708 // can fit within this reserve, the promotion of some objects will be deferred until a subsequent evacuation 709 // pass. 710 // 711 // heap->get_old_evac_reserve() represents the amount of memory within old-gen's available memory that has been 712 // set aside to hold objects evacuated from the old-gen collection set. 713 // 714 // heap->get_young_evac_reserve() represents the amount of memory within young-gen's available memory that has 715 // been set aside to hold objects evacuated from the young-gen collection set. Conservatively, this value 716 // equals the entire amount of live young-gen memory within the collection set, even though some of this memory 717 // will likely be promoted. 718 719 // Has to be done after cset selection 720 heap->prepare_concurrent_roots(); 721 722 if (!heap->collection_set()->is_empty() || has_in_place_promotions(heap)) { 723 // Even if the collection set is empty, we need to do evacuation if there are regions to be promoted in place. 724 // Concurrent evacuation takes responsibility for registering objects and setting the remembered set cards to dirty. 725 726 LogTarget(Debug, gc, cset) lt; 727 if (lt.is_enabled()) { 728 ResourceMark rm; 729 LogStream ls(lt); 730 heap->collection_set()->print_on(&ls); 731 } 732 733 if (ShenandoahVerify) { 734 heap->verifier()->verify_before_evacuation(); 735 } 736 737 // TODO: Do we need to set this if we are only promoting regions in place? We don't need the barriers on for that. 738 heap->set_evacuation_in_progress(true); 739 740 // Verify before arming for concurrent processing. 741 // Otherwise, verification can trigger stack processing. 742 if (ShenandoahVerify) { 743 heap->verifier()->verify_during_evacuation(); 744 } 745 746 // Generational mode may promote objects in place during the evacuation phase. 747 // If that is the only reason we are evacuating, we don't need to update references 748 // and there will be no forwarded objects on the heap. 749 heap->set_has_forwarded_objects(!heap->collection_set()->is_empty()); 750 751 // Arm nmethods/stack for concurrent processing 752 if (!heap->collection_set()->is_empty()) { 753 // Iff objects will be evaluated, arm the nmethod barriers. These will be disarmed 754 // under the same condition (established in prepare_concurrent_roots) after strong 755 // root evacuation has completed (see op_strong_roots). 756 ShenandoahCodeRoots::arm_nmethods_for_evac(); 757 ShenandoahStackWatermark::change_epoch_id(); 758 } 759 760 if (ShenandoahPacing) { 761 heap->pacer()->setup_for_evac(); 762 } 763 } else { 764 if (ShenandoahVerify) { 765 heap->verifier()->verify_after_concmark(); 766 } 767 768 if (VerifyAfterGC) { 769 Universe::verify(); 770 } 771 } 772 } 773 } 774 775 bool ShenandoahConcurrentGC::has_in_place_promotions(ShenandoahHeap* heap) { 776 return heap->mode()->is_generational() && heap->old_generation()->has_in_place_promotions(); 777 } 778 779 template<bool GENERATIONAL> 780 class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure { 781 private: 782 OopClosure* const _oops; 783 public: 784 explicit ShenandoahConcurrentEvacThreadClosure(OopClosure* oops) : _oops(oops) {} 785 786 void do_thread(Thread* thread) override { 787 JavaThread* const jt = JavaThread::cast(thread); 788 StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc); 789 if (GENERATIONAL) { 790 ShenandoahThreadLocalData::enable_plab_promotions(thread); 791 } 792 } 793 }; 794 795 template<bool GENERATIONAL> 796 class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask { 797 private: 798 ShenandoahJavaThreadsIterator _java_threads; 799 800 public: 801 explicit ShenandoahConcurrentEvacUpdateThreadTask(uint n_workers) : 802 WorkerTask("Shenandoah Evacuate/Update Concurrent Thread Roots"), 803 _java_threads(ShenandoahPhaseTimings::conc_thread_roots, n_workers) { 804 } 805 806 void work(uint worker_id) override { 807 if (GENERATIONAL) { 808 Thread* worker_thread = Thread::current(); 809 ShenandoahThreadLocalData::enable_plab_promotions(worker_thread); 810 } 811 812 // ShenandoahEvacOOMScope has to be setup by ShenandoahContextEvacuateUpdateRootsClosure. 813 // Otherwise, may deadlock with watermark lock 814 ShenandoahContextEvacuateUpdateRootsClosure oops_cl; 815 ShenandoahConcurrentEvacThreadClosure<GENERATIONAL> thr_cl(&oops_cl); 816 _java_threads.threads_do(&thr_cl, worker_id); 817 } 818 }; 819 820 void ShenandoahConcurrentGC::op_thread_roots() { 821 ShenandoahHeap* const heap = ShenandoahHeap::heap(); 822 assert(heap->is_evacuation_in_progress(), "Checked by caller"); 823 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots); 824 if (heap->mode()->is_generational()) { 825 ShenandoahConcurrentEvacUpdateThreadTask<true> task(heap->workers()->active_workers()); 826 heap->workers()->run_task(&task); 827 } else { 828 ShenandoahConcurrentEvacUpdateThreadTask<false> task(heap->workers()->active_workers()); 829 heap->workers()->run_task(&task); 830 } 831 } 832 833 void ShenandoahConcurrentGC::op_weak_refs() { 834 ShenandoahHeap* const heap = ShenandoahHeap::heap(); 835 assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase"); 836 // Concurrent weak refs processing 837 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs); 838 if (heap->gc_cause() == GCCause::_wb_breakpoint) { 839 ShenandoahBreakpoint::at_after_reference_processing_started(); 840 } 841 _generation->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */); 842 } 843 844 class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure { 845 private: 846 ShenandoahHeap* const _heap; 847 ShenandoahMarkingContext* const _mark_context; 848 bool _evac_in_progress; 849 Thread* const _thread; 850 851 public: 852 ShenandoahEvacUpdateCleanupOopStorageRootsClosure(); 853 void do_oop(oop* p); 854 void do_oop(narrowOop* p); 855 }; 856 857 ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() : 858 _heap(ShenandoahHeap::heap()), 859 _mark_context(ShenandoahHeap::heap()->marking_context()), 860 _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()), 861 _thread(Thread::current()) { 862 } 863 864 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) { 865 const oop obj = RawAccess<>::oop_load(p); 866 if (!CompressedOops::is_null(obj)) { 867 if (!_mark_context->is_marked(obj)) { 868 shenandoah_assert_generations_reconciled(); 869 if (_heap->is_in_active_generation(obj)) { 870 // TODO: This worries me. Here we are asserting that an unmarked from-space object is 'correct'. 871 // Normally, I would call this a bogus assert, but there seems to be a legitimate use-case for 872 // accessing from-space objects during class unloading. However, the from-space object may have 873 // been "filled". We've made no effort to prevent old generation classes being unloaded by young 874 // gen (and vice-versa). 875 shenandoah_assert_correct(p, obj); 876 ShenandoahHeap::atomic_clear_oop(p, obj); 877 } 878 } else if (_evac_in_progress && _heap->in_collection_set(obj)) { 879 oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj); 880 if (resolved == obj) { 881 resolved = _heap->evacuate_object(obj, _thread); 882 } 883 shenandoah_assert_not_in_cset_except(p, resolved, _heap->cancelled_gc()); 884 ShenandoahHeap::atomic_update_oop(resolved, p, obj); 885 } 886 } 887 } 888 889 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) { 890 ShouldNotReachHere(); 891 } 892 893 class ShenandoahIsCLDAliveClosure : public CLDClosure { 894 public: 895 void do_cld(ClassLoaderData* cld) { 896 cld->is_alive(); 897 } 898 }; 899 900 class ShenandoahIsNMethodAliveClosure: public NMethodClosure { 901 public: 902 void do_nmethod(nmethod* n) { 903 n->is_unloading(); 904 } 905 }; 906 907 // This task not only evacuates/updates marked weak roots, but also "null" 908 // dead weak roots. 909 class ShenandoahConcurrentWeakRootsEvacUpdateTask : public WorkerTask { 910 private: 911 ShenandoahVMWeakRoots<true /*concurrent*/> _vm_roots; 912 913 // Roots related to concurrent class unloading 914 ShenandoahClassLoaderDataRoots<true /* concurrent */> 915 _cld_roots; 916 ShenandoahConcurrentNMethodIterator _nmethod_itr; 917 ShenandoahPhaseTimings::Phase _phase; 918 919 public: 920 ShenandoahConcurrentWeakRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) : 921 WorkerTask("Shenandoah Evacuate/Update Concurrent Weak Roots"), 922 _vm_roots(phase), 923 _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers(), false /*heap iteration*/), 924 _nmethod_itr(ShenandoahCodeRoots::table()), 925 _phase(phase) { 926 if (ShenandoahHeap::heap()->unload_classes()) { 927 _nmethod_itr.nmethods_do_begin(); 928 } 929 } 930 931 ~ShenandoahConcurrentWeakRootsEvacUpdateTask() { 932 if (ShenandoahHeap::heap()->unload_classes()) { 933 _nmethod_itr.nmethods_do_end(); 934 } 935 // Notify runtime data structures of potentially dead oops 936 _vm_roots.report_num_dead(); 937 } 938 939 void work(uint worker_id) { 940 ShenandoahConcurrentWorkerSession worker_session(worker_id); 941 ShenandoahSuspendibleThreadSetJoiner sts_join; 942 { 943 ShenandoahEvacOOMScope oom; 944 // jni_roots and weak_roots are OopStorage backed roots, concurrent iteration 945 // may race against OopStorage::release() calls. 946 ShenandoahEvacUpdateCleanupOopStorageRootsClosure cl; 947 _vm_roots.oops_do(&cl, worker_id); 948 } 949 950 // If we are going to perform concurrent class unloading later on, we need to 951 // cleanup the weak oops in CLD and determinate nmethod's unloading state, so that we 952 // can cleanup immediate garbage sooner. 953 if (ShenandoahHeap::heap()->unload_classes()) { 954 // Applies ShenandoahIsCLDAlive closure to CLDs, native barrier will either null the 955 // CLD's holder or evacuate it. 956 { 957 ShenandoahIsCLDAliveClosure is_cld_alive; 958 _cld_roots.cld_do(&is_cld_alive, worker_id); 959 } 960 961 // Applies ShenandoahIsNMethodAliveClosure to registered nmethods. 962 // The closure calls nmethod->is_unloading(). The is_unloading 963 // state is cached, therefore, during concurrent class unloading phase, 964 // we will not touch the metadata of unloading nmethods 965 { 966 ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id); 967 ShenandoahIsNMethodAliveClosure is_nmethod_alive; 968 _nmethod_itr.nmethods_do(&is_nmethod_alive); 969 } 970 } 971 } 972 }; 973 974 void ShenandoahConcurrentGC::op_weak_roots() { 975 ShenandoahHeap* const heap = ShenandoahHeap::heap(); 976 assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase"); 977 // Concurrent weak root processing 978 { 979 ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_work); 980 ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots_work); 981 ShenandoahConcurrentWeakRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_weak_roots_work); 982 heap->workers()->run_task(&task); 983 } 984 985 // Perform handshake to flush out dead oops 986 { 987 ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_rendezvous); 988 heap->rendezvous_threads(); 989 } 990 // We can only toggle concurrent_weak_root_in_progress flag 991 // at a safepoint, so that mutators see a consistent 992 // value. The flag will be cleared at the next safepoint. 993 } 994 995 void ShenandoahConcurrentGC::op_class_unloading() { 996 ShenandoahHeap* const heap = ShenandoahHeap::heap(); 997 assert (heap->is_concurrent_weak_root_in_progress() && 998 heap->unload_classes(), 999 "Checked by caller"); 1000 heap->do_class_unloading(); 1001 } 1002 1003 class ShenandoahEvacUpdateCodeCacheClosure : public NMethodClosure { 1004 private: 1005 BarrierSetNMethod* const _bs; 1006 ShenandoahEvacuateUpdateMetadataClosure _cl; 1007 1008 public: 1009 ShenandoahEvacUpdateCodeCacheClosure() : 1010 _bs(BarrierSet::barrier_set()->barrier_set_nmethod()), 1011 _cl() { 1012 } 1013 1014 void do_nmethod(nmethod* n) { 1015 ShenandoahNMethod* data = ShenandoahNMethod::gc_data(n); 1016 ShenandoahReentrantLocker locker(data->lock()); 1017 // Setup EvacOOM scope below reentrant lock to avoid deadlock with 1018 // nmethod_entry_barrier 1019 ShenandoahEvacOOMScope oom; 1020 data->oops_do(&_cl, true/*fix relocation*/); 1021 _bs->disarm(n); 1022 } 1023 }; 1024 1025 class ShenandoahConcurrentRootsEvacUpdateTask : public WorkerTask { 1026 private: 1027 ShenandoahPhaseTimings::Phase _phase; 1028 ShenandoahVMRoots<true /*concurrent*/> _vm_roots; 1029 ShenandoahClassLoaderDataRoots<true /*concurrent*/> 1030 _cld_roots; 1031 ShenandoahConcurrentNMethodIterator _nmethod_itr; 1032 1033 public: 1034 ShenandoahConcurrentRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) : 1035 WorkerTask("Shenandoah Evacuate/Update Concurrent Strong Roots"), 1036 _phase(phase), 1037 _vm_roots(phase), 1038 _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers(), false /*heap iteration*/), 1039 _nmethod_itr(ShenandoahCodeRoots::table()) { 1040 if (!ShenandoahHeap::heap()->unload_classes()) { 1041 _nmethod_itr.nmethods_do_begin(); 1042 } 1043 } 1044 1045 ~ShenandoahConcurrentRootsEvacUpdateTask() { 1046 if (!ShenandoahHeap::heap()->unload_classes()) { 1047 _nmethod_itr.nmethods_do_end(); 1048 } 1049 } 1050 1051 void work(uint worker_id) { 1052 ShenandoahConcurrentWorkerSession worker_session(worker_id); 1053 { 1054 ShenandoahEvacOOMScope oom; 1055 { 1056 // vm_roots and weak_roots are OopStorage backed roots, concurrent iteration 1057 // may race against OopStorage::release() calls. 1058 ShenandoahContextEvacuateUpdateRootsClosure cl; 1059 _vm_roots.oops_do<ShenandoahContextEvacuateUpdateRootsClosure>(&cl, worker_id); 1060 } 1061 1062 { 1063 ShenandoahEvacuateUpdateMetadataClosure cl; 1064 CLDToOopClosure clds(&cl, ClassLoaderData::_claim_strong); 1065 _cld_roots.cld_do(&clds, worker_id); 1066 } 1067 } 1068 1069 // Cannot setup ShenandoahEvacOOMScope here, due to potential deadlock with nmethod_entry_barrier. 1070 if (!ShenandoahHeap::heap()->unload_classes()) { 1071 ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id); 1072 ShenandoahEvacUpdateCodeCacheClosure cl; 1073 _nmethod_itr.nmethods_do(&cl); 1074 } 1075 } 1076 }; 1077 1078 void ShenandoahConcurrentGC::op_strong_roots() { 1079 ShenandoahHeap* const heap = ShenandoahHeap::heap(); 1080 assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller"); 1081 ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots); 1082 heap->workers()->run_task(&task); 1083 heap->set_concurrent_strong_root_in_progress(false); 1084 } 1085 1086 void ShenandoahConcurrentGC::op_cleanup_early() { 1087 ShenandoahHeap::heap()->free_set()->recycle_trash(); 1088 } 1089 1090 void ShenandoahConcurrentGC::op_evacuate() { 1091 ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/); 1092 } 1093 1094 void ShenandoahConcurrentGC::op_init_updaterefs() { 1095 ShenandoahHeap* const heap = ShenandoahHeap::heap(); 1096 heap->set_evacuation_in_progress(false); 1097 heap->set_concurrent_weak_root_in_progress(false); 1098 heap->prepare_update_heap_references(true /*concurrent*/); 1099 heap->set_update_refs_in_progress(true); 1100 if (ShenandoahVerify) { 1101 heap->verifier()->verify_before_updaterefs(); 1102 } 1103 if (ShenandoahPacing) { 1104 heap->pacer()->setup_for_updaterefs(); 1105 } 1106 } 1107 1108 void ShenandoahConcurrentGC::op_updaterefs() { 1109 ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/); 1110 } 1111 1112 class ShenandoahUpdateThreadClosure : public HandshakeClosure { 1113 private: 1114 ShenandoahUpdateRefsClosure _cl; 1115 public: 1116 ShenandoahUpdateThreadClosure(); 1117 void do_thread(Thread* thread); 1118 }; 1119 1120 ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() : 1121 HandshakeClosure("Shenandoah Update Thread Roots") { 1122 } 1123 1124 void ShenandoahUpdateThreadClosure::do_thread(Thread* thread) { 1125 if (thread->is_Java_thread()) { 1126 JavaThread* jt = JavaThread::cast(thread); 1127 ResourceMark rm; 1128 jt->oops_do(&_cl, nullptr); 1129 } 1130 } 1131 1132 void ShenandoahConcurrentGC::op_update_thread_roots() { 1133 ShenandoahUpdateThreadClosure cl; 1134 Handshake::execute(&cl); 1135 } 1136 1137 void ShenandoahConcurrentGC::op_final_updaterefs() { 1138 ShenandoahHeap* const heap = ShenandoahHeap::heap(); 1139 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint"); 1140 assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references"); 1141 1142 heap->finish_concurrent_roots(); 1143 1144 // Clear cancelled GC, if set. On cancellation path, the block before would handle 1145 // everything. 1146 if (heap->cancelled_gc()) { 1147 heap->clear_cancelled_gc(true /* clear oom handler */); 1148 } 1149 1150 // Has to be done before cset is clear 1151 if (ShenandoahVerify) { 1152 heap->verifier()->verify_roots_in_to_space(); 1153 } 1154 1155 // If we are running in generational mode and this is an aging cycle, this will also age active 1156 // regions that haven't been used for allocation. 1157 heap->update_heap_region_states(true /*concurrent*/); 1158 1159 heap->set_update_refs_in_progress(false); 1160 heap->set_has_forwarded_objects(false); 1161 1162 if (heap->mode()->is_generational() && heap->is_concurrent_old_mark_in_progress()) { 1163 // When the SATB barrier is left on to support concurrent old gen mark, it may pick up writes to 1164 // objects in the collection set. After those objects are evacuated, the pointers in the 1165 // SATB are no longer safe. Once we have finished update references, we are guaranteed that 1166 // no more writes to the collection set are possible. 1167 // 1168 // This will transfer any old pointers in _active_ regions from the SATB to the old gen 1169 // mark queues. All other pointers will be discarded. This would also discard any pointers 1170 // in old regions that were included in a mixed evacuation. We aren't using the SATB filter 1171 // methods here because we cannot control when they execute. If the SATB filter runs _after_ 1172 // a region has been recycled, we will not be able to detect the bad pointer. 1173 // 1174 // We are not concerned about skipping this step in abbreviated cycles because regions 1175 // with no live objects cannot have been written to and so cannot have entries in the SATB 1176 // buffers. 1177 heap->old_generation()->transfer_pointers_from_satb(); 1178 1179 // Aging_cycle is only relevant during evacuation cycle for individual objects and during final mark for 1180 // entire regions. Both of these relevant operations occur before final update refs. 1181 ShenandoahGenerationalHeap::heap()->set_aging_cycle(false); 1182 } 1183 1184 if (ShenandoahVerify) { 1185 heap->verifier()->verify_after_updaterefs(); 1186 } 1187 1188 if (VerifyAfterGC) { 1189 Universe::verify(); 1190 } 1191 1192 heap->rebuild_free_set(true /*concurrent*/); 1193 } 1194 1195 void ShenandoahConcurrentGC::op_final_roots() { 1196 1197 ShenandoahHeap *heap = ShenandoahHeap::heap(); 1198 heap->set_concurrent_weak_root_in_progress(false); 1199 heap->set_evacuation_in_progress(false); 1200 1201 if (heap->mode()->is_generational()) { 1202 // If the cycle was shortened for having enough immediate garbage, this could be 1203 // the last GC safepoint before concurrent marking of old resumes. We must be sure 1204 // that old mark threads don't see any pointers to garbage in the SATB buffers. 1205 if (heap->is_concurrent_old_mark_in_progress()) { 1206 heap->old_generation()->transfer_pointers_from_satb(); 1207 } 1208 1209 if (!_generation->is_old()) { 1210 ShenandoahGenerationalHeap::heap()->update_region_ages(_generation->complete_marking_context()); 1211 } 1212 } 1213 } 1214 1215 void ShenandoahConcurrentGC::op_cleanup_complete() { 1216 ShenandoahHeap::heap()->free_set()->recycle_trash(); 1217 } 1218 1219 bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) { 1220 if (ShenandoahHeap::heap()->cancelled_gc()) { 1221 _degen_point = point; 1222 return true; 1223 } 1224 return false; 1225 } 1226 1227 const char* ShenandoahConcurrentGC::init_mark_event_message() const { 1228 ShenandoahHeap* const heap = ShenandoahHeap::heap(); 1229 assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here"); 1230 if (heap->unload_classes()) { 1231 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Init Mark", " (unload classes)"); 1232 } else { 1233 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Init Mark", ""); 1234 } 1235 } 1236 1237 const char* ShenandoahConcurrentGC::final_mark_event_message() const { 1238 ShenandoahHeap* const heap = ShenandoahHeap::heap(); 1239 assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(), 1240 "Should not have forwarded objects during final mark, unless old gen concurrent mark is running"); 1241 1242 if (heap->unload_classes()) { 1243 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Mark", " (unload classes)"); 1244 } else { 1245 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Mark", ""); 1246 } 1247 } 1248 1249 const char* ShenandoahConcurrentGC::conc_mark_event_message() const { 1250 ShenandoahHeap* const heap = ShenandoahHeap::heap(); 1251 assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(), 1252 "Should not have forwarded objects concurrent mark, unless old gen concurrent mark is running"); 1253 if (heap->unload_classes()) { 1254 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent marking", " (unload classes)"); 1255 } else { 1256 SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent marking", ""); 1257 } 1258 }