1 /* 2 * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved. 3 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5 * 6 * This code is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 only, as 8 * published by the Free Software Foundation. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 
 *
 */

#include "precompiled.hpp"

#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shenandoah/shenandoahBreakpoint.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "gc/shenandoah/shenandoahLock.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahStackWatermark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "memory/allocation.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/events.hpp"

// Breakpoint support
//
// RAII scope that notifies the WhiteBox breakpoint mechanism at the start and
// end of a GC cycle, but only when the cycle was requested via a breakpoint
// (GCCause::_wb_breakpoint). For every other cause this scope is a no-op.
class ShenandoahBreakpointGCScope : public StackObj {
private:
  const GCCause::Cause _cause;
public:
  ShenandoahBreakpointGCScope(GCCause::Cause cause) : _cause(cause) {
    if (cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::start_gc();
      ShenandoahBreakpoint::at_before_gc();
    }
  }

  ~ShenandoahBreakpointGCScope() {
    if (_cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::at_after_gc();
    }
  }
};

// RAII scope bracketing the concurrent marking phase for WhiteBox breakpoints.
// Active only when the GC cause is GCCause::_wb_breakpoint; no-op otherwise.
class ShenandoahBreakpointMarkScope : public StackObj {
private:
  const GCCause::Cause _cause;
public:
  ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
    if (_cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::at_after_marking_started();
    }
  }

  ~ShenandoahBreakpointMarkScope() {
    if (_cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::at_before_marking_completed();
    }
  }
};

// `generation` is the generation this cycle collects; `do_old_gc_bootstrap`
// requests that this (young) cycle also prepare the old generation for a
// subsequent old-generation mark.
ShenandoahConcurrentGC::ShenandoahConcurrentGC(ShenandoahGeneration* generation, bool do_old_gc_bootstrap) :
  _mark(generation),
  _degen_point(ShenandoahDegenPoint::_degenerated_unset),
  _abbreviated(false),
  _do_old_gc_bootstrap(do_old_gc_bootstrap),
  _generation(generation) {
}

// The phase at which this cycle was cancelled, or _degenerated_unset if it
// was not cancelled. A degenerated cycle resumes from this point.
ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
  return _degen_point;
}

// Drives one complete concurrent collection cycle: reset, STW init-mark,
// concurrent mark, STW final-mark, concurrent evacuation and update-refs
// (or an abbreviated finish when there is nothing to evacuate).
// Returns true when the cycle ran to completion; returns false when the cycle
// was cancelled, in which case _degen_point records where to resume a
// degenerated cycle.
bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);

  // Reset for upcoming marking
  entry_reset();

  // Start initial mark under STW
  vmop_entry_init_mark();

  {
    ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);

    // Reset task queue stats here, rather than in mark_concurrent_roots,
    // because remembered set scan will `push` oops into the queues and
    // resetting after this happens will lose those counts.
    TASKQUEUE_STATS_ONLY(_mark.task_queues()->reset_taskqueue_stats());

    // Concurrent remembered set scanning
    entry_scan_remembered_set();
    // TODO: When RS scanning yields, we will need a check_cancellation_and_abort() degeneration point here.

    // Concurrent mark roots
    entry_mark_roots();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_roots)) {
      return false;
    }

    // Continue concurrent mark
    entry_mark();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) {
      return false;
    }
  }

  // Complete marking under STW, and start evacuation
  vmop_entry_final_mark();

  // If GC was cancelled before final mark, then the safepoint operation will do nothing
  // and the concurrent mark will still be in progress. In this case it is safe to resume
  // the degenerated cycle from the marking phase. On the other hand, if the GC is cancelled
  // after final mark (but before this check), then the final mark safepoint operation
  // will have finished the mark (setting concurrent mark in progress to false). Final mark
  // will also have setup state (in concurrent stack processing) that will not be safe to
  // resume from the marking phase in the degenerated cycle. That is, if the cancellation
  // occurred after final mark, we must resume the degenerated cycle after the marking phase.
  if (_generation->is_concurrent_mark_in_progress() && check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) {
    assert(!heap->is_concurrent_weak_root_in_progress(), "Weak roots should not be in progress when concurrent mark is in progress");
    return false;
  }

  // Concurrent stack processing
  if (heap->is_evacuation_in_progress()) {
    entry_thread_roots();
  }

  // Process weak roots that might still point to regions that would be broken by cleanup
  if (heap->is_concurrent_weak_root_in_progress()) {
    entry_weak_refs();
    entry_weak_roots();
  }

  // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
  // the space. This would be the last action if there is nothing to evacuate. Note that
  // we will not age young-gen objects in the case that we skip evacuation.
  entry_cleanup_early();

  {
    // TODO: Not sure there is value in logging free-set status right here. Note that whenever the free set is rebuilt,
    // it logs the newly rebuilt status.
    ShenandoahHeapLocker locker(heap->lock());
    heap->free_set()->log_status();
  }

  // Perform concurrent class unloading
  if (heap->unload_classes() &&
      heap->is_concurrent_weak_root_in_progress()) {
    entry_class_unloading();
  }

  // Processing strong roots
  // This may be skipped if there is nothing to update/evacuate.
  // If so, strong_root_in_progress would be unset.
  if (heap->is_concurrent_strong_root_in_progress()) {
    entry_strong_roots();
  }

  // Continue the cycle with evacuation and optional update-refs.
  // This may be skipped if there is nothing to evacuate.
  // If so, evac_in_progress would be unset by collection set preparation code.
  if (heap->is_evacuation_in_progress()) {
    // Concurrently evacuate
    entry_evacuate();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) {
      return false;
    }
  }

  if (heap->has_forwarded_objects()) {
    // Perform update-refs phase.
    vmop_entry_init_updaterefs();
    entry_updaterefs();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) {
      return false;
    }

    // Concurrent update thread roots
    entry_update_thread_roots();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) {
      return false;
    }

    vmop_entry_final_updaterefs();

    // Update references freed up collection set, kick the cleanup to reclaim the space.
    entry_cleanup_complete();
  } else {
    // We chose not to evacuate because we found sufficient immediate garbage. Note that we
    // do not check for cancellation here because, at this point, the cycle is effectively
    // complete. If the cycle has been cancelled here, the control thread will detect it
    // on its next iteration and run a degenerated young cycle.
    vmop_entry_final_roots();
    _abbreviated = true;
  }

  // We defer generation resizing actions until after cset regions have been recycled. We do this even following an
  // abbreviated cycle.
  if (heap->mode()->is_generational()) {

    ShenandoahGenerationalHeap::TransferResult result;
    {
      // Balancing/reserve updates are done under the heap lock.
      ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
      ShenandoahHeapLocker locker(gen_heap->lock());

      result = gen_heap->balance_generations();
      gen_heap->reset_generation_reserves();
    }

    LogTarget(Info, gc, ergo) lt;
    if (lt.is_enabled()) {
      LogStream ls(lt);
      result.print_on("Concurrent GC", &ls);
    }
  }
  return true;
}

// Schedules the init-mark safepoint operation on the VM thread; the STW work
// itself runs in entry_init_mark() under the safepoint.
void ShenandoahConcurrentGC::vmop_entry_init_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahInitMark op(this);
  VMThread::execute(&op); // jump to entry_init_mark() under safepoint
}

// Schedules the final-mark safepoint operation on the VM thread.
void ShenandoahConcurrentGC::vmop_entry_final_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalMarkStartEvac op(this);
  VMThread::execute(&op); // jump to entry_final_mark under safepoint
}

// Schedules the init-update-refs safepoint operation on the VM thread.
void ShenandoahConcurrentGC::vmop_entry_init_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_update_refs_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahInitUpdateRefs op(this);
  VMThread::execute(&op);
}

// Schedules the final-update-refs safepoint operation on the VM thread.
void ShenandoahConcurrentGC::vmop_entry_final_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalUpdateRefs op(this);
  VMThread::execute(&op);
}

// Schedules the final-roots safepoint operation used by the abbreviated
// (no-evacuation) path of the cycle.
void ShenandoahConcurrentGC::vmop_entry_final_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_roots_gross);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalRoots op(this);
  VMThread::execute(&op);
}

// STW entry: sets up pause timing, JFR event and worker scope, then runs
// the init-mark operation. Called from the VM thread under safepoint.
void ShenandoahConcurrentGC::entry_init_mark() {
  const char* msg = init_mark_event_message();
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
                              "init marking");

  op_init_mark();
}

// STW entry: sets up pause timing, JFR event and worker scope, then runs
// the final-mark operation. Called from the VM thread under safepoint.
void ShenandoahConcurrentGC::entry_final_mark() {
  const char* msg = final_mark_event_message();
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
                              "final marking");

  op_final_mark();
}

// STW entry for the init-update-refs pause.
void ShenandoahConcurrentGC::entry_init_updaterefs() {
  static const char* msg = "Pause Init Update Refs";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs);
  EventMark em("%s", msg);

  // No workers used in this phase, no setup required
  op_init_updaterefs();
}

// STW entry for the final-update-refs pause.
void ShenandoahConcurrentGC::entry_final_updaterefs() {
  static const char* msg = "Pause Final Update Refs";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
                              "final reference update");

  op_final_updaterefs();
}

// STW entry for the final-roots pause (abbreviated-cycle path).
void ShenandoahConcurrentGC::entry_final_roots() {
  static const char* msg = "Pause Final Roots";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots);
  EventMark em("%s", msg);

  op_final_roots();
}

// Concurrent entry: resets marking state for this generation, and, when
// bootstrapping an old-generation cycle, also prepares the old generation.
void ShenandoahConcurrentGC::entry_reset() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->try_inject_alloc_failure();

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  {
    static const char* msg = "Concurrent reset";
    ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
    EventMark em("%s", msg);

    ShenandoahWorkerScope scope(heap->workers(),
                                ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
                                msg);
    op_reset();
  }

  if (_do_old_gc_bootstrap) {
    static const char* msg = "Concurrent reset (OLD)";
    ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset_old);
    ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                                ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
                                msg);
    EventMark em("%s", msg);

    heap->old_generation()->prepare_gc();
  }
}

void
ShenandoahConcurrentGC::entry_scan_remembered_set() {
  // Remembered set scanning applies only to young collections; for other
  // generations this is a no-op.
  if (_generation->is_young()) {
    ShenandoahHeap* const heap = ShenandoahHeap::heap();
    TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
    const char* msg = "Concurrent remembered set scanning";
    ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::init_scan_rset);
    EventMark em("%s", msg);

    ShenandoahWorkerScope scope(heap->workers(),
                                ShenandoahWorkerPolicy::calc_workers_for_rs_scanning(),
                                msg);

    heap->try_inject_alloc_failure();
    _generation->scan_remembered_set(true /* is_concurrent */);
  }
}

// Concurrent entry for marking roots.
void ShenandoahConcurrentGC::entry_mark_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = "Concurrent marking roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent marking roots");

  heap->try_inject_alloc_failure();
  op_mark_roots();
}

// Concurrent entry for the main marking phase.
void ShenandoahConcurrentGC::entry_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = conc_mark_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent marking");

  heap->try_inject_alloc_failure();
  op_mark();
}

// Concurrent entry for evacuating/updating thread (stack) roots.
void ShenandoahConcurrentGC::entry_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  static const char* msg = "Concurrent thread roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_thread_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              msg);

  heap->try_inject_alloc_failure();
  op_thread_roots();
}

// Concurrent entry for weak reference processing.
void ShenandoahConcurrentGC::entry_weak_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  static const char* msg = "Concurrent weak references";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_refs_processing(),
                              "concurrent weak references");

  heap->try_inject_alloc_failure();
  op_weak_refs();
}

// Concurrent entry for weak root processing.
void ShenandoahConcurrentGC::entry_weak_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent weak roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent weak root");

  heap->try_inject_alloc_failure();
  op_weak_roots();
}

// Concurrent entry for class unloading.
void ShenandoahConcurrentGC::entry_class_unloading() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent class unloading";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_class_unload);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent class unloading");

  heap->try_inject_alloc_failure();
  op_class_unloading();
}

// Concurrent entry for strong root processing.
void ShenandoahConcurrentGC::entry_strong_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent strong roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_strong_roots);
  EventMark em("%s", msg);

  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_strong_roots);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent strong root");

  heap->try_inject_alloc_failure();
  op_strong_roots();
}

// Concurrent entry for the early cleanup that reclaims immediate garbage
// found at final mark.
void ShenandoahConcurrentGC::entry_cleanup_early() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent cleanup";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_early, true /* log_heap_usage */);
  EventMark em("%s", msg);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  op_cleanup_early();
}

// Concurrent entry for evacuation.
void ShenandoahConcurrentGC::entry_evacuate() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  static const char* msg = "Concurrent evacuation";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_evac);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
                              "concurrent evacuation");

  heap->try_inject_alloc_failure();
  op_evacuate();
}

// Concurrent entry for updating references held in thread stacks.
void ShenandoahConcurrentGC::entry_update_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  static const char* msg = "Concurrent update thread roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_thread_roots);
  EventMark em("%s", msg);

  // No workers used in this phase, no setup required
  heap->try_inject_alloc_failure();
  op_update_thread_roots();
}

// Concurrent entry for the main update-references phase.
void ShenandoahConcurrentGC::entry_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent update references";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
                              "concurrent reference update");

  heap->try_inject_alloc_failure();
  op_updaterefs();
}

// Concurrent entry for the final cleanup that reclaims the collection set
// after update-refs has completed.
void ShenandoahConcurrentGC::entry_cleanup_complete() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent cleanup";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
  EventMark em("%s", msg);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  op_cleanup_complete();
}

// Performs the concurrent reset work: pacer setup (when enabled) and
// generation GC preparation.
void ShenandoahConcurrentGC::op_reset() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_reset();
  }
  _generation->prepare_gc();
}

// Region closure run at init-mark: captures TAMS (top-at-mark-start) for
// active regions whose TAMS is stale, and asserts it is already correct for
// inactive ones.
class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
public:
  ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
    if (r->is_active()) {
      // Check if region needs updating its TAMS. We have updated it already during concurrent
      // reset, so it is very likely we don't need to do another write here. Since most regions
      // are not "active", this path is relatively rare.
      if (_ctx->top_at_mark_start(r) != r->top()) {
        _ctx->capture_top_at_mark_start(r);
      }
    } else {
      assert(_ctx->top_at_mark_start(r) == r->top(),
             "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
    }
  }

  bool is_thread_safe() { return true; }
};

void ShenandoahConcurrentGC::start_mark() {
  _mark.start_mark();
}

// Init-mark pause work. Runs in the VM thread at a safepoint: swaps the
// remembered set (generational mode), updates region TAMS state, resets
// reference-processor state, arms nmethods and stack watermarks, and flips
// the concurrent-mark-in-progress flag.
void ShenandoahConcurrentGC::op_init_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");

  assert(_generation->is_bitmap_clear(), "need clear marking bitmap");
  assert(!_generation->is_mark_complete(), "should not be complete");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");


  if (heap->mode()->is_generational()) {
    if (_generation->is_young() || (_generation->is_global() && ShenandoahVerify)) {
      // The current implementation of swap_remembered_set() copies the write-card-table
      // to the read-card-table. The remembered sets are also swapped for GLOBAL collections
      // so that the verifier works with the correct copy of the card table when verifying.
      // TODO: This path should not really depend on ShenandoahVerify.
      ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_swap_rset);
      _generation->swap_remembered_set();
    }

    if (_generation->is_global()) {
      heap->cancel_old_gc();
    } else if (heap->is_concurrent_old_mark_in_progress()) {
      // Purge the SATB buffers, transferring any valid, old pointers to the
      // old generation mark queue. Any pointers in a young region will be
      // abandoned.
      ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_transfer_satb);
      heap->transfer_old_pointers_from_satb();
    }
  }

  if (ShenandoahVerify) {
    heap->verifier()->verify_before_concmark();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  _generation->set_concurrent_mark_in_progress(true);

  start_mark();

  if (_do_old_gc_bootstrap) {
    // Update region state for both young and old regions
    // TODO: We should be able to pull this out of the safepoint for the bootstrap
    // cycle. The top of an old region will only move when a GC cycle evacuates
    // objects into it. When we start an old cycle, we know that nothing can touch
    // the top of old regions.
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
    ShenandoahInitMarkUpdateRegionStateClosure cl;
    heap->parallel_heap_region_iterate(&cl);
    heap->old_generation()->ref_processor()->reset_thread_locals();
  } else {
    // Update region state for only young regions
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
    ShenandoahInitMarkUpdateRegionStateClosure cl;
    _generation->parallel_heap_region_iterate(&cl);
  }

  // Weak reference processing
  ShenandoahReferenceProcessor* rp = _generation->ref_processor();
  rp->reset_thread_locals();
  rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());

  // Make above changes visible to worker threads
  OrderAccess::fence();

  // Arm nmethods for concurrent mark
  ShenandoahCodeRoots::arm_nmethods_for_mark();

  ShenandoahStackWatermark::change_epoch_id();
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_mark();
  }
}

void ShenandoahConcurrentGC::op_mark_roots() {
  _mark.mark_concurrent_roots();
}

void ShenandoahConcurrentGC::op_mark() {
  _mark.concurrent_mark();
}

// Final-mark pause work. Finishes marking (unless the GC was cancelled),
// selects the collection set and, when there is work to do, transitions the
// heap into evacuation.
void ShenandoahConcurrentGC::op_final_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");

  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_no_forwarded();
  }

  if (!heap->cancelled_gc()) {
    _mark.finish_mark();
    assert(!heap->cancelled_gc(), "STW mark cannot OOM");

    // Notify JVMTI that the tagmap table will need cleaning.
    JvmtiTagMap::set_needs_cleaning();

    // The collection set is chosen by prepare_regions_and_collection_set().
    //
    // TODO: Under severe memory overload conditions that can be checked here, we may want to limit
    // the inclusion of old-gen candidates within the collection set. This would allow us to prioritize efforts on
    // evacuating young-gen. This remediation is most appropriate when old-gen availability is very high (so there
    // are negligible negative impacts from delaying completion of old-gen evacuation) and when young-gen collections
    // are "under duress" (as signalled by very low availability of memory within young-gen, indicating that young-gen
    // collections are not triggering frequently enough).
    _generation->prepare_regions_and_collection_set(true /*concurrent*/);

    // Upon return from prepare_regions_and_collection_set(), certain parameters have been established to govern the
    // evacuation efforts that are about to begin. In particular:
    //
    // heap->get_promoted_reserve() represents the amount of memory within old-gen's available memory that has
    // been set aside to hold objects promoted from young-gen memory. This represents an estimated percentage
    // of the live young-gen memory within the collection set. If there is more data ready to be promoted than
    // can fit within this reserve, the promotion of some objects will be deferred until a subsequent evacuation
    // pass.
    //
    // heap->get_old_evac_reserve() represents the amount of memory within old-gen's available memory that has been
    // set aside to hold objects evacuated from the old-gen collection set.
    //
    // heap->get_young_evac_reserve() represents the amount of memory within young-gen's available memory that has
    // been set aside to hold objects evacuated from the young-gen collection set. Conservatively, this value
    // equals the entire amount of live young-gen memory within the collection set, even though some of this memory
    // will likely be promoted.

    // Has to be done after cset selection
    heap->prepare_concurrent_roots();

    if (heap->mode()->is_generational()) {
      if (!heap->collection_set()->is_empty() || heap->old_generation()->has_in_place_promotions()) {
        // Even if the collection set is empty, we need to do evacuation if there are regions to be promoted in place.
        // Concurrent evacuation takes responsibility for registering objects and setting the remembered set cards to dirty.

        LogTarget(Debug, gc, cset) lt;
        if (lt.is_enabled()) {
          ResourceMark rm;
          LogStream ls(lt);
          heap->collection_set()->print_on(&ls);
        }

        if (ShenandoahVerify) {
          heap->verifier()->verify_before_evacuation();
        }

        heap->set_evacuation_in_progress(true);

        // Verify before arming for concurrent processing.
        // Otherwise, verification can trigger stack processing.
        if (ShenandoahVerify) {
          heap->verifier()->verify_during_evacuation();
        }

        // Generational mode may promote objects in place during the evacuation phase.
        // If that is the only reason we are evacuating, we don't need to update references
        // and there will be no forwarded objects on the heap.
        heap->set_has_forwarded_objects(!heap->collection_set()->is_empty());

        // Arm nmethods/stack for concurrent processing
        if (!heap->collection_set()->is_empty()) {
          // Iff objects will be evacuated, arm the nmethod barriers. These will be disarmed
          // under the same condition (established in prepare_concurrent_roots) after strong
          // root evacuation has completed (see op_strong_roots).
          ShenandoahCodeRoots::arm_nmethods_for_evac();
          ShenandoahStackWatermark::change_epoch_id();
        }

        if (ShenandoahPacing) {
          heap->pacer()->setup_for_evac();
        }
      } else {
        if (ShenandoahVerify) {
          heap->verifier()->verify_after_concmark();
        }

        if (VerifyAfterGC) {
          Universe::verify();
        }
      }
    } else {
      // Not is_generational()
      if (!heap->collection_set()->is_empty()) {
        LogTarget(Debug, gc, ergo) lt;
        if (lt.is_enabled()) {
          ResourceMark rm;
          LogStream ls(lt);
          heap->collection_set()->print_on(&ls);
        }

        if (ShenandoahVerify) {
          heap->verifier()->verify_before_evacuation();
        }

        heap->set_evacuation_in_progress(true);

        // Verify before arming for concurrent processing.
        // Otherwise, verification can trigger stack processing.
        if (ShenandoahVerify) {
          heap->verifier()->verify_during_evacuation();
        }

        // From here on, we need to update references.
        heap->set_has_forwarded_objects(true);

        // Arm nmethods/stack for concurrent processing
        ShenandoahCodeRoots::arm_nmethods_for_evac();
        ShenandoahStackWatermark::change_epoch_id();

        if (ShenandoahPacing) {
          heap->pacer()->setup_for_evac();
        }
      } else {
        if (ShenandoahVerify) {
          heap->verifier()->verify_after_concmark();
        }

        if (VerifyAfterGC) {
          Universe::verify();
        }
      }
    }
  }
}

// Thread closure used during concurrent evacuation: finishes stack watermark
// processing for each Java thread with the supplied oop closure, then
// re-enables PLAB promotions for the thread.
class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
private:
  OopClosure* const _oops;

public:
  ShenandoahConcurrentEvacThreadClosure(OopClosure* oops);
  void do_thread(Thread* thread);
};

ShenandoahConcurrentEvacThreadClosure::ShenandoahConcurrentEvacThreadClosure(OopClosure* oops) :
  _oops(oops) {
}

void ShenandoahConcurrentEvacThreadClosure::do_thread(Thread* thread) {
  JavaThread* const jt = JavaThread::cast(thread);
  StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc);
  ShenandoahThreadLocalData::enable_plab_promotions(thread);
}

// Worker task that fans the per-thread evacuation/update closure out over
// all Java threads.
class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask {
private:
  ShenandoahJavaThreadsIterator _java_threads;

public:
  ShenandoahConcurrentEvacUpdateThreadTask(uint n_workers) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Thread Roots"),
    _java_threads(ShenandoahPhaseTimings::conc_thread_roots, n_workers) {
  }

  void work(uint worker_id) {
    // Workers themselves may allocate via PLABs; ensure promotions are enabled.
    Thread* worker_thread = Thread::current();
    ShenandoahThreadLocalData::enable_plab_promotions(worker_thread);

    // ShenandoahEvacOOMScope has to be setup by ShenandoahContextEvacuateUpdateRootsClosure.
    // Otherwise, may deadlock with watermark lock
    ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
    ShenandoahConcurrentEvacThreadClosure thr_cl(&oops_cl);
    _java_threads.threads_do(&thr_cl, worker_id);
  }
};

// Runs the thread-roots evacuation/update task on the worker pool.
void ShenandoahConcurrentGC::op_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_evacuation_in_progress(), "Checked by caller");
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
  ShenandoahConcurrentEvacUpdateThreadTask task(heap->workers()->active_workers());
  heap->workers()->run_task(&task);
}

// Performs concurrent weak reference processing for this generation.
void ShenandoahConcurrentGC::op_weak_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
  // Concurrent weak refs processing
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
  if (heap->gc_cause() == GCCause::_wb_breakpoint) {
    ShenandoahBreakpoint::at_after_reference_processing_started();
  }
  _generation->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
}

class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public
BasicOopIterateClosure { 898 private: 899 ShenandoahHeap* const _heap; 900 ShenandoahMarkingContext* const _mark_context; 901 bool _evac_in_progress; 902 Thread* const _thread; 903 904 public: 905 ShenandoahEvacUpdateCleanupOopStorageRootsClosure(); 906 void do_oop(oop* p); 907 void do_oop(narrowOop* p); 908 }; 909 910 ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() : 911 _heap(ShenandoahHeap::heap()), 912 _mark_context(ShenandoahHeap::heap()->marking_context()), 913 _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()), 914 _thread(Thread::current()) { 915 } 916 917 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) { 918 const oop obj = RawAccess<>::oop_load(p); 919 if (!CompressedOops::is_null(obj)) { 920 if (!_mark_context->is_marked(obj)) { 921 if (_heap->is_in_active_generation(obj)) { 922 // TODO: This worries me. Here we are asserting that an unmarked from-space object is 'correct'. 923 // Normally, I would call this a bogus assert, but there seems to be a legitimate use-case for 924 // accessing from-space objects during class unloading. However, the from-space object may have 925 // been "filled". We've made no effort to prevent old generation classes being unloaded by young 926 // gen (and vice-versa). 
927 shenandoah_assert_correct(p, obj); 928 ShenandoahHeap::atomic_clear_oop(p, obj); 929 } 930 } else if (_evac_in_progress && _heap->in_collection_set(obj)) { 931 oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj); 932 if (resolved == obj) { 933 resolved = _heap->evacuate_object(obj, _thread); 934 } 935 shenandoah_assert_not_in_cset_except(p, resolved, _heap->cancelled_gc()); 936 ShenandoahHeap::atomic_update_oop(resolved, p, obj); 937 } 938 } 939 } 940 941 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) { 942 ShouldNotReachHere(); 943 } 944 945 class ShenandoahIsCLDAliveClosure : public CLDClosure { 946 public: 947 void do_cld(ClassLoaderData* cld) { 948 cld->is_alive(); 949 } 950 }; 951 952 class ShenandoahIsNMethodAliveClosure: public NMethodClosure { 953 public: 954 void do_nmethod(nmethod* n) { 955 n->is_unloading(); 956 } 957 }; 958 959 // This task not only evacuates/updates marked weak roots, but also "null" 960 // dead weak roots. 
class ShenandoahConcurrentWeakRootsEvacUpdateTask : public WorkerTask {
private:
  ShenandoahVMWeakRoots<true /*concurrent*/> _vm_roots;

  // Roots related to concurrent class unloading
  ShenandoahClassLoaderDataRoots<true /* concurrent */>
                                             _cld_roots;
  ShenandoahConcurrentNMethodIterator        _nmethod_itr;
  ShenandoahPhaseTimings::Phase              _phase;

public:
  ShenandoahConcurrentWeakRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Weak Roots"),
    _vm_roots(phase),
    _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers(), false /*heap iteration*/),
    _nmethod_itr(ShenandoahCodeRoots::table()),
    _phase(phase) {
    // Begin nmethod iteration under CodeCache_lock; paired with nmethods_do_end()
    // in the destructor. Only needed when class unloading will run.
    if (ShenandoahHeap::heap()->unload_classes()) {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      _nmethod_itr.nmethods_do_begin();
    }
  }

  ~ShenandoahConcurrentWeakRootsEvacUpdateTask() {
    if (ShenandoahHeap::heap()->unload_classes()) {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      _nmethod_itr.nmethods_do_end();
    }
    // Notify runtime data structures of potentially dead oops
    _vm_roots.report_num_dead();
  }

  void work(uint worker_id) {
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    ShenandoahSuspendibleThreadSetJoiner sts_join;
    {
      // OOM-during-evac scope covers only the OopStorage walk; the closure may
      // call evacuate_object().
      ShenandoahEvacOOMScope oom;
      // jni_roots and weak_roots are OopStorage backed roots, concurrent iteration
      // may race against OopStorage::release() calls.
      ShenandoahEvacUpdateCleanupOopStorageRootsClosure cl;
      _vm_roots.oops_do(&cl, worker_id);
    }

    // If we are going to perform concurrent class unloading later on, we need to
    // clean up the weak oops in CLD and determine each nmethod's unloading state, so that we
    // can clean up immediate garbage sooner.
    if (ShenandoahHeap::heap()->unload_classes()) {
      // Applies ShenandoahIsCLDAlive closure to CLDs, native barrier will either null the
      // CLD's holder or evacuate it.
      {
        ShenandoahIsCLDAliveClosure is_cld_alive;
        _cld_roots.cld_do(&is_cld_alive, worker_id);
      }

      // Applies ShenandoahIsNMethodAliveClosure to registered nmethods.
      // The closure calls nmethod->is_unloading(). The is_unloading
      // state is cached, therefore, during concurrent class unloading phase,
      // we will not touch the metadata of unloading nmethods
      {
        ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
        ShenandoahIsNMethodAliveClosure is_nmethod_alive;
        _nmethod_itr.nmethods_do(&is_nmethod_alive);
      }
    }
  }
};

// Concurrently evacuate/update/null-out weak roots, then handshake all threads
// so that no thread can still observe a dead oop through a stale root.
void ShenandoahConcurrentGC::op_weak_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
  // Concurrent weak root processing
  {
    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_work);
    ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots_work);
    ShenandoahConcurrentWeakRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_weak_roots_work);
    heap->workers()->run_task(&task);
  }

  // Perform handshake to flush out dead oops
  {
    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_rendezvous);
    heap->rendezvous_threads();
  }
}

// Concurrent class unloading; only runs when the cycle was set up to unload classes.
void ShenandoahConcurrentGC::op_class_unloading() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert (heap->is_concurrent_weak_root_in_progress() &&
          heap->unload_classes(),
          "Checked by caller");
  heap->do_class_unloading();
}

// Evacuates/updates the oops embedded in one nmethod, then disarms its entry barrier.
class ShenandoahEvacUpdateCodeCacheClosure : public NMethodClosure {
private:
  BarrierSetNMethod* const                  _bs;
  ShenandoahEvacuateUpdateMetadataClosure   _cl;

public:
  ShenandoahEvacUpdateCodeCacheClosure() :
    _bs(BarrierSet::barrier_set()->barrier_set_nmethod()),
    _cl() {
  }

  void do_nmethod(nmethod* n) {
    ShenandoahNMethod* data = ShenandoahNMethod::gc_data(n);
    ShenandoahReentrantLocker locker(data->lock());
    // Setup EvacOOM scope below reentrant lock to avoid deadlock with
    // nmethod_entry_barrier
    ShenandoahEvacOOMScope oom;
    data->oops_do(&_cl, true/*fix relocation*/);
    _bs->disarm(n);
  }
};

// Worker task that concurrently evacuates/updates strong roots: VM roots,
// strong CLD roots, and (when class unloading is off) code cache roots.
class ShenandoahConcurrentRootsEvacUpdateTask : public WorkerTask {
private:
  ShenandoahPhaseTimings::Phase                 _phase;
  ShenandoahVMRoots<true /*concurrent*/>        _vm_roots;
  ShenandoahClassLoaderDataRoots<true /*concurrent*/>
                                                _cld_roots;
  ShenandoahConcurrentNMethodIterator           _nmethod_itr;

public:
  ShenandoahConcurrentRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Strong Roots"),
    _phase(phase),
    _vm_roots(phase),
    _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers(), false /*heap iteration*/),
    _nmethod_itr(ShenandoahCodeRoots::table()) {
    // nmethods are processed here only when class unloading is NOT in effect
    // (otherwise the unloading path handles them). Mirrored in the destructor.
    if (!ShenandoahHeap::heap()->unload_classes()) {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      _nmethod_itr.nmethods_do_begin();
    }
  }

  ~ShenandoahConcurrentRootsEvacUpdateTask() {
    if (!ShenandoahHeap::heap()->unload_classes()) {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      _nmethod_itr.nmethods_do_end();
    }
  }

  void work(uint worker_id) {
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    {
      ShenandoahEvacOOMScope oom;
      {
        // vm_roots and weak_roots are OopStorage backed roots, concurrent iteration
        // may race against OopStorage::release() calls.
        ShenandoahContextEvacuateUpdateRootsClosure cl;
        _vm_roots.oops_do<ShenandoahContextEvacuateUpdateRootsClosure>(&cl, worker_id);
      }

      {
        ShenandoahEvacuateUpdateMetadataClosure cl;
        CLDToOopClosure clds(&cl, ClassLoaderData::_claim_strong);
        _cld_roots.cld_do(&clds, worker_id);
      }
    }

    // Cannot setup ShenandoahEvacOOMScope here, due to potential deadlock with nmethod_entry_barrier.
    if (!ShenandoahHeap::heap()->unload_classes()) {
      ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
      ShenandoahEvacUpdateCodeCacheClosure cl;
      _nmethod_itr.nmethods_do(&cl);
    }
  }
};

// Concurrently evacuate/update strong roots, then retire the strong-root phase flag.
void ShenandoahConcurrentGC::op_strong_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
  ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
  heap->workers()->run_task(&task);
  heap->set_concurrent_strong_root_in_progress(false);
}

// Recycle immediate-garbage regions identified at final mark.
void ShenandoahConcurrentGC::op_cleanup_early() {
  ShenandoahHeap::heap()->free_set()->recycle_trash();
}

// Concurrently evacuate the collection set.
void ShenandoahConcurrentGC::op_evacuate() {
  ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
}

// Transition from evacuation to the update-references phase: flip phase flags,
// prepare the heap-region iterator, and verify/pace as configured.
void ShenandoahConcurrentGC::op_init_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->set_evacuation_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);
  heap->prepare_update_heap_references(true /*concurrent*/);
  heap->set_update_refs_in_progress(true);
  if (ShenandoahVerify) {
    heap->verifier()->verify_before_updaterefs();
  }
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_updaterefs();
  }
}

// Concurrently update all heap references to point at to-space copies.
void ShenandoahConcurrentGC::op_updaterefs() {
  ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
}

// Handshake closure that updates the stack roots of each Java thread to point
// at to-space copies (no evacuation; used during the update-references phase).
class ShenandoahUpdateThreadClosure : public HandshakeClosure {
private:
  ShenandoahUpdateRefsClosure _cl;
public:
  ShenandoahUpdateThreadClosure();
  void do_thread(Thread* thread);
};

ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
  HandshakeClosure("Shenandoah Update Thread Roots") {
}

void ShenandoahUpdateThreadClosure::do_thread(Thread* thread) {
  if (thread->is_Java_thread()) {
    JavaThread* jt = JavaThread::cast(thread);
    ResourceMark rm;
    jt->oops_do(&_cl, nullptr);
  }
}

// Update thread stack roots via a global handshake.
void ShenandoahConcurrentGC::op_update_thread_roots() {
  ShenandoahUpdateThreadClosure cl;
  Handshake::execute(&cl);
}

// Safepoint operation that finishes the update-references phase: flushes
// concurrent root work, clears cancellation/phase flags, drains SATB of
// cset pointers (generational), and rebuilds the free set.
void ShenandoahConcurrentGC::op_final_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
  assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");

  heap->finish_concurrent_roots();

  // Clear cancelled GC, if set. On cancellation path, the block before would handle
  // everything.
  if (heap->cancelled_gc()) {
    heap->clear_cancelled_gc(true /* clear oom handler */);
  }

  // Has to be done before cset is clear
  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_in_to_space();
  }

  if (heap->mode()->is_generational() && heap->is_concurrent_old_mark_in_progress()) {
    // When the SATB barrier is left on to support concurrent old gen mark, it may pick up writes to
    // objects in the collection set. After those objects are evacuated, the pointers in the
    // SATB are no longer safe. Once we have finished update references, we are guaranteed that
    // no more writes to the collection set are possible.
    //
    // This will transfer any old pointers in _active_ regions from the SATB to the old gen
    // mark queues. All other pointers will be discarded. This would also discard any pointers
    // in old regions that were included in a mixed evacuation. We aren't using the SATB filter
    // methods here because we cannot control when they execute. If the SATB filter runs _after_
    // a region has been recycled, we will not be able to detect the bad pointer.
    //
    // We are not concerned about skipping this step in abbreviated cycles because regions
    // with no live objects cannot have been written to and so cannot have entries in the SATB
    // buffers.
    heap->transfer_old_pointers_from_satb();
  }

  heap->update_heap_region_states(true /*concurrent*/);

  heap->set_update_refs_in_progress(false);
  heap->set_has_forwarded_objects(false);

  // Aging_cycle is only relevant during evacuation cycle for individual objects and during final mark for
  // entire regions. Both of these relevant operations occur before final update refs.
  heap->set_aging_cycle(false);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_updaterefs();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  heap->rebuild_free_set(true /*concurrent*/);
}

// Final root work for cycles that skip evacuation/update-refs: clears phase
// flags and, for generational mode, drains SATB and ages young regions.
void ShenandoahConcurrentGC::op_final_roots() {

  ShenandoahHeap *heap = ShenandoahHeap::heap();
  heap->set_concurrent_weak_root_in_progress(false);
  heap->set_evacuation_in_progress(false);

  if (heap->mode()->is_generational()) {
    // If the cycle was shortened for having enough immediate garbage, this could be
    // the last GC safepoint before concurrent marking of old resumes. We must be sure
    // that old mark threads don't see any pointers to garbage in the SATB buffers.
    if (heap->is_concurrent_old_mark_in_progress()) {
      heap->transfer_old_pointers_from_satb();
    }

    // Age active young regions: a region written to since mark start (top > TAMS)
    // has its age reset; otherwise it ages if this is an aging cycle.
    ShenandoahMarkingContext *ctx = heap->complete_marking_context();
    for (size_t i = 0; i < heap->num_regions(); i++) {
      ShenandoahHeapRegion *r = heap->get_region(i);
      if (r->is_active() && r->is_young()) {
        HeapWord* tams = ctx->top_at_mark_start(r);
        HeapWord* top = r->top();
        if (top > tams) {
          r->reset_age();
        } else if (heap->is_aging_cycle()) {
          r->increment_age();
        }
      }
    }
  }
}

// Recycle trashed regions after the update-references phase.
void ShenandoahConcurrentGC::op_cleanup_complete() {
  ShenandoahHeap::heap()->free_set()->recycle_trash();
}

// If the GC has been cancelled, record where the cycle must degenerate and
// report true so the caller aborts the concurrent cycle.
bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
  if (ShenandoahHeap::heap()->cancelled_gc()) {
    _degen_point = point;
    return true;
  }
  return false;
}

// Event message for the init-mark pause, annotated with class-unloading status.
const char* ShenandoahConcurrentGC::init_mark_event_message() const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
  if (heap->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Init Mark", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Init Mark", "");
  }
}

// Event message for the final-mark pause, annotated with class-unloading status.
const char* ShenandoahConcurrentGC::final_mark_event_message() const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
         "Should not have forwarded objects during final mark, unless old gen concurrent mark is running");

  if (heap->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Mark", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Mark", "");
  }
}

// Event message for the concurrent-mark phase, annotated with class-unloading status.
// NOTE(review): the assert message below reads "forwarded objects concurrent mark" —
// likely missing the word "during"; message text left unchanged here.
const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
         "Should not have forwarded objects concurrent mark, unless old gen concurrent mark is running");
  if (heap->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent marking", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent marking", "");
  }
}