/*
 * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shenandoah/shenandoahBreakpoint.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "gc/shenandoah/shenandoahLock.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahStackWatermark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "memory/allocation.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/events.hpp"

// Breakpoint support
class ShenandoahBreakpointGCScope : public StackObj {
private:
  const GCCause::Cause _cause;
public:
  ShenandoahBreakpointGCScope(GCCause::Cause cause) : _cause(cause) {
    if (cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::start_gc();
      ShenandoahBreakpoint::at_before_gc();
    }
  }

  ~ShenandoahBreakpointGCScope() {
    if (_cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::at_after_gc();
    }
  }
};

class ShenandoahBreakpointMarkScope : public StackObj {
private:
  const GCCause::Cause _cause;
public:
  ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
    if (_cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::at_after_marking_started();
    }
  }

  ~ShenandoahBreakpointMarkScope() {
    if (_cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::at_before_marking_completed();
    }
  }
};

ShenandoahConcurrentGC::ShenandoahConcurrentGC(ShenandoahGeneration* generation, bool do_old_gc_bootstrap) :
  _mark(generation),
  _degen_point(ShenandoahDegenPoint::_degenerated_unset),
  _abbreviated(false),
  _do_old_gc_bootstrap(do_old_gc_bootstrap),
  _generation(generation) {
}

ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
  return _degen_point;
}

bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->start_conc_gc();

  ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);

  // Reset for upcoming marking
  entry_reset();

  // Start initial mark under STW
  vmop_entry_init_mark();

  {
    ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);

    // Reset task queue stats here, rather than in mark_concurrent_roots,
    // because remembered set scan will `push` oops into the queues and
    // resetting after this happens will lose those counts.
    TASKQUEUE_STATS_ONLY(_mark.task_queues()->reset_taskqueue_stats());

    // Concurrent remembered set scanning
    entry_scan_remembered_set();
    // When RS scanning yields, we will need a check_cancellation_and_abort()
    // degeneration point here.

    // Concurrent mark roots
    entry_mark_roots();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_roots)) return false;

    // Continue concurrent mark
    entry_mark();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) return false;
  }

  // Complete marking under STW, and start evacuation
  vmop_entry_final_mark();

  // If GC was cancelled before final mark, then the safepoint operation will do nothing
  // and the concurrent mark will still be in progress. In this case it is safe to resume
  // the degenerated cycle from the marking phase. On the other hand, if the GC is cancelled
  // after final mark (but before this check), then the final mark safepoint operation
  // will have finished the mark (setting concurrent mark in progress to false). Final mark
  // will also have set up state (in concurrent stack processing) that will not be safe to
  // resume from the marking phase in the degenerated cycle. That is, if the cancellation
  // occurred after final mark, we must resume the degenerated cycle after the marking phase.
  if (_generation->is_concurrent_mark_in_progress() && check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) {
    assert(!heap->is_concurrent_weak_root_in_progress(), "Weak roots should not be in progress when concurrent mark is in progress");
    return false;
  }

  // Concurrent stack processing
  if (heap->is_evacuation_in_progress()) {
    entry_thread_roots();
  }

  // Process weak roots that might still point to regions that would be broken by cleanup
  if (heap->is_concurrent_weak_root_in_progress()) {
    entry_weak_refs();
    entry_weak_roots();
  }

  // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
  // the space. This would be the last action if there is nothing to evacuate. Note that
  // we will not age young-gen objects in the case that we skip evacuation.
  entry_cleanup_early();

  {
    ShenandoahHeapLocker locker(heap->lock());
    heap->free_set()->log_status();
  }

  // Perform concurrent class unloading
  if (heap->unload_classes() &&
      heap->is_concurrent_weak_root_in_progress()) {
    entry_class_unloading();
  }

  // Processing strong roots
  // This may be skipped if there is nothing to update/evacuate.
  // If so, strong_root_in_progress would be unset.
  if (heap->is_concurrent_strong_root_in_progress()) {
    entry_strong_roots();
  }

  // Global marking has completed. We need to fill in any unmarked objects in the old generation
  // so that subsequent remembered set scans will not walk pointers into reclaimed memory.
  if (!heap->cancelled_gc() && heap->mode()->is_generational() && _generation->generation_mode() == GLOBAL) {
    entry_global_coalesce_and_fill();
  }

  // Continue the cycle with evacuation and optional update-refs.
  // This may be skipped if there is nothing to evacuate.
  // If so, evac_in_progress would be unset by collection set preparation code.
  if (heap->is_evacuation_in_progress()) {
    // Concurrently evacuate
    entry_evacuate();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) return false;

    // Perform update-refs phase.
    vmop_entry_init_updaterefs();
    entry_updaterefs();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;

    // Concurrent update thread roots
    entry_update_thread_roots();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;

    vmop_entry_final_updaterefs();

    // Update references freed up collection set, kick the cleanup to reclaim the space.
    entry_cleanup_complete();
  } else {
    // We chose not to evacuate because we found sufficient immediate garbage.
    vmop_entry_final_roots(heap->is_aging_cycle());
    _abbreviated = true;
  }

  if (heap->mode()->is_generational()) {
    size_t old_available, young_available;
    {
      ShenandoahYoungGeneration* young_gen = heap->young_generation();
      ShenandoahGeneration* old_gen = heap->old_generation();
      ShenandoahHeapLocker locker(heap->lock());

      size_t old_usage_before_evac = heap->capture_old_usage(0);
      size_t old_usage_now = old_gen->used();
      size_t promoted_bytes = old_usage_now - old_usage_before_evac;
      heap->set_previous_promotion(promoted_bytes);

      young_gen->unadjust_available();
      old_gen->unadjust_available();
      // No need to old_gen->increase_used().
      // That was done when plabs were allocated, accounting for both old evacs and promotions.
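
      // Capture each generation's available memory now that the temporary budget adjustments
      // have been undone, then clear this cycle's reserves (allocation supplement, young and old
      // evacuation, promotion) so that the next cycle starts from a clean budget.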
      young_available = young_gen->adjusted_available();
      old_available = old_gen->adjusted_available();

      heap->set_alloc_supplement_reserve(0);
      heap->set_young_evac_reserve(0);
      heap->set_old_evac_reserve(0);
      heap->reset_old_evac_expended();
      heap->set_promoted_reserve(0);
    }
  }
  return true;
}

void ShenandoahConcurrentGC::vmop_entry_init_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahInitMark op(this);
  VMThread::execute(&op); // jump to entry_init_mark() under safepoint
}

void ShenandoahConcurrentGC::vmop_entry_final_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalMarkStartEvac op(this);
  VMThread::execute(&op); // jump to entry_final_mark under safepoint
}

void ShenandoahConcurrentGC::vmop_entry_init_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_update_refs_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahInitUpdateRefs op(this);
  VMThread::execute(&op);
}

void ShenandoahConcurrentGC::vmop_entry_final_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalUpdateRefs op(this);
  VMThread::execute(&op);
}

void ShenandoahConcurrentGC::vmop_entry_final_roots(bool increment_region_ages) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_roots_gross);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalRoots op(this, increment_region_ages);
  VMThread::execute(&op);
}

void ShenandoahConcurrentGC::entry_init_mark() {
  char msg[1024];
  init_mark_event_message(msg, sizeof(msg));
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
                              "init marking");

  op_init_mark();
}

void ShenandoahConcurrentGC::entry_final_mark() {
  char msg[1024];
  final_mark_event_message(msg, sizeof(msg));
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
                              "final marking");

  op_final_mark();
}

void ShenandoahConcurrentGC::entry_init_updaterefs() {
  static const char* msg = "Pause Init Update Refs";
  ShenandoahPausePhase gc_phase(msg,
                                ShenandoahPhaseTimings::init_update_refs);
  EventMark em("%s", msg);

  // No workers used in this phase, no setup required
  op_init_updaterefs();
}

void ShenandoahConcurrentGC::entry_final_updaterefs() {
  static const char* msg = "Pause Final Update Refs";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
                              "final reference update");

  op_final_updaterefs();
}

void ShenandoahConcurrentGC::entry_final_roots() {
  static const char* msg = "Pause Final Roots";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots);
  EventMark em("%s", msg);

  op_final_roots();
}

void ShenandoahConcurrentGC::entry_reset() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent reset";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
                              "concurrent reset");

  heap->try_inject_alloc_failure();
  op_reset();
}

void ShenandoahConcurrentGC::entry_scan_remembered_set() {
  if (_generation->generation_mode() == YOUNG) {
    ShenandoahHeap* const heap = ShenandoahHeap::heap();
    TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
    const char* msg = "Concurrent remembered set scanning";
    ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::init_scan_rset);
    EventMark em("%s", msg);

    ShenandoahWorkerScope scope(heap->workers(),
                                ShenandoahWorkerPolicy::calc_workers_for_rs_scanning(),
                                msg);

    heap->try_inject_alloc_failure();
    _generation->scan_remembered_set(true /* is_concurrent */);
  }
}

void ShenandoahConcurrentGC::entry_mark_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = "Concurrent marking roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent marking roots");

  heap->try_inject_alloc_failure();
  op_mark_roots();
}

void ShenandoahConcurrentGC::entry_mark() {
  char msg[1024];
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  conc_mark_event_message(msg, sizeof(msg));
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent marking");

  heap->try_inject_alloc_failure();
  op_mark();
}

void ShenandoahConcurrentGC::entry_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  static const char* msg = "Concurrent thread roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_thread_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              msg);

  heap->try_inject_alloc_failure();
  op_thread_roots();
}

void ShenandoahConcurrentGC::entry_weak_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  static const char* msg = "Concurrent weak references";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_refs_processing(),
                              "concurrent weak references");

  heap->try_inject_alloc_failure();
  op_weak_refs();
}

void ShenandoahConcurrentGC::entry_weak_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent weak roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent weak root");

  heap->try_inject_alloc_failure();
  op_weak_roots();
}

void ShenandoahConcurrentGC::entry_class_unloading() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent class unloading";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_class_unload);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent class unloading");

  heap->try_inject_alloc_failure();
  op_class_unloading();
}

void ShenandoahConcurrentGC::entry_strong_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent strong roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_strong_roots);
  EventMark em("%s", msg);

  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_strong_roots);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent strong root");

  heap->try_inject_alloc_failure();
  op_strong_roots();
}

void ShenandoahConcurrentGC::entry_cleanup_early() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent cleanup";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_early, true /* log_heap_usage */);
  EventMark em("%s", msg);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  op_cleanup_early();
}

void ShenandoahConcurrentGC::entry_evacuate() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  static const char* msg = "Concurrent evacuation";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_evac);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
                              "concurrent evacuation");

  heap->try_inject_alloc_failure();
  op_evacuate();
}

void ShenandoahConcurrentGC::entry_update_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  static const char* msg = "Concurrent update thread roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_thread_roots);
  EventMark em("%s", msg);

  // No workers used in this phase, no setup required
  heap->try_inject_alloc_failure();
  op_update_thread_roots();
}

void ShenandoahConcurrentGC::entry_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent update references";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
                              "concurrent reference update");

  heap->try_inject_alloc_failure();
  op_updaterefs();
}

void ShenandoahConcurrentGC::entry_cleanup_complete() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent cleanup";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
  EventMark em("%s", msg);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  op_cleanup_complete();
}

void ShenandoahConcurrentGC::entry_global_coalesce_and_fill() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  const char* msg = "Coalescing and filling old regions in global collect";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::coalesce_and_fill);

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  EventMark em("%s", msg);
  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent coalesce and fill");

  op_global_coalesce_and_fill();
}

void ShenandoahConcurrentGC::op_reset() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_reset();
  }
  _generation->prepare_gc();
}

class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
public:
  ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
    if (r->is_active()) {
      // Check if region needs updating its TAMS. We have updated it already during concurrent
      // reset, so it is very likely we don't need to do another write here. Since most regions
      // are not "active", this path is relatively rare.
      if (_ctx->top_at_mark_start(r) != r->top()) {
        _ctx->capture_top_at_mark_start(r);
      }
    } else {
      assert(_ctx->top_at_mark_start(r) == r->top(),
             "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
    }
  }

  bool is_thread_safe() { return true; }
};

void ShenandoahConcurrentGC::start_mark() {
  _mark.start_mark();
}

void ShenandoahConcurrentGC::op_init_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");

  assert(_generation->is_bitmap_clear(), "need clear marking bitmap");
  assert(!_generation->is_mark_complete(), "should not be complete");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");

  if (heap->mode()->is_generational()) {
    if (_generation->generation_mode() == YOUNG || (_generation->generation_mode() == GLOBAL && ShenandoahVerify)) {
      // The current implementation of swap_remembered_set() copies the write-card-table
      // to the read-card-table. The remembered sets are also swapped for GLOBAL collections
      // so that the verifier works with the correct copy of the card table when verifying.
      ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_swap_rset);
      _generation->swap_remembered_set();
    }

    if (_generation->generation_mode() == GLOBAL) {
      heap->cancel_old_gc();
    } else if (heap->is_concurrent_old_mark_in_progress()) {
      // Purge the SATB buffers, transferring any valid, old pointers to the
      // old generation mark queue. Any pointers in a young region will be
      // abandoned.
      ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_transfer_satb);
      heap->transfer_old_pointers_from_satb();
    }
  }

  if (ShenandoahVerify) {
    heap->verifier()->verify_before_concmark();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  _generation->set_concurrent_mark_in_progress(true);

  start_mark();

  if (_do_old_gc_bootstrap) {
    // Update region state for both young and old regions
    // TODO: We should be able to pull this out of the safepoint for the bootstrap
    // cycle. The top of an old region will only move when a GC cycle evacuates
    // objects into it. When we start an old cycle, we know that nothing can touch
    // the top of old regions.
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
    ShenandoahInitMarkUpdateRegionStateClosure cl;
    heap->parallel_heap_region_iterate(&cl);
  } else {
    // Update region state for only young regions
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
    ShenandoahInitMarkUpdateRegionStateClosure cl;
    _generation->parallel_heap_region_iterate(&cl);
  }

  // Weak reference processing
  ShenandoahReferenceProcessor* rp = _generation->ref_processor();
  rp->reset_thread_locals();
  rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());

  // Make above changes visible to worker threads
  OrderAccess::fence();

  // Arm nmethods for concurrent marking. When an nmethod is about to be executed,
  // we need to make sure that all its metadata are marked. The alternative is to remark
  // thread roots at final mark pause, but that can be a potential latency killer.
  if (heap->unload_classes()) {
    ShenandoahCodeRoots::arm_nmethods();
  }

  ShenandoahStackWatermark::change_epoch_id();
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_mark();
  }
}

void ShenandoahConcurrentGC::op_mark_roots() {
  _mark.mark_concurrent_roots();
}

void ShenandoahConcurrentGC::op_mark() {
  _mark.concurrent_mark();
}

void ShenandoahConcurrentGC::op_final_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");

  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_no_forwarded();
  }

  if (!heap->cancelled_gc()) {
    _mark.finish_mark();
    assert(!heap->cancelled_gc(), "STW mark cannot OOM");

    // Notify JVMTI that the tagmap table will need cleaning.
    JvmtiTagMap::set_needs_cleaning();

    // The collection set is chosen by prepare_regions_and_collection_set().
    //
    // TODO: Under severe memory overload conditions that can be checked here, we may want to limit
    // the inclusion of old-gen candidates within the collection set. This would allow us to prioritize efforts on
    // evacuating young-gen. This remediation is most appropriate when old-gen availability is very high (so there
    // are negligible negative impacts from delaying completion of old-gen evacuation) and when young-gen collections
    // are "under duress" (as signalled by very low availability of memory within young-gen, indicating that young-gen
    // collections are not triggering frequently enough).
    _generation->prepare_regions_and_collection_set(true /*concurrent*/);

    // Upon return from prepare_regions_and_collection_set(), certain parameters have been established to govern the
    // evacuation efforts that are about to begin. In particular:
    //
    // heap->get_promoted_reserve() represents the amount of memory within old-gen's available memory that has
    // been set aside to hold objects promoted from young-gen memory. This represents an estimated percentage
    // of the live young-gen memory within the collection set. If there is more data ready to be promoted than
    // can fit within this reserve, the promotion of some objects will be deferred until a subsequent evacuation
    // pass.
    //
    // heap->get_old_evac_reserve() represents the amount of memory within old-gen's available memory that has been
    // set aside to hold objects evacuated from the old-gen collection set.
    //
    // heap->get_young_evac_reserve() represents the amount of memory within young-gen's available memory that has
    // been set aside to hold objects evacuated from the young-gen collection set. Conservatively, this value
    // equals the entire amount of live young-gen memory within the collection set, even though some of this memory
    // will likely be promoted.
    //
    // heap->get_alloc_supplement_reserve() represents the amount of old-gen memory that can be allocated during evacuation
    // and update-refs phases of gc. The young evacuation reserve has already been removed from this quantity.
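    //
    // In short, the generational evacuation budget is split between promotions into old-gen,
    // evacuation of the old-gen collection set, and evacuation of the young-gen collection set,
    // with an additional allocation supplement loaned from old-gen for the evacuation and
    // update-refs phases. All of these reserves are cleared again at the end of collect().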

    // Has to be done after cset selection
    heap->prepare_concurrent_roots();

    if (!heap->collection_set()->is_empty()) {
      if (ShenandoahVerify) {
        heap->verifier()->verify_before_evacuation();
      }

      heap->set_evacuation_in_progress(true);
      // From here on, we need to update references.
      heap->set_has_forwarded_objects(true);

      // Verify before arming for concurrent processing.
      // Otherwise, verification can trigger stack processing.
      if (ShenandoahVerify) {
        heap->verifier()->verify_during_evacuation();
      }

      // Arm nmethods/stack for concurrent processing
      ShenandoahCodeRoots::arm_nmethods();
      ShenandoahStackWatermark::change_epoch_id();

      if (heap->mode()->is_generational()) {
        // Calculate the temporary evacuation allowance supplement to young-gen memory capacity (for allocations
        // and young-gen evacuations).
        size_t young_available = heap->young_generation()->adjust_available(heap->get_alloc_supplement_reserve());
        // old_available is memory that can hold promotions and evacuations. Subtract out the memory that is being
        // loaned for young-gen allocations or evacuations.
        size_t old_available = heap->old_generation()->adjust_available(-heap->get_alloc_supplement_reserve());

        log_info(gc, ergo)("After generational memory budget adjustments, old available: " SIZE_FORMAT
                           "%s, young_available: " SIZE_FORMAT "%s",
                           byte_size_in_proper_unit(old_available), proper_unit_for_byte_size(old_available),
                           byte_size_in_proper_unit(young_available), proper_unit_for_byte_size(young_available));
      }

      if (ShenandoahPacing) {
        heap->pacer()->setup_for_evac();
      }
    } else {
      if (ShenandoahVerify) {
        heap->verifier()->verify_after_concmark();
      }

      if (VerifyAfterGC) {
        Universe::verify();
      }
    }
  }
}

class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
private:
  OopClosure* const _oops;

public:
  ShenandoahConcurrentEvacThreadClosure(OopClosure* oops);
  void do_thread(Thread* thread);
};

ShenandoahConcurrentEvacThreadClosure::ShenandoahConcurrentEvacThreadClosure(OopClosure* oops) :
  _oops(oops) {
}

void ShenandoahConcurrentEvacThreadClosure::do_thread(Thread* thread) {
  JavaThread* const jt = JavaThread::cast(thread);
  StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc);
}

class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask {
private:
  ShenandoahJavaThreadsIterator _java_threads;

public:
  ShenandoahConcurrentEvacUpdateThreadTask(uint n_workers) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Thread Roots"),
    _java_threads(ShenandoahPhaseTimings::conc_thread_roots, n_workers) {
  }

  void work(uint worker_id) {
    // ShenandoahEvacOOMScope has to be set up by ShenandoahContextEvacuateUpdateRootsClosure.
    // Otherwise, may deadlock with watermark lock
    ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
    ShenandoahConcurrentEvacThreadClosure thr_cl(&oops_cl);
    _java_threads.threads_do(&thr_cl, worker_id);
  }
};

void ShenandoahConcurrentGC::op_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_evacuation_in_progress(), "Checked by caller");
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
  ShenandoahConcurrentEvacUpdateThreadTask task(heap->workers()->active_workers());
  heap->workers()->run_task(&task);
}

void ShenandoahConcurrentGC::op_weak_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
  // Concurrent weak refs processing
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
  if (heap->gc_cause() == GCCause::_wb_breakpoint) {
    ShenandoahBreakpoint::at_after_reference_processing_started();
  }
  _generation->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
}

class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _mark_context;
  bool _evac_in_progress;
  Thread* const _thread;

public:
  ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
  void do_oop(oop* p);
  void do_oop(narrowOop* p);
};

ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
  _heap(ShenandoahHeap::heap()),
  _mark_context(ShenandoahHeap::heap()->marking_context()),
  _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
  _thread(Thread::current()) {
}

void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
  const oop obj = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(obj)) {
    if (!_mark_context->is_marked(obj)) {
      if (_heap->is_in_active_generation(obj)) {
        // TODO: This worries me. Here we are asserting that an unmarked from-space object is 'correct'.
        // Normally, I would call this a bogus assert, but there seems to be a legitimate use-case for
        // accessing from-space objects during class unloading. However, the from-space object may have
        // been "filled". We've made no effort to prevent old generation classes being unloaded by young
        // gen (and vice-versa).
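        //
        // The object is unmarked (dead), so clear this weak root slot rather than leaving a
        // reference to the dead object behind.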
        shenandoah_assert_correct(p, obj);
        ShenandoahHeap::atomic_clear_oop(p, obj);
      }
    } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
      oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      if (resolved == obj) {
        resolved = _heap->evacuate_object(obj, _thread);
      }
      ShenandoahHeap::atomic_update_oop(resolved, p, obj);
      assert(_heap->cancelled_gc() ||
             _mark_context->is_marked(resolved) && !_heap->in_collection_set(resolved),
             "Sanity");
    }
  }
}

void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
  ShouldNotReachHere();
}

class ShenandoahIsCLDAliveClosure : public CLDClosure {
public:
  void do_cld(ClassLoaderData* cld) {
    cld->is_alive();
  }
};

class ShenandoahIsNMethodAliveClosure: public NMethodClosure {
public:
  void do_nmethod(nmethod* n) {
    n->is_unloading();
  }
};

// This task not only evacuates/updates marked weak roots, but also "NULL"s
// dead weak roots.
class ShenandoahConcurrentWeakRootsEvacUpdateTask : public WorkerTask {
private:
  ShenandoahVMWeakRoots<true /*concurrent*/> _vm_roots;

  // Roots related to concurrent class unloading
  ShenandoahClassLoaderDataRoots<true /* concurrent */> _cld_roots;
  ShenandoahConcurrentNMethodIterator _nmethod_itr;
  ShenandoahPhaseTimings::Phase _phase;

public:
  ShenandoahConcurrentWeakRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Weak Roots"),
    _vm_roots(phase),
    _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers(), false /*heap iteration*/),
    _nmethod_itr(ShenandoahCodeRoots::table()),
    _phase(phase) {
    if (ShenandoahHeap::heap()->unload_classes()) {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      _nmethod_itr.nmethods_do_begin();
    }
  }

  ~ShenandoahConcurrentWeakRootsEvacUpdateTask() {
    if (ShenandoahHeap::heap()->unload_classes()) {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      _nmethod_itr.nmethods_do_end();
    }
    // Notify runtime data structures of potentially dead oops
    _vm_roots.report_num_dead();
  }

  void work(uint worker_id) {
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    {
      ShenandoahEvacOOMScope oom;
      // jni_roots and weak_roots are OopStorage backed roots, concurrent iteration
      // may race against OopStorage::release() calls.
      ShenandoahEvacUpdateCleanupOopStorageRootsClosure cl;
      _vm_roots.oops_do(&cl, worker_id);
    }

    // If we are going to perform concurrent class unloading later on, we need to
    // clean up the weak oops in CLD and determine nmethods' unloading state, so that we
    // can clean up immediate garbage sooner.
    if (ShenandoahHeap::heap()->unload_classes()) {
      // Applies the ShenandoahIsCLDAlive closure to CLDs; the native barrier will either NULL the
      // CLD's holder or evacuate it.
      {
        ShenandoahIsCLDAliveClosure is_cld_alive;
        _cld_roots.cld_do(&is_cld_alive, worker_id);
      }

      // Applies ShenandoahIsNMethodAliveClosure to registered nmethods.
      // The closure calls nmethod->is_unloading().
      // The is_unloading state is cached; therefore, during the concurrent class unloading phase,
      // we will not touch the metadata of unloading nmethods.
      {
        ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
        ShenandoahIsNMethodAliveClosure is_nmethod_alive;
        _nmethod_itr.nmethods_do(&is_nmethod_alive);
      }
    }
  }
};

void ShenandoahConcurrentGC::op_weak_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
  // Concurrent weak root processing
  {
    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_work);
    ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots_work);
    ShenandoahConcurrentWeakRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_weak_roots_work);
    heap->workers()->run_task(&task);
  }

  // Perform handshake to flush out dead oops
  {
    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_rendezvous);
    heap->rendezvous_threads();
  }
}

void ShenandoahConcurrentGC::op_class_unloading() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert (heap->is_concurrent_weak_root_in_progress() &&
          heap->unload_classes(),
          "Checked by caller");
  heap->do_class_unloading();
}

class ShenandoahEvacUpdateCodeCacheClosure : public NMethodClosure {
private:
  BarrierSetNMethod* const _bs;
  ShenandoahEvacuateUpdateMetadataClosure _cl;

public:
  ShenandoahEvacUpdateCodeCacheClosure() :
    _bs(BarrierSet::barrier_set()->barrier_set_nmethod()),
    _cl() {
  }

  void do_nmethod(nmethod* n) {
    ShenandoahNMethod* data = ShenandoahNMethod::gc_data(n);
    ShenandoahReentrantLocker locker(data->lock());
    // Set up the EvacOOM scope below the reentrant lock to avoid deadlock with
    // nmethod_entry_barrier
    ShenandoahEvacOOMScope oom;
    data->oops_do(&_cl, true/*fix relocation*/);
    _bs->disarm(n);
  }
};

class ShenandoahConcurrentRootsEvacUpdateTask : public WorkerTask {
private:
  ShenandoahPhaseTimings::Phase _phase;
  ShenandoahVMRoots<true /*concurrent*/> _vm_roots;
  ShenandoahClassLoaderDataRoots<true /*concurrent*/> _cld_roots;
  ShenandoahConcurrentNMethodIterator _nmethod_itr;

public:
  ShenandoahConcurrentRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Strong Roots"),
    _phase(phase),
    _vm_roots(phase),
    _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers(), false /*heap iteration*/),
    _nmethod_itr(ShenandoahCodeRoots::table()) {
    if (!ShenandoahHeap::heap()->unload_classes()) {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      _nmethod_itr.nmethods_do_begin();
    }
  }

  ~ShenandoahConcurrentRootsEvacUpdateTask() {
    if (!ShenandoahHeap::heap()->unload_classes()) {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      _nmethod_itr.nmethods_do_end();
    }
  }

  void work(uint worker_id) {
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    {
      ShenandoahEvacOOMScope oom;
      {
        // vm_roots and weak_roots are OopStorage backed roots, concurrent iteration
        // may race against OopStorage::release() calls.
        ShenandoahContextEvacuateUpdateRootsClosure cl;
        _vm_roots.oops_do<ShenandoahContextEvacuateUpdateRootsClosure>(&cl, worker_id);
      }

      {
        ShenandoahEvacuateUpdateMetadataClosure cl;
        CLDToOopClosure clds(&cl, ClassLoaderData::_claim_strong);
        _cld_roots.cld_do(&clds, worker_id);
      }
    }

    // Cannot set up ShenandoahEvacOOMScope here, due to potential deadlock with nmethod_entry_barrier.
    if (!ShenandoahHeap::heap()->unload_classes()) {
      ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
      ShenandoahEvacUpdateCodeCacheClosure cl;
      _nmethod_itr.nmethods_do(&cl);
    }
  }
};

void ShenandoahConcurrentGC::op_strong_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
  ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
  heap->workers()->run_task(&task);
  heap->set_concurrent_strong_root_in_progress(false);
}

void ShenandoahConcurrentGC::op_cleanup_early() {
  ShenandoahHeap::heap()->free_set()->recycle_trash();
}

void ShenandoahConcurrentGC::op_evacuate() {
  ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
}

void ShenandoahConcurrentGC::op_init_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->set_evacuation_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);
  heap->prepare_update_heap_references(true /*concurrent*/);
  heap->set_update_refs_in_progress(true);
  if (ShenandoahVerify) {
    heap->verifier()->verify_before_updaterefs();
  }
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_updaterefs();
  }
}

void ShenandoahConcurrentGC::op_updaterefs() {
  ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
}

class ShenandoahUpdateThreadClosure : public HandshakeClosure {
private:
  ShenandoahUpdateRefsClosure _cl;
public:
  ShenandoahUpdateThreadClosure();
  void do_thread(Thread* thread);
};

ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
  HandshakeClosure("Shenandoah Update Thread Roots") {
}

void ShenandoahUpdateThreadClosure::do_thread(Thread* thread) {
  if (thread->is_Java_thread()) {
    JavaThread* jt = JavaThread::cast(thread);
    ResourceMark rm;
    jt->oops_do(&_cl, NULL);
  }
}

void ShenandoahConcurrentGC::op_update_thread_roots() {
  ShenandoahUpdateThreadClosure cl;
  Handshake::execute(&cl);
}

void ShenandoahConcurrentGC::op_final_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
  assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");

  heap->finish_concurrent_roots();

  // Clear cancelled GC, if set. On cancellation path, the block before would handle
  // everything.
  if (heap->cancelled_gc()) {
    heap->clear_cancelled_gc(true /* clear oom handler */);
  }

  // Has to be done before cset is cleared
  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_in_to_space();
  }

  if (heap->mode()->is_generational() && heap->is_concurrent_old_mark_in_progress()) {
    // When the SATB barrier is left on to support concurrent old gen mark, it may pick up writes to
    // objects in the collection set. After those objects are evacuated, the pointers in the
    // SATB are no longer safe. Once we have finished update references, we are guaranteed that
    // no more writes to the collection set are possible.
    //
    // This will transfer any old pointers in _active_ regions from the SATB to the old gen
    // mark queues. All other pointers will be discarded. This would also discard any pointers
    // in old regions that were included in a mixed evacuation. We aren't using the SATB filter
    // methods here because we cannot control when they execute. If the SATB filter runs _after_
    // a region has been recycled, we will not be able to detect the bad pointer.
    //
    // We are not concerned about skipping this step in abbreviated cycles because regions
    // with no live objects cannot have been written to and so cannot have entries in the SATB
    // buffers.
    heap->transfer_old_pointers_from_satb();
  }

  heap->update_heap_region_states(true /*concurrent*/);

  heap->set_update_refs_in_progress(false);
  heap->set_has_forwarded_objects(false);

  // Aging_cycle is only relevant during the evacuation cycle for individual objects and during final mark for
  // entire regions. Both of these relevant operations occur before final update refs.
  heap->set_aging_cycle(false);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_updaterefs();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  heap->rebuild_free_set(true /*concurrent*/);
  heap->adjust_generation_sizes();
}

void ShenandoahConcurrentGC::op_final_roots() {
  ShenandoahHeap::heap()->set_concurrent_weak_root_in_progress(false);
}

void ShenandoahConcurrentGC::op_cleanup_complete() {
  ShenandoahHeap::heap()->free_set()->recycle_trash();
}

void ShenandoahConcurrentGC::op_global_coalesce_and_fill() {
  ShenandoahHeap::heap()->coalesce_and_fill_old_regions();
}

bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
  if (ShenandoahHeap::heap()->cancelled_gc()) {
    _degen_point = point;
    return true;
  }
  return false;
}

void ShenandoahConcurrentGC::init_mark_event_message(char* buf, size_t len) const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
  if (heap->unload_classes()) {
    jio_snprintf(buf, len, "Pause Init Mark (%s) (unload classes)", _generation->name());
  } else {
    jio_snprintf(buf, len, "Pause Init Mark (%s)", _generation->name());
  }
}

void ShenandoahConcurrentGC::final_mark_event_message(char* buf, size_t len) const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
         "Should not have forwarded objects during final mark (unless old gen concurrent mark is running)");
  if (heap->unload_classes()) {
    jio_snprintf(buf, len, "Pause Final Mark (%s) (unload classes)", _generation->name());
  } else {
    jio_snprintf(buf, len, "Pause Final Mark (%s)", _generation->name());
  }
}

void ShenandoahConcurrentGC::conc_mark_event_message(char* buf, size_t len) const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
         "Should not have forwarded objects during concurrent mark (unless old gen concurrent mark is running)");
  if (heap->unload_classes()) {
    jio_snprintf(buf, len, "Concurrent marking (%s) (unload classes)", _generation->name());
  } else {
    jio_snprintf(buf, len, "Concurrent marking (%s)", _generation->name());
  }
}