1 /*
   2  * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
   4  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
   5  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   6  *
   7  * This code is free software; you can redistribute it and/or modify it
   8  * under the terms of the GNU General Public License version 2 only, as
   9  * published by the Free Software Foundation.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 
  28 #include "gc/shared/barrierSetNMethod.hpp"
  29 #include "gc/shared/collectorCounters.hpp"
  30 #include "gc/shared/continuationGCSupport.inline.hpp"
  31 #include "gc/shenandoah/shenandoahBreakpoint.hpp"
  32 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  33 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  34 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
  35 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  36 #include "gc/shenandoah/shenandoahGeneration.hpp"
  37 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
  38 #include "gc/shenandoah/shenandoahLock.hpp"
  39 #include "gc/shenandoah/shenandoahMark.inline.hpp"
  40 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  41 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  42 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  43 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  44 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  45 #include "gc/shenandoah/shenandoahStackWatermark.hpp"
  46 #include "gc/shenandoah/shenandoahUtils.hpp"
  47 #include "gc/shenandoah/shenandoahVerifier.hpp"
  48 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  49 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  50 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  51 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  52 #include "memory/allocation.hpp"
  53 #include "prims/jvmtiTagMap.hpp"
  54 #include "runtime/vmThread.hpp"
  55 #include "utilities/events.hpp"
  56 
  57 // Breakpoint support
  58 class ShenandoahBreakpointGCScope : public StackObj {
  59 private:
  60   const GCCause::Cause _cause;
  61 public:
  62   ShenandoahBreakpointGCScope(GCCause::Cause cause) : _cause(cause) {
  63     if (cause == GCCause::_wb_breakpoint) {
  64       ShenandoahBreakpoint::start_gc();
  65       ShenandoahBreakpoint::at_before_gc();
  66     }
  67   }
  68 
  69   ~ShenandoahBreakpointGCScope() {
  70     if (_cause == GCCause::_wb_breakpoint) {
  71       ShenandoahBreakpoint::at_after_gc();
  72     }
  73   }
  74 };
  75 
  76 class ShenandoahBreakpointMarkScope : public StackObj {
  77 private:
  78   const GCCause::Cause _cause;
  79 public:
  80   ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
  81     if (_cause == GCCause::_wb_breakpoint) {
  82       ShenandoahBreakpoint::at_after_marking_started();
  83     }
  84   }
  85 
  86   ~ShenandoahBreakpointMarkScope() {
  87     if (_cause == GCCause::_wb_breakpoint) {
  88       ShenandoahBreakpoint::at_before_marking_completed();
  89     }
  90   }
  91 };
  92 
// Constructs a concurrent GC driver for the given generation. When
// do_old_gc_bootstrap is true, this cycle also prepares the old generation
// for a subsequent old-gen marking cycle (see op_reset, which then clears
// bitmaps for the global generation).
ShenandoahConcurrentGC::ShenandoahConcurrentGC(ShenandoahGeneration* generation, bool do_old_gc_bootstrap) :
  ShenandoahGC(generation),
  _mark(generation),
  _degen_point(ShenandoahDegenPoint::_degenerated_unset),
  _abbreviated(false),
  _do_old_gc_bootstrap(do_old_gc_bootstrap) {
}
 100 
// Returns the phase at which this cycle was cancelled, so a degenerated
// cycle can resume from there; _degenerated_unset if never cancelled.
ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
  return _degen_point;
}
 104 
// Concurrent transition from evacuation to reference updating: retires GC
// LABs and changes the gc state while mutators keep running. Phase timing
// and JFR event reporting are handled by the RAII trackers below.
void ShenandoahConcurrentGC::entry_concurrent_update_refs_prepare(ShenandoahHeap* const heap) {
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = conc_init_update_refs_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs_prepare);
  EventMark em("%s", msg);

  // Evacuation is complete, retire gc labs and change gc state
  heap->concurrent_prepare_for_update_refs();
}
 114 
// Drives one complete concurrent collection cycle: reset, concurrent marking
// (bracketed by the init-mark and final-mark safepoints), then either the
// full evacuation + update-refs path or the abbreviated path when there is
// nothing to evacuate. Returns true if the cycle ran to completion; returns
// false if the GC was cancelled, in which case _degen_point records where a
// degenerated cycle should resume.
bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  _generation->ref_processor()->set_soft_reference_policy(
      GCCause::should_clear_all_soft_refs(cause));

  ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);

  // Reset for upcoming marking
  entry_reset();

  // Start initial mark under STW
  vmop_entry_init_mark();

  {
    ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);

    // Reset task queue stats here, rather than in mark_concurrent_roots,
    // because remembered set scan will `push` oops into the queues and
    // resetting after this happens will lose those counts.
    TASKQUEUE_STATS_ONLY(_mark.task_queues()->reset_taskqueue_stats());

    // Concurrent remembered set scanning
    entry_scan_remembered_set();

    // Concurrent mark roots
    entry_mark_roots();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_roots)) {
      return false;
    }

    // Continue concurrent mark
    entry_mark();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) {
      return false;
    }
  }

  // Complete marking under STW, and start evacuation
  vmop_entry_final_mark();

  // If the GC was cancelled before final mark, nothing happens on the safepoint. We are still
  // in the marking phase and must resume the degenerated cycle from there. If the GC was cancelled
  // after final mark, then we've entered the evacuation phase and must resume the degenerated cycle
  // from that phase.
  if (_generation->is_concurrent_mark_in_progress()) {
    bool cancelled = check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark);
    assert(cancelled, "GC must have been cancelled between concurrent and final mark");
    return false;
  }

  assert(heap->is_concurrent_weak_root_in_progress(), "Must be doing weak roots now");

  // Concurrent stack processing
  if (heap->is_evacuation_in_progress()) {
    entry_thread_roots();
  }

  // Process weak roots that might still point to regions that would be broken by cleanup.
  // We cannot recycle regions because weak roots need to know what is marked in trashed regions.
  entry_weak_refs();
  entry_weak_roots();

  // Perform concurrent class unloading before any regions get recycled. Class unloading may
  // need to inspect unmarked objects in trashed regions.
  if (heap->unload_classes()) {
    entry_class_unloading();
  }

  // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
  // the space. This would be the last action if there is nothing to evacuate.  Note that
  // we will not age young-gen objects in the case that we skip evacuation.
  entry_cleanup_early();

  heap->free_set()->log_status_under_lock();

  // Processing strong roots
  // This may be skipped if there is nothing to update/evacuate.
  // If so, strong_root_in_progress would be unset.
  if (heap->is_concurrent_strong_root_in_progress()) {
    entry_strong_roots();
  }

  // Continue the cycle with evacuation and optional update-refs.
  // This may be skipped if there is nothing to evacuate.
  // If so, evac_in_progress would be unset by collection set preparation code.
  if (heap->is_evacuation_in_progress()) {
    // Concurrently evacuate
    entry_evacuate();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) {
      return false;
    }

    // Perform update-refs phase.
    // The init-update-refs pause is only needed to run the verifier; skip it otherwise.
    entry_concurrent_update_refs_prepare(heap);
    if (ShenandoahVerify) {
      vmop_entry_init_update_refs();
    }

    entry_update_refs();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_update_refs)) {
      return false;
    }

    // Concurrent update thread roots
    entry_update_thread_roots();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_update_refs)) {
      return false;
    }

    vmop_entry_final_update_refs();

    // Update references freed up collection set, kick the cleanup to reclaim the space.
    entry_cleanup_complete();
  } else {
    // Nothing to evacuate: take the abbreviated path.
    _abbreviated = true;
    if (!entry_final_roots()) {
      assert(_degen_point != _degenerated_unset, "Need to know where to start degenerated cycle");
      return false;
    }

    if (VerifyAfterGC) {
      vmop_entry_verify_final_roots();
    }
  }

  // We defer generation resizing actions until after cset regions have been recycled.  We do this even following an
  // abbreviated cycle.
  if (heap->mode()->is_generational()) {
    ShenandoahGenerationalHeap::heap()->complete_concurrent_cycle();
  }

  // Instead of always resetting immediately before the start of a new GC, we can often reset at the end of the
  // previous GC. This allows us to start the next GC cycle more quickly after a trigger condition is detected,
  // reducing the likelihood that GC will degenerate.
  entry_reset_after_collect();

  return true;
}
 253 
// Completes an abbreviated cycle (one that skipped evacuation) in
// generational mode: promotes eligible regions in place and then updates
// region ages for non-old generations. Returns false if promote-in-place
// was cancelled; the degenerated cycle then finishes that operation.
bool ShenandoahConcurrentGC::complete_abbreviated_cycle() {
  shenandoah_assert_generational();

  ShenandoahGenerationalHeap* const heap = ShenandoahGenerationalHeap::heap();

  // We chose not to evacuate because we found sufficient immediate garbage.
  // However, there may still be regions to promote in place, so do that now.
  if (heap->old_generation()->has_in_place_promotions()) {
    entry_promote_in_place();

    // If the promote-in-place operation was cancelled, we can have the degenerated
    // cycle complete the operation. It will see that no evacuations are in progress,
    // and that there are regions wanting promotion. The risk with not handling the
    // cancellation would be failing to restore top for these regions and leaving
    // them unable to serve allocations for the old generation. This will leave the weak
    // roots flag set (the degenerated cycle will unset it).
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) {
      return false;
    }
  }

  // At this point, the cycle is effectively complete. If the cycle has been cancelled here,
  // the control thread will detect it on its next iteration and run a degenerated young cycle.
  if (!_generation->is_old()) {
    heap->update_region_ages(_generation->complete_marking_context());
  }

  return true;
}
 283 
// Schedules the init-mark VM operation; the VM thread runs entry_init_mark()
// at a safepoint. Gross timing here includes the time to reach the safepoint.
void ShenandoahConcurrentGC::vmop_entry_init_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahInitMark op(this);
  VMThread::execute(&op); // jump to entry_init_mark() under safepoint
}
 293 
// Schedules the final-mark VM operation; the VM thread runs entry_final_mark()
// at a safepoint, finishing marking and (possibly) starting evacuation.
void ShenandoahConcurrentGC::vmop_entry_final_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalMarkStartEvac op(this);
  VMThread::execute(&op); // jump to entry_final_mark under safepoint
}
 303 
// Schedules the init-update-refs VM operation at a safepoint. Only invoked
// when ShenandoahVerify is enabled (see collect()).
void ShenandoahConcurrentGC::vmop_entry_init_update_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_update_refs_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahInitUpdateRefs op(this);
  VMThread::execute(&op);
}
 313 
// Schedules the final-update-refs VM operation at a safepoint, concluding
// the reference-updating phase.
void ShenandoahConcurrentGC::vmop_entry_final_update_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalUpdateRefs op(this);
  VMThread::execute(&op);
}
 323 
// Schedules the final-roots verification VM operation at a safepoint. Used
// on the abbreviated path when VerifyAfterGC is enabled (see collect()).
void ShenandoahConcurrentGC::vmop_entry_verify_final_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_roots_gross);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalRoots op(this);
  VMThread::execute(&op);
}
 334 
// Runs inside the init-mark safepoint (called from the VM operation): sets
// up phase timing and worker count, then performs op_init_mark().
void ShenandoahConcurrentGC::entry_init_mark() {
  const char* msg = init_mark_event_message();
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
                              "init marking");

  op_init_mark();
}
 346 
// Runs inside the final-mark safepoint: sets up phase timing and worker
// count, then performs op_final_mark().
void ShenandoahConcurrentGC::entry_final_mark() {
  const char* msg = final_mark_event_message();
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
                              "final marking");

  op_final_mark();
}
 358 
// Runs inside the init-update-refs safepoint; single-threaded, no worker
// setup needed.
void ShenandoahConcurrentGC::entry_init_update_refs() {
  static const char* msg = "Pause Init Update Refs";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs);
  EventMark em("%s", msg);

  // No workers used in this phase, no setup required
  op_init_update_refs();
}
 367 
// Runs inside the final-update-refs safepoint: sets up phase timing and
// worker count, then performs op_final_update_refs().
void ShenandoahConcurrentGC::entry_final_update_refs() {
  static const char* msg = "Pause Final Update Refs";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
                              "final reference update");

  op_final_update_refs();
}
 379 
// Runs inside the final-roots verification safepoint.
void ShenandoahConcurrentGC::entry_verify_final_roots() {
  const char* msg = verify_final_roots_event_message();
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots);
  EventMark em("%s", msg);

  op_verify_final_roots();
}
 387 
// Concurrent reset before marking starts: sets up phase timing and worker
// count, then performs op_reset() (bitmap/state preparation).
void ShenandoahConcurrentGC::entry_reset() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->try_inject_alloc_failure();

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  {
    const char* msg = conc_reset_event_message();
    ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
    EventMark em("%s", msg);

    ShenandoahWorkerScope scope(heap->workers(),
                                ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
                                msg);
    op_reset();
  }
}
 404 
// Concurrent remembered-set scan; only meaningful for young collections,
// a no-op for any other generation.
void ShenandoahConcurrentGC::entry_scan_remembered_set() {
  if (_generation->is_young()) {
    ShenandoahHeap* const heap = ShenandoahHeap::heap();
    TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
    const char* msg = "Concurrent remembered set scanning";
    ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::init_scan_rset);
    EventMark em("%s", msg);

    ShenandoahWorkerScope scope(heap->workers(),
                                ShenandoahWorkerPolicy::calc_workers_for_rs_scanning(),
                                msg);

    heap->try_inject_alloc_failure();
    _generation->scan_remembered_set(true /* is_concurrent */);
  }
}
 421 
// Concurrent marking of GC roots: sets up phase timing and worker count,
// then performs op_mark_roots().
void ShenandoahConcurrentGC::entry_mark_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = "Concurrent marking roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent marking roots");

  heap->try_inject_alloc_failure();
  op_mark_roots();
}
 436 
// Main concurrent marking phase: sets up phase timing and worker count,
// then performs op_mark().
void ShenandoahConcurrentGC::entry_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = conc_mark_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent marking");

  heap->try_inject_alloc_failure();
  op_mark();
}
 451 
// Concurrent processing of thread stacks (called while evacuation is in
// progress): sets up phase timing and worker count, then op_thread_roots().
void ShenandoahConcurrentGC::entry_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  static const char* msg = "Concurrent thread roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_thread_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              msg);

  heap->try_inject_alloc_failure();
  op_thread_roots();
}
 465 
// Concurrent weak-reference processing: sets up phase timing and worker
// count, then performs op_weak_refs().
void ShenandoahConcurrentGC::entry_weak_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  const char* msg = conc_weak_refs_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_refs_processing(),
                              "concurrent weak references");

  heap->try_inject_alloc_failure();
  op_weak_refs();
}
 479 
// Concurrent weak-root processing: sets up phase timing and worker count,
// then performs op_weak_roots().
void ShenandoahConcurrentGC::entry_weak_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = conc_weak_roots_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent weak root");

  heap->try_inject_alloc_failure();
  op_weak_roots();
}
 494 
// Concurrent class unloading (only reached when heap->unload_classes() is
// true in collect()): sets up phase timing and worker count, then
// performs op_class_unloading().
void ShenandoahConcurrentGC::entry_class_unloading() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent class unloading";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_class_unload);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent class unloading");

  heap->try_inject_alloc_failure();
  op_class_unloading();
}
 509 
// Concurrent strong-root processing: sets up phase timing (including the
// per-worker phase tracker) and worker count, then performs op_strong_roots().
void ShenandoahConcurrentGC::entry_strong_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent strong roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_strong_roots);
  EventMark em("%s", msg);

  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_strong_roots);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent strong root");

  heap->try_inject_alloc_failure();
  op_strong_roots();
}
 526 
// Concurrent early cleanup: reclaims immediate garbage found by final mark.
// If no evacuation will follow (abbreviated cycle), also rebuilds the free
// set right away to establish reserves for the next cycle.
void ShenandoahConcurrentGC::entry_cleanup_early() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = conc_cleanup_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_early, true /* log_heap_usage */);
  EventMark em("%s", msg);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  op_cleanup_early();
  if (!heap->is_evacuation_in_progress()) {
    // This is an abbreviated cycle.  Rebuild the freeset in order to establish reserves for the next GC cycle.  Doing
    // the rebuild ASAP also expedites availability of immediate trash, reducing the likelihood that we will degenerate
    // during promote-in-place processing.
    heap->rebuild_free_set(true /*concurrent*/);
  }
}
 544 
// Concurrent evacuation of the collection set: sets up phase timing and
// worker count, then performs op_evacuate().
void ShenandoahConcurrentGC::entry_evacuate() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  static const char* msg = "Concurrent evacuation";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_evac);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
                              "concurrent evacuation");

  heap->try_inject_alloc_failure();
  op_evacuate();
}
 560 
// Generational mode only: promotes eligible regions in place (no object
// copying) on behalf of the abbreviated cycle.
void ShenandoahConcurrentGC::entry_promote_in_place() const {
  shenandoah_assert_generational();

  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::promote_in_place);
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::promote_in_place);
  EventMark em("%s", "Promote in place");

  ShenandoahGenerationalHeap::heap()->promote_regions_in_place(_generation, true);
}
 570 
// Concurrent update of references held in thread stacks; single-threaded,
// no worker setup needed.
void ShenandoahConcurrentGC::entry_update_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  static const char* msg = "Concurrent update thread roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_thread_roots);
  EventMark em("%s", msg);

  // No workers used in this phase, no setup required
  heap->try_inject_alloc_failure();
  op_update_thread_roots();
}
 583 
// Concurrent heap-wide reference updating: sets up phase timing and worker
// count, then performs op_update_refs().
void ShenandoahConcurrentGC::entry_update_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent update references";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
                              "concurrent reference update");

  heap->try_inject_alloc_failure();
  op_update_refs();
}
 598 
// Concurrent final cleanup after update-refs: reclaims the collection set
// space freed by reference updating; single-threaded, no worker setup.
void ShenandoahConcurrentGC::entry_cleanup_complete() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = conc_cleanup_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
  EventMark em("%s", msg);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  op_cleanup_complete();
}
 610 
// Concurrent reset performed at the END of a cycle so the next cycle can
// start more quickly after a trigger (see comment in collect()).
void ShenandoahConcurrentGC::entry_reset_after_collect() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = conc_reset_after_collect_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset_after_collect);
  EventMark em("%s", msg);

  op_reset_after_collect();
}
 620 
// Prepares bitmaps and generation state for marking. For an old-GC
// bootstrap cycle, the whole global generation is prepared so the old
// bitmap is clean for the old cycle that follows; otherwise only this
// cycle's generation. In generational mode also marks the read card table
// clean.
void ShenandoahConcurrentGC::op_reset() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  // If it is old GC bootstrap cycle, always clear bitmap for global gen
  // to ensure bitmap for old gen is clear for old GC cycle after this.
  if (_do_old_gc_bootstrap) {
    assert(!heap->is_prepare_for_old_mark_in_progress(), "Cannot reset old without making it parsable");
    heap->global_generation()->prepare_gc();
  } else {
    _generation->prepare_gc();
  }

  if (heap->mode()->is_generational()) {
    heap->old_generation()->card_scan()->mark_read_table_as_clean();
  }
}
 637 
// Region closure used at init mark: ensures every active region's
// top-at-mark-start (TAMS) matches its current top before marking begins.
// Inactive regions are expected to already have the correct TAMS.
class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
public:
  ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert(!r->has_live(), "Region %zu should have no live data", r->index());
    if (r->is_active()) {
      // Check if region needs updating its TAMS. We have updated it already during concurrent
      // reset, so it is very likely we don't need to do another write here.  Since most regions
      // are not "active", this path is relatively rare.
      if (_ctx->top_at_mark_start(r) != r->top()) {
        _ctx->capture_top_at_mark_start(r);
      }
    } else {
      assert(_ctx->top_at_mark_start(r) == r->top(),
             "Region %zu should already have correct TAMS", r->index());
    }
  }

  // Safe for parallel iteration: each worker touches disjoint regions.
  bool is_thread_safe() { return true; }
};
 661 
// Delegates mark startup to the concurrent mark object.
void ShenandoahConcurrentGC::start_mark() {
  _mark.start_mark();
}
 665 
// The init-mark pause proper, executed by the VM thread at a safepoint:
// swaps card tables (generational mode), verifies preconditions, flips the
// concurrent-mark-in-progress flag, captures TAMS for regions, resets
// reference-processor thread locals, and arms nmethods/stack watermarks so
// mutators cooperate with concurrent marking.
void ShenandoahConcurrentGC::op_init_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");

  assert(_generation->is_bitmap_clear(), "need clear marking bitmap");
  assert(!_generation->is_mark_complete(), "should not be complete");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");

  if (heap->mode()->is_generational()) {
    if (_generation->is_global()) {
      // A global collection supersedes any in-progress old collection.
      heap->old_generation()->cancel_gc();
    }

    {
      // After we swap card table below, the write-table is all clean, and the read table holds
      // cards dirty prior to the start of GC. Young and bootstrap collection will update
      // the write card table as a side effect of remembered set scanning. Global collection will
      // update the card table as a side effect of global marking of old objects.
      ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_swap_rset);
      _generation->swap_card_tables();
    }
  }

  if (ShenandoahVerify) {
    ShenandoahTimingsTracker v(ShenandoahPhaseTimings::init_mark_verify);
    heap->verifier()->verify_before_concmark(_generation);
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  _generation->set_concurrent_mark_in_progress(true);

  start_mark();

  if (_do_old_gc_bootstrap) {
    shenandoah_assert_generational();
    // Update region state for both young and old regions
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
    ShenandoahInitMarkUpdateRegionStateClosure cl;
    heap->parallel_heap_region_iterate(&cl);
    heap->old_generation()->ref_processor()->reset_thread_locals();
  } else {
    // Update region state for only young regions
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
    ShenandoahInitMarkUpdateRegionStateClosure cl;
    _generation->parallel_heap_region_iterate(&cl);
  }

  // Weak reference processing
  ShenandoahReferenceProcessor* rp = _generation->ref_processor();
  rp->reset_thread_locals();

  // Make above changes visible to worker threads
  OrderAccess::fence();

  // Arm nmethods for concurrent mark
  ShenandoahCodeRoots::arm_nmethods_for_mark();

  ShenandoahStackWatermark::change_epoch_id();

  {
    ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_propagate_gc_state);
    heap->propagate_gc_state_to_all_threads();
  }
}
 734 
// Delegates concurrent root marking to the mark object.
void ShenandoahConcurrentGC::op_mark_roots() {
  _mark.mark_concurrent_roots();
}
 738 
// Delegates the main concurrent marking loop to the mark object.
void ShenandoahConcurrentGC::op_mark() {
  _mark.concurrent_mark();
}
 742 
// Final-mark safepoint operation: finish marking (unless the GC has been
// cancelled), choose the collection set, and, if there is anything to
// evacuate, flip the heap into evacuation mode.
void ShenandoahConcurrentGC::op_final_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");

  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_no_forwarded(_generation);
  }

  if (!heap->cancelled_gc()) {
    _mark.finish_mark();
    assert(!heap->cancelled_gc(), "STW mark cannot OOM");

    // Notify JVMTI that the tagmap table will need cleaning.
    JvmtiTagMap::set_needs_cleaning();

    // The collection set is chosen by prepare_regions_and_collection_set(). Additionally, certain parameters have been
    // established to govern the evacuation efforts that are about to begin.  Refer to comments on reserve members in
    // ShenandoahGeneration and ShenandoahOldGeneration for more detail.
    _generation->prepare_regions_and_collection_set(true /*concurrent*/);

    // Has to be done after cset selection
    heap->prepare_concurrent_roots();

    if (!heap->collection_set()->is_empty()) {
      // Dump the chosen collection set when cset debug logging is enabled.
      LogTarget(Debug, gc, cset) lt;
      if (lt.is_enabled()) {
        ResourceMark rm;
        LogStream ls(lt);
        heap->collection_set()->print_on(&ls);
      }

      if (ShenandoahVerify) {
        ShenandoahTimingsTracker v(ShenandoahPhaseTimings::final_mark_verify);
        heap->verifier()->verify_before_evacuation(_generation);
      }

      heap->set_evacuation_in_progress(true);
      // From here on, we need to update references.
      heap->set_has_forwarded_objects(true);

      // Arm nmethods/stack for concurrent processing
      ShenandoahCodeRoots::arm_nmethods_for_evac();
      ShenandoahStackWatermark::change_epoch_id();

    } else {
      // Empty collection set: no evacuation setup; only verify if requested.
      if (ShenandoahVerify) {
        ShenandoahTimingsTracker v(ShenandoahPhaseTimings::final_mark_verify);
        if (has_in_place_promotions(heap)) {
          heap->verifier()->verify_after_concmark_with_promotions(_generation);
        } else {
          heap->verifier()->verify_after_concmark(_generation);
        }
      }
    }
  }

  {
    // Publish the updated gc state to all Java threads.
    ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_propagate_gc_state);
    heap->propagate_gc_state_to_all_threads();
  }
}
 805 
 806 bool ShenandoahConcurrentGC::has_in_place_promotions(ShenandoahHeap* heap) {
 807   return heap->mode()->is_generational() && heap->old_generation()->has_in_place_promotions();
 808 }
 809 
 810 class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
 811 private:
 812   OopClosure* const _oops;
 813 public:
 814   explicit ShenandoahConcurrentEvacThreadClosure(OopClosure* oops) : _oops(oops) {}
 815 
 816   void do_thread(Thread* thread) override {
 817     JavaThread* const jt = JavaThread::cast(thread);
 818     StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc);
 819   }
 820 };
 821 
 822 class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask {
 823 private:
 824   ShenandoahJavaThreadsIterator _java_threads;
 825 
 826 public:
 827   explicit ShenandoahConcurrentEvacUpdateThreadTask(uint n_workers) :
 828     WorkerTask("Shenandoah Evacuate/Update Concurrent Thread Roots"),
 829     _java_threads(ShenandoahPhaseTimings::conc_thread_roots, n_workers) {
 830   }
 831 
 832   void work(uint worker_id) override {
 833     // ShenandoahEvacOOMScope has to be setup by ShenandoahContextEvacuateUpdateRootsClosure.
 834     // Otherwise, may deadlock with watermark lock
 835     ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
 836     ShenandoahConcurrentEvacThreadClosure thr_cl(&oops_cl);
 837     _java_threads.threads_do(&thr_cl, worker_id);
 838   }
 839 };
 840 
// Concurrent processing of Java thread roots during evacuation: runs the
// thread-root evacuation/update task on all active workers.
void ShenandoahConcurrentGC::op_thread_roots() {
  const ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_evacuation_in_progress(), "Checked by caller");
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
  ShenandoahConcurrentEvacUpdateThreadTask task(heap->workers()->active_workers());
  heap->workers()->run_task(&task);
}
 848 
// Concurrent processing of discovered weak references for this generation.
void ShenandoahConcurrentGC::op_weak_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
  // Concurrent weak refs processing
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
  if (heap->gc_cause() == GCCause::_wb_breakpoint) {
    // Honor the WhiteBox breakpoint request (test support).
    ShenandoahBreakpoint::at_after_reference_processing_started();
  }
  _generation->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
}
 859 
 860 class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
 861 private:
 862   ShenandoahHeap* const _heap;
 863   ShenandoahGeneration* const _generation;
 864   ShenandoahMarkingContext* const _mark_context;
 865   bool  _evac_in_progress;
 866   Thread* const _thread;
 867 
 868 public:
 869   explicit ShenandoahEvacUpdateCleanupOopStorageRootsClosure(ShenandoahGeneration* generation);
 870   void do_oop(oop* p);
 871   void do_oop(narrowOop* p);
 872 };
 873 
 874 ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure(ShenandoahGeneration* generation) :
 875   _heap(ShenandoahHeap::heap()),
 876   _generation(generation),
 877   _mark_context(ShenandoahHeap::heap()->marking_context()),
 878   _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
 879   _thread(Thread::current()) {
 880 }
 881 
// Process one weak root slot: clear it if the referent is dead in this
// generation; otherwise, if evacuation is in progress and the referent is in
// the collection set, make sure the slot points at the to-space copy.
void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
  const oop obj = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(obj)) {
    if (!_mark_context->is_marked(obj)) {
      if (_generation->contains(obj)) {
        // Note: The obj is dead here. Do not touch it, just clear.
        ShenandoahHeap::atomic_clear_oop(p, obj);
      }
    } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
      oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      if (resolved == obj) {
        // Not yet forwarded: evacuate the object ourselves.
        resolved = _heap->evacuate_object(obj, _thread);
      }
      shenandoah_assert_not_in_cset_except(p, resolved, _heap->cancelled_gc());
      // Atomic update: other threads may be racing on the same slot.
      ShenandoahHeap::atomic_update_oop(resolved, p, obj);
    }
  }
}
 900 
// The narrow-oop variant is never used for these roots; visiting one here
// would be a bug.
void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
  ShouldNotReachHere();
}
 904 
 905 class ShenandoahIsCLDAliveClosure : public CLDClosure {
 906 public:
 907   void do_cld(ClassLoaderData* cld) {
 908     cld->is_alive();
 909   }
 910 };
 911 
 912 class ShenandoahIsNMethodAliveClosure: public NMethodClosure {
 913 public:
 914   void do_nmethod(nmethod* n) {
 915     n->is_unloading();
 916   }
 917 };
 918 
// This task not only evacuates/updates marked weak roots, but also "null"
// dead weak roots.
class ShenandoahConcurrentWeakRootsEvacUpdateTask : public WorkerTask {
private:
  ShenandoahVMWeakRoots<true /*concurrent*/> _vm_roots;

  // Roots related to concurrent class unloading
  ShenandoahClassLoaderDataRoots<true /* concurrent */>
                                             _cld_roots;
  ShenandoahConcurrentNMethodIterator        _nmethod_itr;
  ShenandoahGeneration*                      _generation;
  ShenandoahPhaseTimings::Phase              _phase;

public:
  ShenandoahConcurrentWeakRootsEvacUpdateTask(ShenandoahGeneration* generation, ShenandoahPhaseTimings::Phase phase) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Weak Roots"),
    _vm_roots(phase),
    _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers(), false /*heap iteration*/),
    _nmethod_itr(ShenandoahCodeRoots::table()),
    _generation(generation),
    _phase(phase) {}

  ~ShenandoahConcurrentWeakRootsEvacUpdateTask() {
    // Notify runtime data structures of potentially dead oops
    _vm_roots.report_num_dead();
  }

  void work(uint worker_id) override {
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    // Participate in the suspendible thread set while doing concurrent work.
    ShenandoahSuspendibleThreadSetJoiner sts_join;
    {
      ShenandoahEvacOOMScope oom;
      // jni_roots and weak_roots are OopStorage backed roots, concurrent iteration
      // may race against OopStorage::release() calls.
      ShenandoahEvacUpdateCleanupOopStorageRootsClosure cl(_generation);
      _vm_roots.oops_do(&cl, worker_id);
    }

    // If we are going to perform concurrent class unloading later on, we need to
    // clean up the weak oops in CLD and determine nmethod's unloading state, so that we
    // can clean up immediate garbage sooner.
    if (ShenandoahHeap::heap()->unload_classes()) {
      // Applies ShenandoahIsCLDAlive closure to CLDs, native barrier will either null the
      // CLD's holder or evacuate it.
      {
        ShenandoahIsCLDAliveClosure is_cld_alive;
        _cld_roots.cld_do(&is_cld_alive, worker_id);
      }

      // Applies ShenandoahIsNMethodAliveClosure to registered nmethods.
      // The closure calls nmethod->is_unloading(). The is_unloading
      // state is cached, therefore, during concurrent class unloading phase,
      // we will not touch the metadata of unloading nmethods
      {
        ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
        ShenandoahIsNMethodAliveClosure is_nmethod_alive;
        _nmethod_itr.nmethods_do(&is_nmethod_alive);
      }
    }
  }
};
 980 
// Concurrent weak root processing: evacuate/update live OopStorage-backed
// weak roots and null out the dead ones, then rendezvous with mutators.
void ShenandoahConcurrentGC::op_weak_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
  {
    // Concurrent weak root processing
    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_work);
    ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots_work);
    ShenandoahConcurrentWeakRootsEvacUpdateTask task(_generation, ShenandoahPhaseTimings::conc_weak_roots_work);
    heap->workers()->run_task(&task);
  }

  {
    // It is possible for mutators executing the load reference barrier to have
    // loaded an oop through a weak handle that has since been nulled out by
    // weak root processing. Handshaking here forces them to complete the
    // barrier before the GC cycle continues and does something that would
    // change the evaluation of the barrier (for example, resetting the TAMS
    // on trashed regions could make an oop appear to be marked _after_ the
    // region has been recycled).
    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_rendezvous);
    heap->rendezvous_threads("Shenandoah Concurrent Weak Roots");
  }
}
1004 
1005 void ShenandoahConcurrentGC::op_class_unloading() {
1006   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1007   assert (heap->is_concurrent_weak_root_in_progress() &&
1008           heap->unload_classes(),
1009           "Checked by caller");
1010   heap->do_class_unloading();
1011 }
1012 
1013 class ShenandoahEvacUpdateCodeCacheClosure : public NMethodClosure {
1014 private:
1015   BarrierSetNMethod* const                  _bs;
1016   ShenandoahEvacuateUpdateMetadataClosure   _cl;
1017 
1018 public:
1019   ShenandoahEvacUpdateCodeCacheClosure() :
1020     _bs(BarrierSet::barrier_set()->barrier_set_nmethod()),
1021     _cl() {
1022   }
1023 
1024   void do_nmethod(nmethod* n) {
1025     ShenandoahNMethod* data = ShenandoahNMethod::gc_data(n);
1026     ShenandoahNMethodLocker locker(data->lock());
1027     // Setup EvacOOM scope below reentrant lock to avoid deadlock with
1028     // nmethod_entry_barrier
1029     ShenandoahEvacOOMScope oom;
1030     data->oops_do(&_cl, true/*fix relocation*/);
1031     _bs->disarm(n);
1032   }
1033 };
1034 
1035 class ShenandoahConcurrentRootsEvacUpdateTask : public WorkerTask {
1036 private:
1037   ShenandoahPhaseTimings::Phase                 _phase;
1038   ShenandoahVMRoots<true /*concurrent*/>        _vm_roots;
1039   ShenandoahClassLoaderDataRoots<true /*concurrent*/>
1040                                                 _cld_roots;
1041   ShenandoahConcurrentNMethodIterator           _nmethod_itr;
1042 
1043 public:
1044   ShenandoahConcurrentRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
1045     WorkerTask("Shenandoah Evacuate/Update Concurrent Strong Roots"),
1046     _phase(phase),
1047     _vm_roots(phase),
1048     _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers(), false /*heap iteration*/),
1049     _nmethod_itr(ShenandoahCodeRoots::table()) {}
1050 
1051   void work(uint worker_id) {
1052     ShenandoahConcurrentWorkerSession worker_session(worker_id);
1053     {
1054       ShenandoahEvacOOMScope oom;
1055       {
1056         // vm_roots and weak_roots are OopStorage backed roots, concurrent iteration
1057         // may race against OopStorage::release() calls.
1058         ShenandoahContextEvacuateUpdateRootsClosure cl;
1059         _vm_roots.oops_do<ShenandoahContextEvacuateUpdateRootsClosure>(&cl, worker_id);
1060       }
1061 
1062       {
1063         ShenandoahEvacuateUpdateMetadataClosure cl;
1064         CLDToOopClosure clds(&cl, ClassLoaderData::_claim_strong);
1065         _cld_roots.cld_do(&clds, worker_id);
1066       }
1067     }
1068 
1069     // Cannot setup ShenandoahEvacOOMScope here, due to potential deadlock with nmethod_entry_barrier.
1070     if (!ShenandoahHeap::heap()->unload_classes()) {
1071       ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
1072       ShenandoahEvacUpdateCodeCacheClosure cl;
1073       _nmethod_itr.nmethods_do(&cl);
1074     }
1075   }
1076 };
1077 
// Concurrent strong root processing: run the evac/update task, then clear
// the strong-root-in-progress flag.
void ShenandoahConcurrentGC::op_strong_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
  ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
  heap->workers()->run_task(&task);
  heap->set_concurrent_strong_root_in_progress(false);
}
1085 
1086 void ShenandoahConcurrentGC::op_cleanup_early() {
1087   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
1088                               ShenandoahWorkerPolicy::calc_workers_for_conc_cleanup(),
1089                               "cleanup early.");
1090   ShenandoahHeap::heap()->recycle_trash();
1091 }
1092 
// Concurrent evacuation of the collection set for this generation.
void ShenandoahConcurrentGC::op_evacuate() {
  ShenandoahHeap::heap()->evacuate_collection_set(_generation, true /*concurrent*/);
}
1096 
1097 void ShenandoahConcurrentGC::op_init_update_refs() {
1098   if (ShenandoahVerify) {
1099     ShenandoahHeap* const heap = ShenandoahHeap::heap();
1100     ShenandoahTimingsTracker v(ShenandoahPhaseTimings::init_update_refs_verify);
1101     heap->verifier()->verify_before_update_refs(_generation);
1102   }
1103 }
1104 
// Concurrently update heap references for this generation.
void ShenandoahConcurrentGC::op_update_refs() {
  ShenandoahHeap::heap()->update_heap_references(_generation, true /*concurrent*/);
}
1108 
// Handshake closure that updates references in Java thread stack roots;
// used by op_update_thread_roots().
class ShenandoahUpdateThreadHandshakeClosure : public HandshakeClosure {
private:
  // This closure runs when thread is stopped for handshake, which means
  // we can use non-concurrent closure here, as long as it only updates
  // locations modified by the thread itself, i.e. stack locations.
  ShenandoahNonConcUpdateRefsClosure _cl;
public:
  ShenandoahUpdateThreadHandshakeClosure();
  void do_thread(Thread* thread) override;
};
1119 
// The string is the name under which this handshake operation is reported.
ShenandoahUpdateThreadHandshakeClosure::ShenandoahUpdateThreadHandshakeClosure() :
  HandshakeClosure("Shenandoah Update Thread Roots") {
}
1123 
1124 void ShenandoahUpdateThreadHandshakeClosure::do_thread(Thread* thread) {
1125   if (thread->is_Java_thread()) {
1126     JavaThread* jt = JavaThread::cast(thread);
1127     ResourceMark rm;
1128     jt->oops_do(&_cl, nullptr);
1129   }
1130 }
1131 
class ShenandoahUpdateThreadRootsAndFlushOldSatbBuffers final : public HandshakeClosure {
  // When Shenandoah is marking the old generation, it is possible for the SATB barrier
  // to pick up overwritten pointers that point into a cset region. If these pointers
  // are accessed by mark threads, they will crash. Once update refs has completed, it is
  // no longer possible for a mutator thread to overwrite a pointer into a cset region.
  //
  // Therefore, at the end of update refs, we use this closure to update the thread roots
  // and 'complete' all the thread local SATB buffers. Completing these will filter out
  // anything that has already been marked or anything that points to a region which is
  // not old. We do not need to worry about ABA situations where a region may become old
  // after the pointer is enqueued but before it is filtered. There are only two ways a
  // region may become old:
  //  1. The region is promoted in place. This is safe because such regions will never
  //     be in the collection set. If this happens, the pointer will be preserved, essentially
  //     becoming part of the old snapshot.
  //  2. The region is allocated during evacuation of old. This is also not a concern because
  //     we haven't yet finished marking old so no mixed evacuations will happen.

  // Updates references in the thread's stack roots.
  ShenandoahUpdateThreadHandshakeClosure _update_roots;
  // Completes ("flushes") the thread's local SATB buffer.
  ShenandoahFlushSATB _flush_all_satb;

public:
  ShenandoahUpdateThreadRootsAndFlushOldSatbBuffers() :
    HandshakeClosure("Shenandoah Update Thread Roots and Flush SATB"),
    _flush_all_satb(ShenandoahBarrierSet::satb_mark_queue_set()) {
    assert(ShenandoahBarrierSet::satb_mark_queue_set().get_filter_out_young(),
           "Should be filtering pointers outside of old during old marking");
  }

  void do_thread(Thread* thread) override {
    // Both operations happen under the same handshake stop of the thread.
    _update_roots.do_thread(thread);
    _flush_all_satb.do_thread(thread);
  }
};
1165 
// Update thread stack roots via handshake. When old marking is in progress,
// also flush the per-thread SATB buffers in the same handshake (see the
// comment on ShenandoahUpdateThreadRootsAndFlushOldSatbBuffers for why).
void ShenandoahConcurrentGC::op_update_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (heap->is_concurrent_old_mark_in_progress()) {
    ShenandoahUpdateThreadRootsAndFlushOldSatbBuffers cl;
    Handshake::execute(&cl);
  } else {
    ShenandoahUpdateThreadHandshakeClosure cl;
    Handshake::execute(&cl);
  }
}
1176 
// Final update-refs safepoint operation: finish concurrent root updates,
// recompute region states, clear the update-refs/forwarding flags, rebuild
// the free set, and publish the new gc state to all threads.
void ShenandoahConcurrentGC::op_final_update_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
  assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");

  heap->finish_concurrent_roots();

  // Clear cancelled GC, if set. On cancellation path, the block before would handle
  // everything.
  if (heap->cancelled_gc()) {
    heap->clear_cancelled_gc();
  }

  // Has to be done before cset is clear
  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_in_to_space(_generation);
  }

  // If we are running in generational mode and this is an aging cycle, this will also age active
  // regions that haven't been used for allocation.
  heap->update_heap_region_states(true /*concurrent*/);

  heap->set_update_refs_in_progress(false);
  heap->set_has_forwarded_objects(false);

  if (heap->mode()->is_generational() && heap->is_concurrent_old_mark_in_progress()) {
    // Aging_cycle is only relevant during evacuation cycle for individual objects and during final mark for
    // entire regions.  Both of these relevant operations occur before final update refs.
    ShenandoahGenerationalHeap::heap()->set_aging_cycle(false);
  }

  if (ShenandoahVerify) {
    ShenandoahTimingsTracker v(ShenandoahPhaseTimings::final_update_refs_verify);
    heap->verifier()->verify_after_update_refs(_generation);
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  heap->rebuild_free_set(true /*concurrent*/);
  // The collection work is done; heuristics begin tracking idle time.
  _generation->heuristics()->start_idle_span();

  {
    // Publish the updated gc state to all Java threads.
    ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_propagate_gc_state);
    heap->propagate_gc_state_to_all_threads();
  }
}
1225 
1226 bool ShenandoahConcurrentGC::entry_final_roots() {
1227   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1228   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
1229 
1230 
1231   const char* msg = conc_final_roots_event_message();
1232   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_final_roots);
1233   EventMark em("%s", msg);
1234   ShenandoahWorkerScope scope(heap->workers(),
1235                               ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
1236                               msg);
1237 
1238   if (heap->mode()->is_generational()) {
1239     if (!complete_abbreviated_cycle()) {
1240       return false;
1241     }
1242   }
1243 
1244   heap->concurrent_final_roots();
1245   return true;
1246 }
1247 
// Verification hook at the final-roots pause; only does work when the
// VerifyAfterGC flag is set.
void ShenandoahConcurrentGC::op_verify_final_roots() {
  if (VerifyAfterGC) {
    Universe::verify();
  }
}
1253 
1254 void ShenandoahConcurrentGC::op_cleanup_complete() {
1255   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
1256                               ShenandoahWorkerPolicy::calc_workers_for_conc_cleanup(),
1257                               "cleanup complete.");
1258   ShenandoahHeap::heap()->recycle_trash();
1259 }
1260 
// Post-collection concurrent reset: clear mark bitmaps in preparation for
// the next cycle, except where old-gen marking still needs them.
void ShenandoahConcurrentGC::op_reset_after_collect() {
  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                          ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
                          "reset after collection.");

  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (heap->mode()->is_generational()) {
    // If we are in the midst of an old gc bootstrap or an old marking, we want to leave the mark bit map of
    // the young generation intact. In particular, reference processing in the old generation may potentially
    // need the reachability of a young generation referent of a Reference object in the old generation.
    if (!_do_old_gc_bootstrap && !heap->is_concurrent_old_mark_in_progress()) {
      heap->young_generation()->reset_mark_bitmap<false>();
    }
  } else {
    _generation->reset_mark_bitmap<false>();
  }
}
1278 
1279 bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
1280   if (ShenandoahHeap::heap()->cancelled_gc()) {
1281     _degen_point = point;
1282     return true;
1283   }
1284   return false;
1285 }
1286 
1287 const char* ShenandoahConcurrentGC::init_mark_event_message() const {
1288   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1289   assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1290   if (heap->unload_classes()) {
1291     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Init Mark", " (unload classes)");
1292   } else {
1293     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Init Mark", "");
1294   }
1295 }
1296 
1297 const char* ShenandoahConcurrentGC::final_mark_event_message() const {
1298   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1299   assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
1300          "Should not have forwarded objects during final mark, unless old gen concurrent mark is running");
1301 
1302   if (heap->unload_classes()) {
1303     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Mark", " (unload classes)");
1304   } else {
1305     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Mark", "");
1306   }
1307 }
1308 
1309 const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
1310   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1311   assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
1312          "Should not have forwarded objects concurrent mark, unless old gen concurrent mark is running");
1313   if (heap->unload_classes()) {
1314     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent marking", " (unload classes)");
1315   } else {
1316     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent marking", "");
1317   }
1318 }
1319 
1320 const char* ShenandoahConcurrentGC::conc_reset_event_message() const {
1321   if (ShenandoahHeap::heap()->unload_classes()) {
1322     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent reset", " (unload classes)");
1323   } else {
1324     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent reset", "");
1325   }
1326 }
1327 
1328 const char* ShenandoahConcurrentGC::conc_reset_after_collect_event_message() const {
1329   if (ShenandoahHeap::heap()->unload_classes()) {
1330     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent reset after collect", " (unload classes)");
1331   } else {
1332     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent reset after collect", "");
1333   }
1334 }
1335 
1336 const char* ShenandoahConcurrentGC::verify_final_roots_event_message() const {
1337   if (ShenandoahHeap::heap()->unload_classes()) {
1338     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Verify Final Roots", " (unload classes)");
1339   } else {
1340     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Verify Final Roots", "");
1341   }
1342 }
1343 
1344 const char* ShenandoahConcurrentGC::conc_final_roots_event_message() const {
1345   if (ShenandoahHeap::heap()->unload_classes()) {
1346     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent Final Roots", " (unload classes)");
1347   } else {
1348     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent Final Roots", "");
1349   }
1350 }
1351 
1352 const char* ShenandoahConcurrentGC::conc_weak_refs_event_message() const {
1353   if (ShenandoahHeap::heap()->unload_classes()) {
1354     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent weak references", " (unload classes)");
1355   } else {
1356     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent weak references", "");
1357   }
1358 }
1359 
1360 const char* ShenandoahConcurrentGC::conc_weak_roots_event_message() const {
1361   if (ShenandoahHeap::heap()->unload_classes()) {
1362     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent weak roots", " (unload classes)");
1363   } else {
1364     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent weak roots", "");
1365   }
1366 }
1367 
1368 const char* ShenandoahConcurrentGC::conc_cleanup_event_message() const {
1369   if (ShenandoahHeap::heap()->unload_classes()) {
1370     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent cleanup", " (unload classes)");
1371   } else {
1372     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent cleanup", "");
1373   }
1374 }
1375 
1376 const char* ShenandoahConcurrentGC::conc_init_update_refs_event_message() const {
1377   if (ShenandoahHeap::heap()->unload_classes()) {
1378     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent Init Update Refs", " (unload classes)");
1379   } else {
1380     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent Init Update Refs", "");
1381   }
1382 }