1 /*
   2  * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
   4  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
   5  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   6  *
   7  * This code is free software; you can redistribute it and/or modify it
   8  * under the terms of the GNU General Public License version 2 only, as
   9  * published by the Free Software Foundation.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 
  28 #include "gc/shared/barrierSetNMethod.hpp"
  29 #include "gc/shared/collectorCounters.hpp"
  30 #include "gc/shared/continuationGCSupport.inline.hpp"
  31 #include "gc/shenandoah/shenandoahBreakpoint.hpp"
  32 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  33 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  34 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
  35 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  36 #include "gc/shenandoah/shenandoahGeneration.hpp"
  37 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
  38 #include "gc/shenandoah/shenandoahLock.hpp"
  39 #include "gc/shenandoah/shenandoahMark.inline.hpp"
  40 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  41 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  42 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  43 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  44 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  45 #include "gc/shenandoah/shenandoahStackWatermark.hpp"
  46 #include "gc/shenandoah/shenandoahUtils.hpp"
  47 #include "gc/shenandoah/shenandoahVerifier.hpp"
  48 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  49 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  50 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  51 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  52 #include "memory/allocation.hpp"
  53 #include "prims/jvmtiTagMap.hpp"
  54 #include "runtime/vmThread.hpp"
  55 #include "utilities/events.hpp"
  56 
  57 // Breakpoint support
  58 class ShenandoahBreakpointGCScope : public StackObj {
  59 private:
  60   const GCCause::Cause _cause;
  61 public:
  62   ShenandoahBreakpointGCScope(GCCause::Cause cause) : _cause(cause) {
  63     if (cause == GCCause::_wb_breakpoint) {
  64       ShenandoahBreakpoint::start_gc();
  65       ShenandoahBreakpoint::at_before_gc();
  66     }
  67   }
  68 
  69   ~ShenandoahBreakpointGCScope() {
  70     if (_cause == GCCause::_wb_breakpoint) {
  71       ShenandoahBreakpoint::at_after_gc();
  72     }
  73   }
  74 };
  75 
  76 class ShenandoahBreakpointMarkScope : public StackObj {
  77 private:
  78   const GCCause::Cause _cause;
  79 public:
  80   ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
  81     if (_cause == GCCause::_wb_breakpoint) {
  82       ShenandoahBreakpoint::at_after_marking_started();
  83     }
  84   }
  85 
  86   ~ShenandoahBreakpointMarkScope() {
  87     if (_cause == GCCause::_wb_breakpoint) {
  88       ShenandoahBreakpoint::at_before_marking_completed();
  89     }
  90   }
  91 };
  92 
// Constructs a concurrent GC driver for the given generation. When
// do_old_gc_bootstrap is true, this cycle also prepares the global
// generation so an old-generation mark can follow (see op_reset()).
ShenandoahConcurrentGC::ShenandoahConcurrentGC(ShenandoahGeneration* generation, bool do_old_gc_bootstrap) :
  ShenandoahGC(generation),
  _mark(generation),
  _degen_point(ShenandoahDegenPoint::_degenerated_unset),   // set when/if the cycle is cancelled
  _abbreviated(false),                                      // true if evacuation is skipped
  _do_old_gc_bootstrap(do_old_gc_bootstrap) {
}
 100 
// Returns the point at which a degenerated cycle must resume after this
// concurrent cycle was cancelled; _degenerated_unset if not cancelled.
ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
  return _degen_point;
}
 104 
 105 void ShenandoahConcurrentGC::entry_concurrent_update_refs_prepare(ShenandoahHeap* const heap) {
 106   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 107   const char* msg = conc_init_update_refs_event_message();
 108   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs_prepare);
 109   EventMark em("%s", msg);
 110 
 111   // Evacuation is complete, retire gc labs and change gc state
 112   heap->concurrent_prepare_for_update_refs();
 113 }
 114 
// Drives one complete concurrent collection of _generation: reset, STW init
// mark, concurrent marking, STW final mark, then either evacuation plus
// reference updating, or an abbreviated finish when there is nothing to
// evacuate. Returns true when the cycle ran to completion; returns false when
// the cycle was cancelled, with _degen_point recording where a degenerated
// cycle must resume.
bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  // Soft references are cleared eagerly only for causes that demand it.
  _generation->ref_processor()->set_soft_reference_policy(
      GCCause::should_clear_all_soft_refs(cause));

  ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);

  // Reset for upcoming marking
  entry_reset();

  // Start initial mark under STW
  vmop_entry_init_mark();

  {
    ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);

    // Reset task queue stats here, rather than in mark_concurrent_roots,
    // because remembered set scan will `push` oops into the queues and
    // resetting after this happens will lose those counts.
    TASKQUEUE_STATS_ONLY(_mark.task_queues()->reset_taskqueue_stats());

    // Concurrent remembered set scanning
    entry_scan_remembered_set();

    // Concurrent mark roots
    entry_mark_roots();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_roots)) {
      return false;
    }

    // Continue concurrent mark
    entry_mark();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) {
      return false;
    }
  }

  // Complete marking under STW, and start evacuation
  vmop_entry_final_mark();

  // If the GC was cancelled before final mark, nothing happens on the safepoint. We are still
  // in the marking phase and must resume the degenerated cycle from there. If the GC was cancelled
  // after final mark, then we've entered the evacuation phase and must resume the degenerated cycle
  // from that phase.
  if (_generation->is_concurrent_mark_in_progress()) {
    bool cancelled = check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark);
    assert(cancelled, "GC must have been cancelled between concurrent and final mark");
    return false;
  }

  assert(heap->is_concurrent_weak_root_in_progress(), "Must be doing weak roots now");

  // Concurrent stack processing
  if (heap->is_evacuation_in_progress()) {
    entry_thread_roots();
  }

  // Process weak roots that might still point to regions that would be broken by cleanup.
  // We cannot recycle regions because weak roots need to know what is marked in trashed regions.
  entry_weak_refs();
  entry_weak_roots();

  // Perform concurrent class unloading before any regions get recycled. Class unloading may
  // need to inspect unmarked objects in trashed regions.
  if (heap->unload_classes()) {
    entry_class_unloading();
  }

  // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
  // the space. This would be the last action if there is nothing to evacuate.  Note that
  // we will not age young-gen objects in the case that we skip evacuation.
  entry_cleanup_early();

  heap->free_set()->log_status_under_lock();

  // Processing strong roots
  // This may be skipped if there is nothing to update/evacuate.
  // If so, strong_root_in_progress would be unset.
  if (heap->is_concurrent_strong_root_in_progress()) {
    entry_strong_roots();
  }

  // Continue the cycle with evacuation and optional update-refs.
  // This may be skipped if there is nothing to evacuate.
  // If so, evac_in_progress would be unset by collection set preparation code.
  if (heap->is_evacuation_in_progress()) {
    // Concurrently evacuate
    entry_evacuate();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) {
      return false;
    }

    // Perform update-refs phase.
    // The init-update-refs safepoint is only needed for verification.
    entry_concurrent_update_refs_prepare(heap);
    if (ShenandoahVerify) {
      vmop_entry_init_update_refs();
    }

    entry_update_refs();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_update_refs)) {
      return false;
    }

    // Concurrent update thread roots
    entry_update_thread_roots();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_update_refs)) {
      return false;
    }

    vmop_entry_final_update_refs();

    // Update references freed up collection set, kick the cleanup to reclaim the space.
    entry_cleanup_complete();
  } else {
    // Nothing to evacuate: finish with the abbreviated-cycle path.
    _abbreviated = true;
    if (!entry_final_roots()) {
      assert(_degen_point != _degenerated_unset, "Need to know where to start degenerated cycle");
      return false;
    }

    if (VerifyAfterGC) {
      vmop_entry_verify_final_roots();
    }
  }

  // We defer generation resizing actions until after cset regions have been recycled.  We do this even following an
  // abbreviated cycle.
  if (heap->mode()->is_generational()) {
    ShenandoahGenerationalHeap::heap()->complete_concurrent_cycle();
  }

  // Instead of always resetting immediately before the start of a new GC, we can often reset at the end of the
  // previous GC. This allows us to start the next GC cycle more quickly after a trigger condition is detected,
  // reducing the likelihood that GC will degenerate.
  entry_reset_after_collect();

  return true;
}
 253 
// Finishes an abbreviated cycle (one that skipped evacuation). Performs any
// pending promote-in-place work and updates region ages. Returns false if the
// cycle was cancelled during promote-in-place, in which case a degenerated
// cycle must complete the operation. Generational mode only.
bool ShenandoahConcurrentGC::complete_abbreviated_cycle() {
  shenandoah_assert_generational();

  ShenandoahGenerationalHeap* const heap = ShenandoahGenerationalHeap::heap();

  // We chose not to evacuate because we found sufficient immediate garbage.
  // However, there may still be regions to promote in place, so do that now.
  if (heap->old_generation()->has_in_place_promotions()) {
    entry_promote_in_place();

    // If the promote-in-place operation was cancelled, we can have the degenerated
    // cycle complete the operation. It will see that no evacuations are in progress,
    // and that there are regions wanting promotion. The risk with not handling the
    // cancellation would be failing to restore top for these regions and leaving
    // them unable to serve allocations for the old generation. This will leave the weak
    // roots flag set (the degenerated cycle will unset it).
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) {
      return false;
    }
  }

  // At this point, the cycle is effectively complete. If the cycle has been cancelled here,
  // the control thread will detect it on its next iteration and run a degenerated young cycle.
  if (!_generation->is_old()) {
    heap->update_region_ages(_generation->complete_marking_context());
  }

  return true;
}
 283 
 284 void ShenandoahConcurrentGC::vmop_entry_init_mark() {
 285   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 286   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 287   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);
 288 
 289   heap->try_inject_alloc_failure();
 290   VM_ShenandoahInitMark op(this);
 291   VMThread::execute(&op); // jump to entry_init_mark() under safepoint
 292 }
 293 
 294 void ShenandoahConcurrentGC::vmop_entry_final_mark() {
 295   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 296   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 297   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);
 298 
 299   heap->try_inject_alloc_failure();
 300   VM_ShenandoahFinalMarkStartEvac op(this);
 301   VMThread::execute(&op); // jump to entry_final_mark under safepoint
 302 }
 303 
 304 void ShenandoahConcurrentGC::vmop_entry_init_update_refs() {
 305   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 306   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 307   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_update_refs_gross);
 308 
 309   heap->try_inject_alloc_failure();
 310   VM_ShenandoahInitUpdateRefs op(this);
 311   VMThread::execute(&op);
 312 }
 313 
 314 void ShenandoahConcurrentGC::vmop_entry_final_update_refs() {
 315   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 316   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 317   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_gross);
 318 
 319   heap->try_inject_alloc_failure();
 320   VM_ShenandoahFinalUpdateRefs op(this);
 321   VMThread::execute(&op);
 322 }
 323 
 324 void ShenandoahConcurrentGC::vmop_entry_verify_final_roots() {
 325   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 326   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 327   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_roots_gross);
 328 
 329   // This phase does not use workers, no need for setup
 330   heap->try_inject_alloc_failure();
 331   VM_ShenandoahFinalRoots op(this);
 332   VMThread::execute(&op);
 333 }
 334 
 335 void ShenandoahConcurrentGC::entry_init_mark() {
 336   const char* msg = init_mark_event_message();
 337   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_mark);
 338   EventMark em("%s", msg);
 339 
 340   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
 341                               ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
 342                               "init marking");
 343 
 344   op_init_mark();
 345 }
 346 
 347 void ShenandoahConcurrentGC::entry_final_mark() {
 348   const char* msg = final_mark_event_message();
 349   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark);
 350   EventMark em("%s", msg);
 351 
 352   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
 353                               ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
 354                               "final marking");
 355 
 356   op_final_mark();
 357 }
 358 
 359 void ShenandoahConcurrentGC::entry_init_update_refs() {
 360   static const char* msg = "Pause Init Update Refs";
 361   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs);
 362   EventMark em("%s", msg);
 363 
 364   // No workers used in this phase, no setup required
 365   op_init_update_refs();
 366 }
 367 
 368 void ShenandoahConcurrentGC::entry_final_update_refs() {
 369   static const char* msg = "Pause Final Update Refs";
 370   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
 371   EventMark em("%s", msg);
 372 
 373   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
 374                               ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
 375                               "final reference update");
 376 
 377   op_final_update_refs();
 378 }
 379 
 380 void ShenandoahConcurrentGC::entry_verify_final_roots() {
 381   const char* msg = verify_final_roots_event_message();
 382   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots);
 383   EventMark em("%s", msg);
 384 
 385   op_verify_final_roots();
 386 }
 387 
 388 void ShenandoahConcurrentGC::entry_reset() {
 389   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 390   heap->try_inject_alloc_failure();
 391 
 392   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 393   {
 394     const char* msg = conc_reset_event_message();
 395     ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
 396     EventMark em("%s", msg);
 397 
 398     ShenandoahWorkerScope scope(heap->workers(),
 399                                 ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
 400                                 msg);
 401     op_reset();
 402   }
 403 }
 404 
 405 void ShenandoahConcurrentGC::entry_scan_remembered_set() {
 406   if (_generation->is_young()) {
 407     ShenandoahHeap* const heap = ShenandoahHeap::heap();
 408     TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 409     const char* msg = "Concurrent remembered set scanning";
 410     ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::init_scan_rset);
 411     EventMark em("%s", msg);
 412 
 413     ShenandoahWorkerScope scope(heap->workers(),
 414                                 ShenandoahWorkerPolicy::calc_workers_for_rs_scanning(),
 415                                 msg);
 416 
 417     heap->try_inject_alloc_failure();
 418     _generation->scan_remembered_set(true /* is_concurrent */);
 419   }
 420 }
 421 
 422 void ShenandoahConcurrentGC::entry_mark_roots() {
 423   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 424   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 425   const char* msg = "Concurrent marking roots";
 426   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
 427   EventMark em("%s", msg);
 428 
 429   ShenandoahWorkerScope scope(heap->workers(),
 430                               ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
 431                               "concurrent marking roots");
 432 
 433   heap->try_inject_alloc_failure();
 434   op_mark_roots();
 435 }
 436 
 437 void ShenandoahConcurrentGC::entry_mark() {
 438   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 439   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 440   const char* msg = conc_mark_event_message();
 441   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark);
 442   EventMark em("%s", msg);
 443 
 444   ShenandoahWorkerScope scope(heap->workers(),
 445                               ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
 446                               "concurrent marking");
 447 
 448   heap->try_inject_alloc_failure();
 449   op_mark();
 450 }
 451 
 452 void ShenandoahConcurrentGC::entry_thread_roots() {
 453   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 454   static const char* msg = "Concurrent thread roots";
 455   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_thread_roots);
 456   EventMark em("%s", msg);
 457 
 458   ShenandoahWorkerScope scope(heap->workers(),
 459                               ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
 460                               msg);
 461 
 462   heap->try_inject_alloc_failure();
 463   op_thread_roots();
 464 }
 465 
 466 void ShenandoahConcurrentGC::entry_weak_refs() {
 467   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 468   const char* msg = conc_weak_refs_event_message();
 469   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_refs);
 470   EventMark em("%s", msg);
 471 
 472   ShenandoahWorkerScope scope(heap->workers(),
 473                               ShenandoahWorkerPolicy::calc_workers_for_conc_refs_processing(),
 474                               "concurrent weak references");
 475 
 476   heap->try_inject_alloc_failure();
 477   op_weak_refs();
 478 }
 479 
 480 void ShenandoahConcurrentGC::entry_weak_roots() {
 481   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 482   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 483   const char* msg = conc_weak_roots_event_message();
 484   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_roots);
 485   EventMark em("%s", msg);
 486 
 487   ShenandoahWorkerScope scope(heap->workers(),
 488                               ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
 489                               "concurrent weak root");
 490 
 491   heap->try_inject_alloc_failure();
 492   op_weak_roots();
 493 }
 494 
 495 void ShenandoahConcurrentGC::entry_class_unloading() {
 496   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 497   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 498   static const char* msg = "Concurrent class unloading";
 499   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_class_unload);
 500   EventMark em("%s", msg);
 501 
 502   ShenandoahWorkerScope scope(heap->workers(),
 503                               ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
 504                               "concurrent class unloading");
 505 
 506   heap->try_inject_alloc_failure();
 507   op_class_unloading();
 508 }
 509 
 510 void ShenandoahConcurrentGC::entry_strong_roots() {
 511   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 512   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 513   static const char* msg = "Concurrent strong roots";
 514   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_strong_roots);
 515   EventMark em("%s", msg);
 516 
 517   ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_strong_roots);
 518 
 519   ShenandoahWorkerScope scope(heap->workers(),
 520                               ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
 521                               "concurrent strong root");
 522 
 523   heap->try_inject_alloc_failure();
 524   op_strong_roots();
 525 }
 526 
 527 void ShenandoahConcurrentGC::entry_cleanup_early() {
 528   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 529   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 530   const char* msg = conc_cleanup_event_message();
 531   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_early, true /* log_heap_usage */);
 532   EventMark em("%s", msg);
 533 
 534   // This phase does not use workers, no need for setup
 535   heap->try_inject_alloc_failure();
 536   op_cleanup_early();
 537   if (!heap->is_evacuation_in_progress()) {
 538     // This is an abbreviated cycle.  Rebuild the freeset in order to establish reserves for the next GC cycle.  Doing
 539     // the rebuild ASAP also expedites availability of immediate trash, reducing the likelihood that we will degenerate
 540     // during promote-in-place processing.
 541     heap->rebuild_free_set(true /*concurrent*/);
 542   }
 543 }
 544 
 545 void ShenandoahConcurrentGC::entry_evacuate() {
 546   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 547   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 548 
 549   static const char* msg = "Concurrent evacuation";
 550   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_evac);
 551   EventMark em("%s", msg);
 552 
 553   ShenandoahWorkerScope scope(heap->workers(),
 554                               ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
 555                               "concurrent evacuation");
 556 
 557   heap->try_inject_alloc_failure();
 558   op_evacuate();
 559 }
 560 
 561 void ShenandoahConcurrentGC::entry_promote_in_place() const {
 562   shenandoah_assert_generational();
 563 
 564   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::promote_in_place);
 565   ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::promote_in_place);
 566   EventMark em("%s", "Promote in place");
 567 
 568   ShenandoahGenerationalHeap::heap()->promote_regions_in_place(_generation, true);
 569 }
 570 
 571 void ShenandoahConcurrentGC::entry_update_thread_roots() {
 572   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 573   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 574 
 575   static const char* msg = "Concurrent update thread roots";
 576   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_thread_roots);
 577   EventMark em("%s", msg);
 578 
 579   // No workers used in this phase, no setup required
 580   heap->try_inject_alloc_failure();
 581   op_update_thread_roots();
 582 }
 583 
 584 void ShenandoahConcurrentGC::entry_update_refs() {
 585   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 586   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 587   static const char* msg = "Concurrent update references";
 588   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs);
 589   EventMark em("%s", msg);
 590 
 591   ShenandoahWorkerScope scope(heap->workers(),
 592                               ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
 593                               "concurrent reference update");
 594 
 595   heap->try_inject_alloc_failure();
 596   op_update_refs();
 597 }
 598 
 599 void ShenandoahConcurrentGC::entry_cleanup_complete() {
 600   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 601   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 602   const char* msg = conc_cleanup_event_message();
 603   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
 604   EventMark em("%s", msg);
 605 
 606   // This phase does not use workers, no need for setup
 607   heap->try_inject_alloc_failure();
 608   op_cleanup_complete();
 609 }
 610 
 611 void ShenandoahConcurrentGC::entry_reset_after_collect() {
 612   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 613   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 614   const char* msg = conc_reset_after_collect_event_message();
 615   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset_after_collect);
 616   EventMark em("%s", msg);
 617 
 618   op_reset_after_collect();
 619 }
 620 
// Prepares marking state (bitmaps etc., via prepare_gc()) for the upcoming
// cycle, and in generational mode marks the read card table clean.
void ShenandoahConcurrentGC::op_reset() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  // If it is old GC bootstrap cycle, always clear bitmap for global gen
  // to ensure bitmap for old gen is clear for old GC cycle after this.
  if (_do_old_gc_bootstrap) {
    assert(!heap->is_prepare_for_old_mark_in_progress(), "Cannot reset old without making it parsable");
    heap->global_generation()->prepare_gc();
  } else {
    _generation->prepare_gc();
  }

  if (heap->mode()->is_generational()) {
    // The read table will be swapped in at init mark; start it out clean.
    heap->old_generation()->card_scan()->mark_read_table_as_clean();
  }
}
 637 
 638 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
 639 private:
 640   ShenandoahMarkingContext* const _ctx;
 641 public:
 642   ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
 643 
 644   void heap_region_do(ShenandoahHeapRegion* r) {
 645     assert(!r->has_live(), "Region %zu should have no live data", r->index());
 646     if (r->is_active()) {
 647       // Check if region needs updating its TAMS. We have updated it already during concurrent
 648       // reset, so it is very likely we don't need to do another write here.  Since most regions
 649       // are not "active", this path is relatively rare.
 650       if (_ctx->top_at_mark_start(r) != r->top()) {
 651         _ctx->capture_top_at_mark_start(r);
 652       }
 653     } else {
 654       assert(_ctx->top_at_mark_start(r) == r->top(),
 655              "Region %zu should already have correct TAMS", r->index());
 656     }
 657   }
 658 
 659   bool is_thread_safe() { return true; }
 660 };
 661 
// Delegates mark-start setup to the concurrent mark object.
void ShenandoahConcurrentGC::start_mark() {
  _mark.start_mark();
}
 665 
// The init-mark safepoint operation: swaps card tables (generational mode),
// verifies preconditions, flips the marking flag, captures region TAMS,
// resets reference-processor thread locals, and arms nmethods/stacks for
// concurrent marking. Executed on the VM thread at a safepoint.
void ShenandoahConcurrentGC::op_init_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");

  assert(_generation->is_bitmap_clear(), "need clear marking bitmap");
  assert(!_generation->is_mark_complete(), "should not be complete");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");

  if (heap->mode()->is_generational()) {
    if (_generation->is_global()) {
      // A global collection supersedes any in-progress old collection.
      heap->old_generation()->cancel_gc();
    }

    {
      // After we swap card table below, the write-table is all clean, and the read table holds
      // cards dirty prior to the start of GC. Young and bootstrap collection will update
      // the write card table as a side effect of remembered set scanning. Global collection will
      // update the card table as a side effect of global marking of old objects.
      ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_swap_rset);
      _generation->swap_card_tables();
    }
  }

  if (ShenandoahVerify) {
    ShenandoahTimingsTracker v(ShenandoahPhaseTimings::init_mark_verify);
    heap->verifier()->verify_before_concmark(_generation);
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  _generation->set_concurrent_mark_in_progress(true);

  start_mark();

  if (_do_old_gc_bootstrap) {
    shenandoah_assert_generational();
    // Update region state for both young and old regions
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
    ShenandoahInitMarkUpdateRegionStateClosure cl;
    heap->parallel_heap_region_iterate(&cl);
    heap->old_generation()->ref_processor()->reset_thread_locals();
  } else {
    // Update region state for only young regions
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
    ShenandoahInitMarkUpdateRegionStateClosure cl;
    _generation->parallel_heap_region_iterate(&cl);
  }

  // Weak reference processing
  ShenandoahReferenceProcessor* rp = _generation->ref_processor();
  rp->reset_thread_locals();

  // Make above changes visible to worker threads
  OrderAccess::fence();

  // Arm nmethods for concurrent mark
  ShenandoahCodeRoots::arm_nmethods_for_mark();

  ShenandoahStackWatermark::change_epoch_id();

  {
    ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_propagate_gc_state);
    heap->propagate_gc_state_to_all_threads();
  }
}
 734 
// Delegates concurrent root marking to the mark object.
void ShenandoahConcurrentGC::op_mark_roots() {
  _mark.mark_concurrent_roots();
}
 738 
// Runs the main concurrent marking loop. ShenandoahDelayGC is a diagnostic
// knob that artificially stretches the phase by sleeping afterwards.
void ShenandoahConcurrentGC::op_mark() {
  _mark.concurrent_mark();
  if (ShenandoahDelayGC > 0) {
    os::naked_sleep(ShenandoahDelayGC);
  }
}
 745 
// Safepoint operation: finish concurrent marking, choose the collection set,
// and prepare the heap for concurrent evacuation (or wrap up marking if the
// collection set turns out to be empty).
void ShenandoahConcurrentGC::op_final_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");

  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_no_forwarded(_generation);
  }

  // Skip cset selection and evacuation setup if the cycle was cancelled during marking.
  if (!heap->cancelled_gc()) {
    _mark.finish_mark();
    assert(!heap->cancelled_gc(), "STW mark cannot OOM");

    // Notify JVMTI that the tagmap table will need cleaning.
    JvmtiTagMap::set_needs_cleaning();

    // The collection set is chosen by prepare_regions_and_collection_set(). Additionally, certain parameters have been
    // established to govern the evacuation efforts that are about to begin.  Refer to comments on reserve members in
    // ShenandoahGeneration and ShenandoahOldGeneration for more detail.
    _generation->prepare_regions_and_collection_set(true /*concurrent*/);

    // Has to be done after cset selection
    heap->prepare_concurrent_roots();

    if (!heap->collection_set()->is_empty()) {
      // Non-empty collection set: log it and transition into evacuation.
      LogTarget(Debug, gc, cset) lt;
      if (lt.is_enabled()) {
        ResourceMark rm;
        LogStream ls(lt);
        heap->collection_set()->print_on(&ls);
      }

      if (ShenandoahVerify) {
        ShenandoahTimingsTracker v(ShenandoahPhaseTimings::final_mark_verify);
        heap->verifier()->verify_before_evacuation(_generation);
      }

      heap->set_evacuation_in_progress(true);
      // From here on, we need to update references.
      heap->set_has_forwarded_objects(true);

      // Arm nmethods/stack for concurrent processing
      ShenandoahCodeRoots::arm_nmethods_for_evac();
      ShenandoahStackWatermark::change_epoch_id();

    } else {
      // Empty collection set: no evacuation will happen (the evacuation flags
      // above are never set on this path); just verify the post-mark state.
      if (ShenandoahVerify) {
        ShenandoahTimingsTracker v(ShenandoahPhaseTimings::final_mark_verify);
        if (has_in_place_promotions(heap)) {
          heap->verifier()->verify_after_concmark_with_promotions(_generation);
        } else {
          heap->verifier()->verify_after_concmark(_generation);
        }
      }
    }
  }

  {
    // Publish the updated gc state to all Java threads.
    ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_propagate_gc_state);
    heap->propagate_gc_state_to_all_threads();
  }
}
 808 
 809 bool ShenandoahConcurrentGC::has_in_place_promotions(ShenandoahHeap* heap) {
 810   return heap->mode()->is_generational() && heap->old_generation()->has_in_place_promotions();
 811 }
 812 
// Applies the given oop closure to a Java thread's stack by finishing that
// thread's stack watermark processing.
class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
private:
  OopClosure* const _oops;
public:
  explicit ShenandoahConcurrentEvacThreadClosure(OopClosure* oops) : _oops(oops) {}

  void do_thread(Thread* thread) override {
    // Only Java threads are expected here; the cast checks the thread kind.
    JavaThread* const jt = JavaThread::cast(thread);
    StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc);
  }
};
 824 
// Worker task that concurrently evacuates/updates thread stack roots,
// partitioning the Java threads among the workers via the thread iterator.
class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask {
private:
  ShenandoahJavaThreadsIterator _java_threads;

public:
  explicit ShenandoahConcurrentEvacUpdateThreadTask(uint n_workers) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Thread Roots"),
    _java_threads(ShenandoahPhaseTimings::conc_thread_roots, n_workers) {
  }

  void work(uint worker_id) override {
    // ShenandoahEvacOOMScope has to be setup by ShenandoahContextEvacuateUpdateRootsClosure.
    // Otherwise, may deadlock with watermark lock
    ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
    ShenandoahConcurrentEvacThreadClosure thr_cl(&oops_cl);
    _java_threads.threads_do(&thr_cl, worker_id);
  }
};
 843 
// Concurrent evacuation/update of thread stack roots, run on the worker pool.
void ShenandoahConcurrentGC::op_thread_roots() {
  const ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_evacuation_in_progress(), "Checked by caller");
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
  ShenandoahConcurrentEvacUpdateThreadTask task(heap->workers()->active_workers());
  heap->workers()->run_task(&task);
}
 851 
// Concurrent processing of discovered references for this generation.
void ShenandoahConcurrentGC::op_weak_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
  // Concurrent weak refs processing
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
  if (heap->gc_cause() == GCCause::_wb_breakpoint) {
    // WhiteBox test support: pause here once reference processing has started.
    ShenandoahBreakpoint::at_after_reference_processing_started();
  }
  _generation->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
}
 862 
 863 class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
 864 private:
 865   ShenandoahHeap* const _heap;
 866   ShenandoahGeneration* const _generation;
 867   ShenandoahMarkingContext* const _mark_context;
 868   bool  _evac_in_progress;
 869   Thread* const _thread;
 870 
 871 public:
 872   explicit ShenandoahEvacUpdateCleanupOopStorageRootsClosure(ShenandoahGeneration* generation);
 873   void do_oop(oop* p);
 874   void do_oop(narrowOop* p);
 875 };
 876 
// Snapshots heap state (marking context, evacuation flag) and the calling
// thread at construction time; the closure is then used by that same worker.
ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure(ShenandoahGeneration* generation) :
  _heap(ShenandoahHeap::heap()),
  _generation(generation),
  _mark_context(ShenandoahHeap::heap()->marking_context()),
  _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
  _thread(Thread::current()) {
}
 884 
// Process one root slot: clear it if the referent is dead in the collecting
// generation; otherwise evacuate/update it if it points into the cset.
void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
  const oop obj = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(obj)) {
    if (!_mark_context->is_marked(obj)) {
      if (_generation->contains(obj)) {
        // Note: The obj is dead here. Do not touch it, just clear.
        ShenandoahHeap::atomic_clear_oop(p, obj);
      }
    } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
      oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      if (resolved == obj) {
        // Not yet forwarded: evacuate in this thread's context.
        resolved = _heap->evacuate_object(obj, _thread);
      }
      shenandoah_assert_not_in_cset_except(p, resolved, _heap->cancelled_gc());
      // Atomic update: the slot may be raced by mutators and other GC workers.
      ShenandoahHeap::atomic_update_oop(resolved, p, obj);
    }
  }
}
 903 
// Narrow-oop root slots are never expected here.
void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
  ShouldNotReachHere();
}
 907 
 908 class ShenandoahIsCLDAliveClosure : public CLDClosure {
 909 public:
 910   void do_cld(ClassLoaderData* cld) {
 911     cld->is_alive();
 912   }
 913 };
 914 
 915 class ShenandoahIsNMethodAliveClosure: public NMethodClosure {
 916 public:
 917   void do_nmethod(nmethod* n) {
 918     n->is_unloading();
 919   }
 920 };
 921 
// This task not only evacuates/updates marked weak roots, but also "null"
// dead weak roots.
class ShenandoahConcurrentWeakRootsEvacUpdateTask : public WorkerTask {
private:
  ShenandoahVMWeakRoots<true /*concurrent*/> _vm_roots;

  // Roots related to concurrent class unloading
  ShenandoahClassLoaderDataRoots<true /* concurrent */>
                                             _cld_roots;
  ShenandoahConcurrentNMethodIterator        _nmethod_itr;
  ShenandoahGeneration*                      _generation;
  ShenandoahPhaseTimings::Phase              _phase;

public:
  ShenandoahConcurrentWeakRootsEvacUpdateTask(ShenandoahGeneration* generation, ShenandoahPhaseTimings::Phase phase) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Weak Roots"),
    _vm_roots(phase),
    _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers(), false /*heap iteration*/),
    _nmethod_itr(ShenandoahCodeRoots::table()),
    _generation(generation),
    _phase(phase) {}

  ~ShenandoahConcurrentWeakRootsEvacUpdateTask() {
    // Notify runtime data structures of potentially dead oops
    _vm_roots.report_num_dead();
  }

  void work(uint worker_id) override {
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    // Join the suspendible thread set so this concurrent worker can be
    // suspended for safepoints.
    ShenandoahSuspendibleThreadSetJoiner sts_join;
    {
      ShenandoahEvacOOMScope oom;
      // jni_roots and weak_roots are OopStorage backed roots, concurrent iteration
      // may race against OopStorage::release() calls.
      ShenandoahEvacUpdateCleanupOopStorageRootsClosure cl(_generation);
      _vm_roots.oops_do(&cl, worker_id);
    }

    // If we are going to perform concurrent class unloading later on, we need to
    // clean up the weak oops in CLD and determine nmethod's unloading state, so that we
    // can clean up immediate garbage sooner.
    if (ShenandoahHeap::heap()->unload_classes()) {
      // Applies ShenandoahIsCLDAlive closure to CLDs, native barrier will either null the
      // CLD's holder or evacuate it.
      {
        ShenandoahIsCLDAliveClosure is_cld_alive;
        _cld_roots.cld_do(&is_cld_alive, worker_id);
      }

      // Applies ShenandoahIsNMethodAliveClosure to registered nmethods.
      // The closure calls nmethod->is_unloading(). The is_unloading
      // state is cached, therefore, during concurrent class unloading phase,
      // we will not touch the metadata of unloading nmethods
      {
        ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
        ShenandoahIsNMethodAliveClosure is_nmethod_alive;
        _nmethod_itr.nmethods_do(&is_nmethod_alive);
      }
    }
  }
};
 983 
// Concurrent weak root processing: evacuate/update live weak roots and null
// dead ones, then rendezvous mutators to flush in-flight barrier activity.
void ShenandoahConcurrentGC::op_weak_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
  {
    // Concurrent weak root processing
    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_work);
    ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots_work);
    ShenandoahConcurrentWeakRootsEvacUpdateTask task(_generation, ShenandoahPhaseTimings::conc_weak_roots_work);
    heap->workers()->run_task(&task);
  }

  {
    // It is possible for mutators executing the load reference barrier to have
    // loaded an oop through a weak handle that has since been nulled out by
    // weak root processing. Handshaking here forces them to complete the
    // barrier before the GC cycle continues and does something that would
    // change the evaluation of the barrier (for example, resetting the TAMS
    // on trashed regions could make an oop appear to be marked _after_ the
    // region has been recycled).
    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_rendezvous);
    heap->rendezvous_threads("Shenandoah Concurrent Weak Roots");
  }
}
1007 
// Concurrent class unloading; the heavy lifting is delegated to the heap.
void ShenandoahConcurrentGC::op_class_unloading() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert (heap->is_concurrent_weak_root_in_progress() &&
          heap->unload_classes(),
          "Checked by caller");
  heap->do_class_unloading();
}
1015 
1016 class ShenandoahEvacUpdateCodeCacheClosure : public NMethodClosure {
1017 private:
1018   BarrierSetNMethod* const                  _bs;
1019   ShenandoahEvacuateUpdateMetadataClosure   _cl;
1020 
1021 public:
1022   ShenandoahEvacUpdateCodeCacheClosure() :
1023     _bs(BarrierSet::barrier_set()->barrier_set_nmethod()),
1024     _cl() {
1025   }
1026 
1027   void do_nmethod(nmethod* n) {
1028     ShenandoahNMethod* data = ShenandoahNMethod::gc_data(n);
1029     ShenandoahNMethodLocker locker(data->lock());
1030     // Setup EvacOOM scope below reentrant lock to avoid deadlock with
1031     // nmethod_entry_barrier
1032     ShenandoahEvacOOMScope oom;
1033     data->oops_do(&_cl, true/*fix relocation*/);
1034     _bs->disarm(n);
1035   }
1036 };
1037 
1038 class ShenandoahConcurrentRootsEvacUpdateTask : public WorkerTask {
1039 private:
1040   ShenandoahPhaseTimings::Phase                 _phase;
1041   ShenandoahVMRoots<true /*concurrent*/>        _vm_roots;
1042   ShenandoahClassLoaderDataRoots<true /*concurrent*/>
1043                                                 _cld_roots;
1044   ShenandoahConcurrentNMethodIterator           _nmethod_itr;
1045 
1046 public:
1047   ShenandoahConcurrentRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
1048     WorkerTask("Shenandoah Evacuate/Update Concurrent Strong Roots"),
1049     _phase(phase),
1050     _vm_roots(phase),
1051     _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers(), false /*heap iteration*/),
1052     _nmethod_itr(ShenandoahCodeRoots::table()) {}
1053 
1054   void work(uint worker_id) {
1055     ShenandoahConcurrentWorkerSession worker_session(worker_id);
1056     {
1057       ShenandoahEvacOOMScope oom;
1058       {
1059         // vm_roots and weak_roots are OopStorage backed roots, concurrent iteration
1060         // may race against OopStorage::release() calls.
1061         ShenandoahContextEvacuateUpdateRootsClosure cl;
1062         _vm_roots.oops_do<ShenandoahContextEvacuateUpdateRootsClosure>(&cl, worker_id);
1063       }
1064 
1065       {
1066         ShenandoahEvacuateUpdateMetadataClosure cl;
1067         CLDToOopClosure clds(&cl, ClassLoaderData::_claim_strong);
1068         _cld_roots.cld_do(&clds, worker_id);
1069       }
1070     }
1071 
1072     // Cannot setup ShenandoahEvacOOMScope here, due to potential deadlock with nmethod_entry_barrier.
1073     if (!ShenandoahHeap::heap()->unload_classes()) {
1074       ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
1075       ShenandoahEvacUpdateCodeCacheClosure cl;
1076       _nmethod_itr.nmethods_do(&cl);
1077     }
1078   }
1079 };
1080 
// Concurrent evacuation/update of strong roots, then clears the phase flag.
void ShenandoahConcurrentGC::op_strong_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
  ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
  heap->workers()->run_task(&task);
  heap->set_concurrent_strong_root_in_progress(false);
}
1088 
// Early concurrent cleanup: recycle trashed regions under a sized worker scope.
void ShenandoahConcurrentGC::op_cleanup_early() {
  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_cleanup(),
                              "cleanup early.");
  ShenandoahHeap::heap()->recycle_trash();
}
1095 
// Concurrent evacuation of the collection set.
void ShenandoahConcurrentGC::op_evacuate() {
  ShenandoahHeap::heap()->evacuate_collection_set(_generation, true /*concurrent*/);
  // Diagnostic option: artificially stretch the concurrent phase.
  if (ShenandoahDelayGC > 0) {
    os::naked_sleep(ShenandoahDelayGC);
  }
}
1102 
// Prologue for the update-references phase; currently only runs verification.
void ShenandoahConcurrentGC::op_init_update_refs() {
  if (ShenandoahVerify) {
    ShenandoahHeap* const heap = ShenandoahHeap::heap();
    ShenandoahTimingsTracker v(ShenandoahPhaseTimings::init_update_refs_verify);
    heap->verifier()->verify_before_update_refs(_generation);
  }
}
1110 
// Concurrent update of references in the heap.
void ShenandoahConcurrentGC::op_update_refs() {
  ShenandoahHeap::heap()->update_heap_references(_generation, true /*concurrent*/);
  // Diagnostic option: artificially stretch the concurrent phase.
  if (ShenandoahDelayGC > 0) {
    os::naked_sleep(ShenandoahDelayGC);
  }
}
1117 
// Handshake closure that updates references held in a thread's stack roots.
class ShenandoahUpdateThreadHandshakeClosure : public HandshakeClosure {
private:
  // This closure runs when thread is stopped for handshake, which means
  // we can use non-concurrent closure here, as long as it only updates
  // locations modified by the thread itself, i.e. stack locations.
  ShenandoahNonConcUpdateRefsClosure _cl;
public:
  ShenandoahUpdateThreadHandshakeClosure();
  void do_thread(Thread* thread) override;
};
1128 
// Names the handshake operation for logging/diagnostics.
ShenandoahUpdateThreadHandshakeClosure::ShenandoahUpdateThreadHandshakeClosure() :
  HandshakeClosure("Shenandoah Update Thread Roots") {
}
1132 
// Updates stack oops of the handshaken thread; non-Java threads are skipped.
void ShenandoahUpdateThreadHandshakeClosure::do_thread(Thread* thread) {
  if (thread->is_Java_thread()) {
    JavaThread* jt = JavaThread::cast(thread);
    ResourceMark rm;
    jt->oops_do(&_cl, nullptr);
  }
}
1140 
class ShenandoahUpdateThreadRootsAndFlushOldSatbBuffers final : public HandshakeClosure {
  // When Shenandoah is marking the old generation, it is possible for the SATB barrier
  // to pick up overwritten pointers that point into a cset region. If these pointers
  // are accessed by mark threads, they will crash. Once update refs has completed, it is
  // no longer possible for a mutator thread to overwrite a pointer into a cset region.
  //
  // Therefore, at the end of update refs, we use this closure to update the thread roots
  // and 'complete' all the thread local SATB buffers. Completing these will filter out
  // anything that has already been marked or anything that points to a region which is
  // not old. We do not need to worry about ABA situations where a region may become old
  // after the pointer is enqueued but before it is filtered. There are only two ways a
  // region may become old:
  //  1. The region is promoted in place. This is safe because such regions will never
  //     be in the collection set. If this happens, the pointer will be preserved, essentially
  //     becoming part of the old snapshot.
  //  2. The region is allocated during evacuation of old. This is also not a concern because
  //     we haven't yet finished marking old so no mixed evacuations will happen.
  ShenandoahUpdateThreadHandshakeClosure _update_roots;   // Updates the thread's stack roots
  ShenandoahFlushSATB _flush_all_satb;                    // Completes the thread's SATB buffers

public:
  ShenandoahUpdateThreadRootsAndFlushOldSatbBuffers() :
    HandshakeClosure("Shenandoah Update Thread Roots and Flush SATB"),
    _flush_all_satb(ShenandoahBarrierSet::satb_mark_queue_set()) {
    assert(ShenandoahBarrierSet::satb_mark_queue_set().get_filter_out_young(),
           "Should be filtering pointers outside of old during old marking");
  }

  // Runs both sub-closures against the handshaken thread, in that order.
  void do_thread(Thread* thread) override {
    _update_roots.do_thread(thread);
    _flush_all_satb.do_thread(thread);
  }
};
1174 
// Updates thread stack roots via handshake. While old marking is in progress,
// also flushes thread-local SATB buffers (see closure comment above its class).
void ShenandoahConcurrentGC::op_update_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (heap->is_concurrent_old_mark_in_progress()) {
    ShenandoahUpdateThreadRootsAndFlushOldSatbBuffers cl;
    Handshake::execute(&cl);
  } else {
    ShenandoahUpdateThreadHandshakeClosure cl;
    Handshake::execute(&cl);
  }
}
1185 
// Safepoint operation: complete the update-references phase, clear cycle
// state, verify, and rebuild the free set.
void ShenandoahConcurrentGC::op_final_update_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
  assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");

  heap->finish_concurrent_roots();

  // Clear cancelled GC, if set. On cancellation path, the block before would handle
  // everything.
  if (heap->cancelled_gc()) {
    heap->clear_cancelled_gc();
  }

  // Has to be done before cset is clear
  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_in_to_space(_generation);
  }

  // If we are running in generational mode and this is an aging cycle, this will also age active
  // regions that haven't been used for allocation.
  heap->update_heap_region_states(true /*concurrent*/);

  heap->set_update_refs_in_progress(false);
  heap->set_has_forwarded_objects(false);

  if (heap->mode()->is_generational() && heap->is_concurrent_old_mark_in_progress()) {
    // Aging_cycle is only relevant during evacuation cycle for individual objects and during final mark for
    // entire regions.  Both of these relevant operations occur before final update refs.
    ShenandoahGenerationalHeap::heap()->set_aging_cycle(false);
  }

  if (ShenandoahVerify) {
    ShenandoahTimingsTracker v(ShenandoahPhaseTimings::final_update_refs_verify);
    heap->verifier()->verify_after_update_refs(_generation);
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  heap->rebuild_free_set(true /*concurrent*/);
  // Cycle work is done; heuristics start tracking the idle span from here.
  _generation->heuristics()->start_idle_span();

  {
    // Publish the updated gc state to all Java threads.
    ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_propagate_gc_state);
    heap->propagate_gc_state_to_all_threads();
  }
}
1234 
// Entry point for the concurrent final-roots phase. Returns false if, in
// generational mode, completing the abbreviated cycle fails; true otherwise.
bool ShenandoahConcurrentGC::entry_final_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());


  const char* msg = conc_final_roots_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_final_roots);
  EventMark em("%s", msg);
  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
                              msg);

  if (heap->mode()->is_generational()) {
    if (!complete_abbreviated_cycle()) {
      return false;
    }
  }

  heap->concurrent_final_roots();
  return true;
}
1256 
// Optional whole-universe verification at the final-roots pause.
void ShenandoahConcurrentGC::op_verify_final_roots() {
  if (VerifyAfterGC) {
    Universe::verify();
  }
}
1262 
// Final concurrent cleanup: recycle trashed regions under a sized worker scope.
void ShenandoahConcurrentGC::op_cleanup_complete() {
  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_cleanup(),
                              "cleanup complete.");
  ShenandoahHeap::heap()->recycle_trash();
}
1269 
// Concurrently resets mark bitmaps after the collection, unless old-gen
// marking still depends on them.
void ShenandoahConcurrentGC::op_reset_after_collect() {
  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                          ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
                          "reset after collection.");

  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (heap->mode()->is_generational()) {
    // If we are in the midst of an old gc bootstrap or an old marking, we want to leave the mark bit map of
    // the young generation intact. In particular, reference processing in the old generation may potentially
    // need the reachability of a young generation referent of a Reference object in the old generation.
    if (!_do_old_gc_bootstrap && !heap->is_concurrent_old_mark_in_progress()) {
      heap->young_generation()->reset_mark_bitmap<false>();
    }
  } else {
    _generation->reset_mark_bitmap<false>();
  }
}
1287 
1288 bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
1289   if (ShenandoahHeap::heap()->cancelled_gc()) {
1290     _degen_point = point;
1291     return true;
1292   }
1293   return false;
1294 }
1295 
// Event message for the Init Mark pause, qualified by generation type and
// whether classes are being unloaded.
const char* ShenandoahConcurrentGC::init_mark_event_message() const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
  if (heap->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Init Mark", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Init Mark", "");
  }
}
1305 
// Event message for the Final Mark pause, qualified by generation type and
// whether classes are being unloaded.
const char* ShenandoahConcurrentGC::final_mark_event_message() const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
         "Should not have forwarded objects during final mark, unless old gen concurrent mark is running");

  if (heap->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Mark", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Mark", "");
  }
}
1317 
// Event message for concurrent marking, qualified by generation type and
// whether classes are being unloaded.
const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
         "Should not have forwarded objects concurrent mark, unless old gen concurrent mark is running");
  if (heap->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent marking", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent marking", "");
  }
}
1328 
// Event message for the concurrent reset phase.
const char* ShenandoahConcurrentGC::conc_reset_event_message() const {
  if (ShenandoahHeap::heap()->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent reset", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent reset", "");
  }
}
1336 
// Event message for the concurrent reset-after-collect phase.
const char* ShenandoahConcurrentGC::conc_reset_after_collect_event_message() const {
  if (ShenandoahHeap::heap()->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent reset after collect", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent reset after collect", "");
  }
}
1344 
// Event message for the Verify Final Roots pause.
const char* ShenandoahConcurrentGC::verify_final_roots_event_message() const {
  if (ShenandoahHeap::heap()->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Verify Final Roots", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Verify Final Roots", "");
  }
}
1352 
// Event message for the concurrent final-roots phase.
const char* ShenandoahConcurrentGC::conc_final_roots_event_message() const {
  if (ShenandoahHeap::heap()->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent Final Roots", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent Final Roots", "");
  }
}
1360 
// Event message for concurrent weak reference processing.
const char* ShenandoahConcurrentGC::conc_weak_refs_event_message() const {
  if (ShenandoahHeap::heap()->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent weak references", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent weak references", "");
  }
}
1368 
// Event message for concurrent weak root processing.
const char* ShenandoahConcurrentGC::conc_weak_roots_event_message() const {
  if (ShenandoahHeap::heap()->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent weak roots", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent weak roots", "");
  }
}
1376 
// Event message for the concurrent cleanup phases.
const char* ShenandoahConcurrentGC::conc_cleanup_event_message() const {
  if (ShenandoahHeap::heap()->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent cleanup", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent cleanup", "");
  }
}
1384 
// Event message for the concurrent init-update-refs phase.
const char* ShenandoahConcurrentGC::conc_init_update_refs_event_message() const {
  if (ShenandoahHeap::heap()->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent Init Update Refs", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent Init Update Refs", "");
  }
}