/*
 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shenandoah/shenandoahBreakpoint.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "gc/shenandoah/shenandoahLock.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahStackWatermark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "memory/allocation.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/events.hpp"

// Breakpoint support
class ShenandoahBreakpointGCScope : public StackObj {
private:
  const GCCause::Cause _cause;
public:
  ShenandoahBreakpointGCScope(GCCause::Cause cause) : _cause(cause) {
    if (cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::start_gc();
      ShenandoahBreakpoint::at_before_gc();
    }
  }

  ~ShenandoahBreakpointGCScope() {
    if (_cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::at_after_gc();
    }
  }
};

class ShenandoahBreakpointMarkScope : public StackObj {
private:
  const GCCause::Cause _cause;
public:
  ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
    if (_cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::at_after_marking_started();
    }
  }

  ~ShenandoahBreakpointMarkScope() {
    if (_cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::at_before_marking_completed();
    }
  }
};

ShenandoahConcurrentGC::ShenandoahConcurrentGC(ShenandoahGeneration* generation, bool do_old_gc_bootstrap) :
  _mark(generation),
  _degen_point(ShenandoahDegenPoint::_degenerated_unset),
  _abbreviated(false),
  _do_old_gc_bootstrap(do_old_gc_bootstrap),
  _generation(generation) {
}

ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
  return _degen_point;
}

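// Drives one complete concurrent collection cycle for the given generation. Returns true
// if the cycle ran to completion, and false if it was cancelled; in the latter case
// _degen_point records where a degenerated GC should resume the work.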
bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);

  // Reset for upcoming marking
  entry_reset();

  // Start initial mark under STW
  vmop_entry_init_mark();

  {
    ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);

    // Reset task queue stats here, rather than in mark_concurrent_roots,
    // because remembered set scan will `push` oops into the queues and
    // resetting after this happens will lose those counts.
    TASKQUEUE_STATS_ONLY(_mark.task_queues()->reset_taskqueue_stats());

    // Concurrent remembered set scanning
    entry_scan_remembered_set();

    // Concurrent mark roots
    entry_mark_roots();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_roots)) {
      return false;
    }

    // Continue concurrent mark
    entry_mark();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) {
      return false;
    }
  }

  // Complete marking under STW, and start evacuation
  vmop_entry_final_mark();

  // If the GC was cancelled just before final mark (but after the preceding cancellation check),
  // then the safepoint operation will do nothing and the concurrent mark will still be in progress.
  // In this case it is safe (and necessary) to resume the degenerated cycle from the marking phase.
  //
  // On the other hand, if the GC is cancelled after final mark (but before this check), then the
  // final mark safepoint operation will have finished the mark (setting concurrent mark in progress
  // to false). In this case (final mark has completed), we need control to fall past the next
  // cancellation check and resume the degenerated cycle from the evacuation phase.
  if (_generation->is_concurrent_mark_in_progress()) {
    // If the concurrent mark is still in progress after the final mark safepoint, then the GC has
    // been cancelled. The degenerated cycle must resume from the marking phase. Without this check,
    // the non-generational mode may fall all the way to the end of this collect routine without
    // having done anything (besides mark most of the heap). Without having collected anything, we
    // can expect an 'out of cycle' degenerated GC which will again mark the entire heap. This is
    // not optimal.
    // For the generational mode, we cannot allow this. The generational mode relies on marking
    // (including the final mark) to rebuild portions of the card table. If the generational mode does
    // not complete marking after it has swapped the card tables, the root set on subsequent GCs will
    // be incomplete, and heap corruption may follow.
    bool cancelled = check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark);
    assert(cancelled, "GC must have been cancelled between concurrent and final mark");
    return false;
  }

  // Concurrent stack processing
  if (heap->is_evacuation_in_progress()) {
    entry_thread_roots();
  }

  // Process weak roots that might still point to regions that would be broken by cleanup
  if (heap->is_concurrent_weak_root_in_progress()) {
    entry_weak_refs();
    entry_weak_roots();
  }

  // Final mark might have reclaimed some immediate garbage, so kick off cleanup to reclaim
  // the space. This would be the last action if there is nothing to evacuate.  Note that
  // we will not age young-gen objects in the case that we skip evacuation.
  entry_cleanup_early();

  heap->free_set()->log_status_under_lock();

  // Perform concurrent class unloading
  if (heap->unload_classes() &&
      heap->is_concurrent_weak_root_in_progress()) {
    entry_class_unloading();
  }

  // Process strong roots.
  // This may be skipped if there is nothing to update/evacuate.
  // If so, strong_root_in_progress would be unset.
  if (heap->is_concurrent_strong_root_in_progress()) {
    entry_strong_roots();
  }

  // Continue the cycle with evacuation and optional update-refs.
  // This may be skipped if there is nothing to evacuate.
  // If so, evac_in_progress would be unset by collection set preparation code.
  if (heap->is_evacuation_in_progress()) {
    // Concurrently evacuate
    entry_evacuate();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) {
      return false;
    }

    // Perform update-refs phase.
    vmop_entry_init_updaterefs();
    entry_updaterefs();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) {
      return false;
    }

    // Concurrent update thread roots
    entry_update_thread_roots();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) {
      return false;
    }

    vmop_entry_final_updaterefs();

    // Update-refs has freed up the collection set; kick off cleanup to reclaim the space.
    entry_cleanup_complete();
  } else {
    // We chose not to evacuate because we found sufficient immediate garbage.
    // However, there may still be regions to promote in place, so do that now.
    if (has_in_place_promotions(heap)) {
      entry_promote_in_place();

      // If the promote-in-place operation was cancelled, we can have the degenerated
      // cycle complete the operation. It will see that no evacuations are in progress,
      // and that there are regions wanting promotion. The risk with not handling the
      // cancellation would be failing to restore top for these regions and leaving
      // them unable to serve allocations for the old generation.
      if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) {
        return false;
      }
    }

    // At this point, the cycle is effectively complete. If the cycle has been cancelled here,
    // the control thread will detect it on its next iteration and run a degenerated young cycle.
    vmop_entry_final_roots();
    _abbreviated = true;
  }

  // We defer generation resizing actions until after cset regions have been recycled.  We do this even following an
  // abbreviated cycle.
  if (heap->mode()->is_generational()) {
    ShenandoahGenerationalHeap::heap()->complete_concurrent_cycle();
  }
  return true;
}

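// The vmop_entry_* methods below set up STW collection counters and gross-phase timing,
// optionally inject an allocation failure for testing, and then execute a VM operation
// that runs the corresponding entry_/op_ method at a safepoint.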
void ShenandoahConcurrentGC::vmop_entry_init_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahInitMark op(this);
  VMThread::execute(&op); // jump to entry_init_mark() under safepoint
}

void ShenandoahConcurrentGC::vmop_entry_final_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalMarkStartEvac op(this);
  VMThread::execute(&op); // jump to entry_final_mark under safepoint
}

void ShenandoahConcurrentGC::vmop_entry_init_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_update_refs_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahInitUpdateRefs op(this);
  VMThread::execute(&op);
}

void ShenandoahConcurrentGC::vmop_entry_final_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalUpdateRefs op(this);
  VMThread::execute(&op);
}

void ShenandoahConcurrentGC::vmop_entry_final_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_roots_gross);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalRoots op(this);
  VMThread::execute(&op);
}

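// The entry_* methods wrap each pause or concurrent phase with its net-phase timing,
// a JFR event mark, and (where the phase uses workers) a worker scope sized per
// ShenandoahWorkerPolicy, before delegating to the corresponding op_* implementation.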
void ShenandoahConcurrentGC::entry_init_mark() {
  const char* msg = init_mark_event_message();
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
                              "init marking");

  op_init_mark();
}

void ShenandoahConcurrentGC::entry_final_mark() {
  const char* msg = final_mark_event_message();
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
                              "final marking");

  op_final_mark();
}

void ShenandoahConcurrentGC::entry_init_updaterefs() {
  static const char* msg = "Pause Init Update Refs";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs);
  EventMark em("%s", msg);

  // No workers used in this phase, no setup required
  op_init_updaterefs();
}

void ShenandoahConcurrentGC::entry_final_updaterefs() {
  static const char* msg = "Pause Final Update Refs";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
                              "final reference update");

  op_final_updaterefs();
}

void ShenandoahConcurrentGC::entry_final_roots() {
  const char* msg = final_roots_event_message();
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots);
  EventMark em("%s", msg);

  op_final_roots();
}

void ShenandoahConcurrentGC::entry_reset() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->try_inject_alloc_failure();

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  {
    const char* msg = conc_reset_event_message();
    ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
    EventMark em("%s", msg);

    ShenandoahWorkerScope scope(heap->workers(),
                                ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
                                msg);
    op_reset();
  }

  if (_do_old_gc_bootstrap) {
    static const char* msg = "Concurrent reset (OLD)";
    ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset_old);
    ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                                ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
                                msg);
    EventMark em("%s", msg);

    heap->old_generation()->prepare_gc();
  }
}

void ShenandoahConcurrentGC::entry_scan_remembered_set() {
  if (_generation->is_young()) {
    ShenandoahHeap* const heap = ShenandoahHeap::heap();
    TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
    const char* msg = "Concurrent remembered set scanning";
    ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::init_scan_rset);
    EventMark em("%s", msg);

    ShenandoahWorkerScope scope(heap->workers(),
                                ShenandoahWorkerPolicy::calc_workers_for_rs_scanning(),
                                msg);

    heap->try_inject_alloc_failure();
    _generation->scan_remembered_set(true /* is_concurrent */);
  }
}

void ShenandoahConcurrentGC::entry_mark_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = "Concurrent marking roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent marking roots");

  heap->try_inject_alloc_failure();
  op_mark_roots();
}

void ShenandoahConcurrentGC::entry_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = conc_mark_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent marking");

  heap->try_inject_alloc_failure();
  op_mark();
}

void ShenandoahConcurrentGC::entry_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  static const char* msg = "Concurrent thread roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_thread_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              msg);

  heap->try_inject_alloc_failure();
  op_thread_roots();
}

void ShenandoahConcurrentGC::entry_weak_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  const char* msg = conc_weak_refs_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_refs_processing(),
                              "concurrent weak references");

  heap->try_inject_alloc_failure();
  op_weak_refs();
}

void ShenandoahConcurrentGC::entry_weak_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = conc_weak_roots_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent weak root");

  heap->try_inject_alloc_failure();
  op_weak_roots();
}

void ShenandoahConcurrentGC::entry_class_unloading() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent class unloading";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_class_unload);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent class unloading");

  heap->try_inject_alloc_failure();
  op_class_unloading();
}

void ShenandoahConcurrentGC::entry_strong_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent strong roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_strong_roots);
  EventMark em("%s", msg);

  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_strong_roots);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent strong root");

  heap->try_inject_alloc_failure();
  op_strong_roots();
}

void ShenandoahConcurrentGC::entry_cleanup_early() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = conc_cleanup_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_early, true /* log_heap_usage */);
  EventMark em("%s", msg);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  op_cleanup_early();
}

void ShenandoahConcurrentGC::entry_evacuate() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  static const char* msg = "Concurrent evacuation";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_evac);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
                              "concurrent evacuation");

  heap->try_inject_alloc_failure();
  op_evacuate();
}

void ShenandoahConcurrentGC::entry_promote_in_place() {
  shenandoah_assert_generational();

  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  static const char* msg = "Promote in place";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::promote_in_place);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
                              "promote in place");

  ShenandoahGenerationalHeap::heap()->promote_regions_in_place(true);
}

void ShenandoahConcurrentGC::entry_update_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  static const char* msg = "Concurrent update thread roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_thread_roots);
  EventMark em("%s", msg);

  // No workers used in this phase, no setup required
  heap->try_inject_alloc_failure();
  op_update_thread_roots();
}

void ShenandoahConcurrentGC::entry_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent update references";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
                              "concurrent reference update");

  heap->try_inject_alloc_failure();
  op_updaterefs();
}

void ShenandoahConcurrentGC::entry_cleanup_complete() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = conc_cleanup_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
  EventMark em("%s", msg);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  op_cleanup_complete();
}

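// Reset the generation's marking state (e.g. mark bitmaps) ahead of the new cycle; with
// pacing enabled, also set up the pacer budget for the reset phase.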
void ShenandoahConcurrentGC::op_reset() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_reset();
  }
  _generation->prepare_gc();
}

class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
public:
  ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
    if (r->is_active()) {
      // Check if the region's TAMS needs updating. We have updated it already during concurrent
      // reset, so it is very likely we don't need to do another write here.  Since most regions
      // are not "active", this path is relatively rare.
      if (_ctx->top_at_mark_start(r) != r->top()) {
        _ctx->capture_top_at_mark_start(r);
      }
    } else {
      assert(_ctx->top_at_mark_start(r) == r->top(),
             "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
    }
  }

  bool is_thread_safe() { return true; }
};

void ShenandoahConcurrentGC::start_mark() {
  _mark.start_mark();
}

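// Initial mark pause: for generational young collections this swaps the remembered set
// card tables and coordinates with any in-progress old-gen mark (transferring SATB
// pointers), then captures TAMS for the regions being collected, resets reference
// processing state, arms nmethods, and flags concurrent marking as in progress.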
void ShenandoahConcurrentGC::op_init_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");

  assert(_generation->is_bitmap_clear(), "need clear marking bitmap");
  assert(!_generation->is_mark_complete(), "should not be complete");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");

  if (heap->mode()->is_generational()) {
    if (_generation->is_young()) {
      // The current implementation of swap_remembered_set() copies the write-card-table to the read-card-table.
      ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_swap_rset);
      _generation->swap_remembered_set();
    }

    if (_generation->is_global()) {
      heap->old_generation()->cancel_gc();
    } else if (heap->is_concurrent_old_mark_in_progress()) {
      // Purge the SATB buffers, transferring any valid, old pointers to the
      // old generation mark queue. Any pointers in a young region will be
      // abandoned.
      ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_transfer_satb);
      heap->old_generation()->transfer_pointers_from_satb();
    }
  }

  if (ShenandoahVerify) {
    heap->verifier()->verify_before_concmark();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  _generation->set_concurrent_mark_in_progress(true);

  start_mark();

  if (_do_old_gc_bootstrap) {
    shenandoah_assert_generational();
    // Update region state for both young and old regions
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
    ShenandoahInitMarkUpdateRegionStateClosure cl;
    heap->parallel_heap_region_iterate(&cl);
    heap->old_generation()->ref_processor()->reset_thread_locals();
  } else {
    // Update region state for only young regions
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
    ShenandoahInitMarkUpdateRegionStateClosure cl;
    _generation->parallel_heap_region_iterate(&cl);
  }

  // Weak reference processing
  ShenandoahReferenceProcessor* rp = _generation->ref_processor();
  rp->reset_thread_locals();
  rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());

  // Make above changes visible to worker threads
  OrderAccess::fence();

  // Arm nmethods for concurrent mark
  ShenandoahCodeRoots::arm_nmethods_for_mark();

  ShenandoahStackWatermark::change_epoch_id();
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_mark();
  }
}

void ShenandoahConcurrentGC::op_mark_roots() {
  _mark.mark_concurrent_roots();
}

void ShenandoahConcurrentGC::op_mark() {
  _mark.concurrent_mark();
}

void ShenandoahConcurrentGC::op_final_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");

  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_no_forwarded();
  }

  if (!heap->cancelled_gc()) {
    _mark.finish_mark();
    assert(!heap->cancelled_gc(), "STW mark cannot OOM");

    // Notify JVMTI that the tagmap table will need cleaning.
    JvmtiTagMap::set_needs_cleaning();

    // The collection set is chosen by prepare_regions_and_collection_set(). Additionally, certain parameters have been
    // established to govern the evacuation efforts that are about to begin.  Refer to comments on reserve members in
    // ShenandoahGeneration and ShenandoahOldGeneration for more detail.
    _generation->prepare_regions_and_collection_set(true /*concurrent*/);

    // Has to be done after cset selection
    heap->prepare_concurrent_roots();

    if (!heap->collection_set()->is_empty()) {
      LogTarget(Debug, gc, cset) lt;
      if (lt.is_enabled()) {
        ResourceMark rm;
        LogStream ls(lt);
        heap->collection_set()->print_on(&ls);
      }

      if (ShenandoahVerify) {
        heap->verifier()->verify_before_evacuation();
      }

      heap->set_evacuation_in_progress(true);
      // From here on, we need to update references.
      heap->set_has_forwarded_objects(true);

      // Arm nmethods/stack for concurrent processing
      ShenandoahCodeRoots::arm_nmethods_for_evac();
      ShenandoahStackWatermark::change_epoch_id();

      if (ShenandoahPacing) {
        heap->pacer()->setup_for_evac();
      }
    } else {
      if (ShenandoahVerify) {
        if (has_in_place_promotions(heap)) {
          heap->verifier()->verify_after_concmark_with_promotions();
        } else {
          heap->verifier()->verify_after_concmark();
        }
      }

      if (VerifyAfterGC) {
        Universe::verify();
      }
    }
  }
}

bool ShenandoahConcurrentGC::has_in_place_promotions(ShenandoahHeap* heap) {
  return heap->mode()->is_generational() && heap->old_generation()->has_in_place_promotions();
}

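// Closure and task for concurrent processing of Java thread roots during evacuation:
// each Java thread's stack is processed via its stack watermark, evacuating/updating the
// oops it references. In generational mode, PLAB promotions are re-enabled both for the
// visited threads and for the GC workers themselves.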
template<bool GENERATIONAL>
class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
private:
  OopClosure* const _oops;
public:
  explicit ShenandoahConcurrentEvacThreadClosure(OopClosure* oops) : _oops(oops) {}

  void do_thread(Thread* thread) override {
    JavaThread* const jt = JavaThread::cast(thread);
    StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc);
    if (GENERATIONAL) {
      ShenandoahThreadLocalData::enable_plab_promotions(thread);
    }
  }
};

template<bool GENERATIONAL>
class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask {
private:
  ShenandoahJavaThreadsIterator _java_threads;

public:
  explicit ShenandoahConcurrentEvacUpdateThreadTask(uint n_workers) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Thread Roots"),
    _java_threads(ShenandoahPhaseTimings::conc_thread_roots, n_workers) {
  }

  void work(uint worker_id) override {
    if (GENERATIONAL) {
      Thread* worker_thread = Thread::current();
      ShenandoahThreadLocalData::enable_plab_promotions(worker_thread);
    }

    // The ShenandoahEvacOOMScope has to be set up by ShenandoahContextEvacuateUpdateRootsClosure.
    // Otherwise, it may deadlock with the watermark lock.
    ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
    ShenandoahConcurrentEvacThreadClosure<GENERATIONAL> thr_cl(&oops_cl);
    _java_threads.threads_do(&thr_cl, worker_id);
  }
};

void ShenandoahConcurrentGC::op_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_evacuation_in_progress(), "Checked by caller");
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
  if (heap->mode()->is_generational()) {
    ShenandoahConcurrentEvacUpdateThreadTask<true> task(heap->workers()->active_workers());
    heap->workers()->run_task(&task);
  } else {
    ShenandoahConcurrentEvacUpdateThreadTask<false> task(heap->workers()->active_workers());
    heap->workers()->run_task(&task);
  }
}

void ShenandoahConcurrentGC::op_weak_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
  // Concurrent weak refs processing
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
  if (heap->gc_cause() == GCCause::_wb_breakpoint) {
    ShenandoahBreakpoint::at_after_reference_processing_started();
  }
  _generation->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
}

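// Closure used when cleaning up OopStorage-backed weak roots: unmarked (dead) referents
// in the active generation are cleared to null, while live referents that sit in the
// collection set are evacuated (if not already) and the root slot is updated to point at
// the forwardee.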
class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _mark_context;
  bool  _evac_in_progress;
  Thread* const _thread;

public:
  ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
  void do_oop(oop* p);
  void do_oop(narrowOop* p);
};

ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
  _heap(ShenandoahHeap::heap()),
  _mark_context(ShenandoahHeap::heap()->marking_context()),
  _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
  _thread(Thread::current()) {
}

void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
  const oop obj = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(obj)) {
    if (!_mark_context->is_marked(obj)) {
      shenandoah_assert_generations_reconciled();
      if (_heap->is_in_active_generation(obj)) {
        // Here we are asserting that an unmarked from-space object is 'correct'. There seems to be a legitimate
        // use-case for accessing from-space objects during concurrent class unloading. In all modes of Shenandoah,
        // concurrent class unloading only happens during a global collection.
        shenandoah_assert_correct(p, obj);
        ShenandoahHeap::atomic_clear_oop(p, obj);
      }
    } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
      oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      if (resolved == obj) {
        resolved = _heap->evacuate_object(obj, _thread);
      }
      shenandoah_assert_not_in_cset_except(p, resolved, _heap->cancelled_gc());
      ShenandoahHeap::atomic_update_oop(resolved, p, obj);
    }
  }
}

void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
  ShouldNotReachHere();
}

class ShenandoahIsCLDAliveClosure : public CLDClosure {
public:
  void do_cld(ClassLoaderData* cld) {
    cld->is_alive();
  }
};

class ShenandoahIsNMethodAliveClosure: public NMethodClosure {
public:
  void do_nmethod(nmethod* n) {
    n->is_unloading();
  }
};

// This task not only evacuates/updates marked weak roots, but also nulls out
// dead weak roots.
class ShenandoahConcurrentWeakRootsEvacUpdateTask : public WorkerTask {
private:
  ShenandoahVMWeakRoots<true /*concurrent*/> _vm_roots;

  // Roots related to concurrent class unloading
  ShenandoahClassLoaderDataRoots<true /* concurrent */>
                                             _cld_roots;
  ShenandoahConcurrentNMethodIterator        _nmethod_itr;
  ShenandoahPhaseTimings::Phase              _phase;

public:
  ShenandoahConcurrentWeakRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Weak Roots"),
    _vm_roots(phase),
    _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers(), false /*heap iteration*/),
    _nmethod_itr(ShenandoahCodeRoots::table()),
    _phase(phase) {}

  ~ShenandoahConcurrentWeakRootsEvacUpdateTask() {
    // Notify runtime data structures of potentially dead oops
    _vm_roots.report_num_dead();
  }

  void work(uint worker_id) {
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    ShenandoahSuspendibleThreadSetJoiner sts_join;
    {
      ShenandoahEvacOOMScope oom;
      // jni_roots and weak_roots are OopStorage-backed roots; concurrent iteration
      // may race against OopStorage::release() calls.
      ShenandoahEvacUpdateCleanupOopStorageRootsClosure cl;
      _vm_roots.oops_do(&cl, worker_id);
    }

    // If we are going to perform concurrent class unloading later on, we need to
    // clean up the weak oops in the CLDs and determine the nmethods' unloading state,
    // so that we can clean up immediate garbage sooner.
    if (ShenandoahHeap::heap()->unload_classes()) {
      // Applies ShenandoahIsCLDAliveClosure to CLDs; the native barrier will either null
      // the CLD's holder or evacuate it.
      {
        ShenandoahIsCLDAliveClosure is_cld_alive;
        _cld_roots.cld_do(&is_cld_alive, worker_id);
      }

      // Applies ShenandoahIsNMethodAliveClosure to registered nmethods.
      // The closure calls nmethod->is_unloading(). The is_unloading
      // state is cached, so during the concurrent class unloading phase
      // we will not touch the metadata of unloading nmethods.
      {
        ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
        ShenandoahIsNMethodAliveClosure is_nmethod_alive;
        _nmethod_itr.nmethods_do(&is_nmethod_alive);
      }
    }
  }
};

void ShenandoahConcurrentGC::op_weak_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
  // Concurrent weak root processing
  {
    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_work);
    ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots_work);
    ShenandoahConcurrentWeakRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_weak_roots_work);
    heap->workers()->run_task(&task);
  }

  // Perform handshake to flush out dead oops
  {
    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_rendezvous);
    heap->rendezvous_threads();
  }
  // We can only toggle the concurrent_weak_root_in_progress flag
  // at a safepoint, so that mutators see a consistent
  // value. The flag will be cleared at the next safepoint.
}

void ShenandoahConcurrentGC::op_class_unloading() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert (heap->is_concurrent_weak_root_in_progress() &&
          heap->unload_classes(),
          "Checked by caller");
  heap->do_class_unloading();
}

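// Closure that evacuates/updates the oops embedded in an nmethod (under the nmethod's GC
// lock) and then disarms its entry barrier for this cycle.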
class ShenandoahEvacUpdateCodeCacheClosure : public NMethodClosure {
private:
  BarrierSetNMethod* const                  _bs;
  ShenandoahEvacuateUpdateMetadataClosure   _cl;

public:
  ShenandoahEvacUpdateCodeCacheClosure() :
    _bs(BarrierSet::barrier_set()->barrier_set_nmethod()),
    _cl() {
  }

  void do_nmethod(nmethod* n) {
    ShenandoahNMethod* data = ShenandoahNMethod::gc_data(n);
    ShenandoahReentrantLocker locker(data->lock());
    // Set up the EvacOOM scope below the reentrant lock to avoid deadlock with
    // nmethod_entry_barrier.
    ShenandoahEvacOOMScope oom;
    data->oops_do(&_cl, true/*fix relocation*/);
    _bs->disarm(n);
  }
};

class ShenandoahConcurrentRootsEvacUpdateTask : public WorkerTask {
private:
  ShenandoahPhaseTimings::Phase                 _phase;
  ShenandoahVMRoots<true /*concurrent*/>        _vm_roots;
  ShenandoahClassLoaderDataRoots<true /*concurrent*/>
                                                _cld_roots;
  ShenandoahConcurrentNMethodIterator           _nmethod_itr;

public:
  ShenandoahConcurrentRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Strong Roots"),
    _phase(phase),
    _vm_roots(phase),
    _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers(), false /*heap iteration*/),
    _nmethod_itr(ShenandoahCodeRoots::table()) {}

  void work(uint worker_id) {
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    {
      ShenandoahEvacOOMScope oom;
      {
        // vm_roots and weak_roots are OopStorage-backed roots; concurrent iteration
        // may race against OopStorage::release() calls.
        ShenandoahContextEvacuateUpdateRootsClosure cl;
        _vm_roots.oops_do<ShenandoahContextEvacuateUpdateRootsClosure>(&cl, worker_id);
      }

      {
        ShenandoahEvacuateUpdateMetadataClosure cl;
        CLDToOopClosure clds(&cl, ClassLoaderData::_claim_strong);
        _cld_roots.cld_do(&clds, worker_id);
      }
    }

    // Cannot set up ShenandoahEvacOOMScope here, due to potential deadlock with nmethod_entry_barrier.
    if (!ShenandoahHeap::heap()->unload_classes()) {
      ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
      ShenandoahEvacUpdateCodeCacheClosure cl;
      _nmethod_itr.nmethods_do(&cl);
    }
  }
};

void ShenandoahConcurrentGC::op_strong_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
  ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
  heap->workers()->run_task(&task);
  heap->set_concurrent_strong_root_in_progress(false);
}

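// Both cleanup phases simply ask the free set to recycle regions that have been marked
// as trash, making their memory available for new allocations.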
void ShenandoahConcurrentGC::op_cleanup_early() {
  ShenandoahHeap::heap()->free_set()->recycle_trash();
}

void ShenandoahConcurrentGC::op_evacuate() {
  ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
}

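// Evacuation is done; transition to the update-references phase: drop the evacuation and
// weak-root flags, prepare the heap-reference update iterator, optionally verify, and
// (with pacing enabled) set up the pacer for update-refs.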
void ShenandoahConcurrentGC::op_init_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->set_evacuation_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);
  heap->prepare_update_heap_references(true /*concurrent*/);
  if (ShenandoahVerify) {
    heap->verifier()->verify_before_updaterefs();
  }

  heap->set_update_refs_in_progress(true);
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_updaterefs();
  }
}

void ShenandoahConcurrentGC::op_updaterefs() {
  ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
}

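// Handshake closure that updates the references held in each Java thread's stack and
// thread-local roots after evacuation; it is executed via a global handshake rather than
// a full safepoint.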
class ShenandoahUpdateThreadClosure : public HandshakeClosure {
private:
  ShenandoahUpdateRefsClosure _cl;
public:
  ShenandoahUpdateThreadClosure();
  void do_thread(Thread* thread);
};

ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
  HandshakeClosure("Shenandoah Update Thread Roots") {
}

void ShenandoahUpdateThreadClosure::do_thread(Thread* thread) {
  if (thread->is_Java_thread()) {
    JavaThread* jt = JavaThread::cast(thread);
    ResourceMark rm;
    jt->oops_do(&_cl, nullptr);
  }
}

void ShenandoahConcurrentGC::op_update_thread_roots() {
  ShenandoahUpdateThreadClosure cl;
  Handshake::execute(&cl);
}

void ShenandoahConcurrentGC::op_final_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
  assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");

  heap->finish_concurrent_roots();

  // Clear cancelled GC, if set. On the cancellation path, the block before would have handled
  // everything.
  if (heap->cancelled_gc()) {
    heap->clear_cancelled_gc(true /* clear oom handler */);
  }

  // Has to be done before the cset is cleared
  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_in_to_space();
  }

  // If we are running in generational mode and this is an aging cycle, this will also age active
  // regions that haven't been used for allocation.
  heap->update_heap_region_states(true /*concurrent*/);

  heap->set_update_refs_in_progress(false);
  heap->set_has_forwarded_objects(false);

  if (heap->mode()->is_generational() && heap->is_concurrent_old_mark_in_progress()) {
    // When the SATB barrier is left on to support concurrent old gen mark, it may pick up writes to
    // objects in the collection set. After those objects are evacuated, the pointers in the
    // SATB are no longer safe. Once we have finished update references, we are guaranteed that
    // no more writes to the collection set are possible.
    //
    // This will transfer any old pointers in _active_ regions from the SATB to the old gen
    // mark queues. All other pointers will be discarded. This would also discard any pointers
    // in old regions that were included in a mixed evacuation. We aren't using the SATB filter
    // methods here because we cannot control when they execute. If the SATB filter runs _after_
    // a region has been recycled, we will not be able to detect the bad pointer.
    //
    // We are not concerned about skipping this step in abbreviated cycles because regions
    // with no live objects cannot have been written to and so cannot have entries in the SATB
    // buffers.
    heap->old_generation()->transfer_pointers_from_satb();

    // The aging cycle is only relevant during the evacuation phase for individual objects and during
    // final mark for entire regions.  Both of these operations occur before final update refs.
    ShenandoahGenerationalHeap::heap()->set_aging_cycle(false);
  }

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_updaterefs();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  heap->rebuild_free_set(true /*concurrent*/);
}

void ShenandoahConcurrentGC::op_final_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->set_concurrent_weak_root_in_progress(false);
  heap->set_evacuation_in_progress(false);

  if (heap->mode()->is_generational()) {
    // If the cycle was shortened for having enough immediate garbage, this could be
    // the last GC safepoint before concurrent marking of old resumes. We must be sure
    // that old mark threads don't see any pointers to garbage in the SATB buffers.
    if (heap->is_concurrent_old_mark_in_progress()) {
      heap->old_generation()->transfer_pointers_from_satb();
    }

    if (!_generation->is_old()) {
      ShenandoahGenerationalHeap::heap()->update_region_ages(_generation->complete_marking_context());
    }
  }
}

void ShenandoahConcurrentGC::op_cleanup_complete() {
  ShenandoahHeap::heap()->free_set()->recycle_trash();
}

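// If the GC has been cancelled, record the point from which a degenerated cycle should
// continue the work and report the cancellation to the caller.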
bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
  if (ShenandoahHeap::heap()->cancelled_gc()) {
    _degen_point = point;
    return true;
  }
  return false;
}

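// Event message helpers: build the generation-qualified phase name used for logging and
// JFR events, appending " (unload classes)" when class unloading is enabled for this cycle.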
const char* ShenandoahConcurrentGC::init_mark_event_message() const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
  if (heap->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Init Mark", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Init Mark", "");
  }
}

const char* ShenandoahConcurrentGC::final_mark_event_message() const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
         "Should not have forwarded objects during final mark, unless old gen concurrent mark is running");

  if (heap->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Mark", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Mark", "");
  }
}

const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
         "Should not have forwarded objects during concurrent mark, unless old gen concurrent mark is running");
  if (heap->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent marking", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent marking", "");
  }
}

const char* ShenandoahConcurrentGC::conc_reset_event_message() const {
  if (ShenandoahHeap::heap()->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent reset", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent reset", "");
  }
}

const char* ShenandoahConcurrentGC::final_roots_event_message() const {
  if (ShenandoahHeap::heap()->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Roots", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Roots", "");
  }
}

const char* ShenandoahConcurrentGC::conc_weak_refs_event_message() const {
  if (ShenandoahHeap::heap()->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent weak references", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent weak references", "");
  }
}

const char* ShenandoahConcurrentGC::conc_weak_roots_event_message() const {
  if (ShenandoahHeap::heap()->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent weak roots", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent weak roots", "");
  }
}

const char* ShenandoahConcurrentGC::conc_cleanup_event_message() const {
  if (ShenandoahHeap::heap()->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent cleanup", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent cleanup", "");
  }
}