1 /*
   2  * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
   3  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 
  28 #include "gc/shared/barrierSetNMethod.hpp"
  29 #include "gc/shared/collectorCounters.hpp"
  30 #include "gc/shared/continuationGCSupport.inline.hpp"
  31 #include "gc/shenandoah/shenandoahBreakpoint.hpp"
  32 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  33 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
  34 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  35 #include "gc/shenandoah/shenandoahGeneration.hpp"
  36 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
  37 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  38 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  39 #include "gc/shenandoah/shenandoahLock.hpp"
  40 #include "gc/shenandoah/shenandoahMark.inline.hpp"
  41 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  42 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  43 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  44 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  45 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  46 #include "gc/shenandoah/shenandoahStackWatermark.hpp"
  47 #include "gc/shenandoah/shenandoahUtils.hpp"
  48 #include "gc/shenandoah/shenandoahVerifier.hpp"
  49 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  50 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  51 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  52 #include "memory/allocation.hpp"
  53 #include "prims/jvmtiTagMap.hpp"
  54 #include "runtime/vmThread.hpp"
  55 #include "utilities/events.hpp"
  56 
  57 // Breakpoint support
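// The two RAII scopes below drive the whitebox-test breakpoint protocol: when the GC cause is
// _wb_breakpoint they notify the breakpoint mechanism at the start/end of the cycle and at the
// start/end of concurrent marking; for all other causes they do nothing.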
  58 class ShenandoahBreakpointGCScope : public StackObj {
  59 private:
  60   const GCCause::Cause _cause;
  61 public:
  62   ShenandoahBreakpointGCScope(GCCause::Cause cause) : _cause(cause) {
  63     if (cause == GCCause::_wb_breakpoint) {
  64       ShenandoahBreakpoint::start_gc();
  65       ShenandoahBreakpoint::at_before_gc();
  66     }
  67   }
  68 
  69   ~ShenandoahBreakpointGCScope() {
  70     if (_cause == GCCause::_wb_breakpoint) {
  71       ShenandoahBreakpoint::at_after_gc();
  72     }
  73   }
  74 };
  75 
  76 class ShenandoahBreakpointMarkScope : public StackObj {
  77 private:
  78   const GCCause::Cause _cause;
  79 public:
  80   ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
  81     if (_cause == GCCause::_wb_breakpoint) {
  82       ShenandoahBreakpoint::at_after_marking_started();
  83     }
  84   }
  85 
  86   ~ShenandoahBreakpointMarkScope() {
  87     if (_cause == GCCause::_wb_breakpoint) {
  88       ShenandoahBreakpoint::at_before_marking_completed();
  89     }
  90   }
  91 };
  92 
  93 ShenandoahConcurrentGC::ShenandoahConcurrentGC(ShenandoahGeneration* generation, bool do_old_gc_bootstrap) :
  94   _mark(generation),
  95   _degen_point(ShenandoahDegenPoint::_degenerated_unset),
  96   _abbreviated(false),
  97   _do_old_gc_bootstrap(do_old_gc_bootstrap),
  98   _generation(generation) {
  99 }
 100 
 101 ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
 102   return _degen_point;
 103 }
 104 
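// Run one complete concurrent cycle for the given generation: concurrent reset, init mark (STW),
// concurrent marking (preceded by remembered-set scanning for young collections), final mark (STW),
// then concurrent evacuation, update-refs and cleanup as needed. Returns true if the cycle ran to
// completion and false if it was cancelled, in which case degen_point() reports the phase from which
// a degenerated cycle should resume.
//
// A minimal usage sketch (not the actual control-thread code; the surrounding variable names are
// assumed for illustration):
//
//   ShenandoahConcurrentGC gc(generation, do_old_gc_bootstrap);
//   if (!gc.collect(cause)) {
//     // Hand the recorded degeneration point to a degenerated (STW) cycle.
//     ShenandoahGC::ShenandoahDegenPoint point = gc.degen_point();
//   }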
 105 bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
 106   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 107 
 108   ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);
 109 
 110   // Reset for upcoming marking
 111   entry_reset();
 112 
 113   // Start initial mark under STW
 114   vmop_entry_init_mark();
 115 
 116   {
 117     ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);
 118 
    // Reset task queue stats here, rather than in mark_concurrent_roots,
    // because the remembered set scan will `push` oops into the queues and
    // resetting the stats after that happens would lose those counts.
 122     TASKQUEUE_STATS_ONLY(_mark.task_queues()->reset_taskqueue_stats());
 123 
 124     // Concurrent remembered set scanning
 125     entry_scan_remembered_set();
 126     // TODO: When RS scanning yields, we will need a check_cancellation_and_abort() degeneration point here.
 127 
 128     // Concurrent mark roots
 129     entry_mark_roots();
 130     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_roots)) {
 131       return false;
 132     }
 133 
 134     // Continue concurrent mark
 135     entry_mark();
 136     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) {
 137       return false;
 138     }
 139   }
 140 
 141   // Complete marking under STW, and start evacuation
 142   vmop_entry_final_mark();
 143 
 144   // If the GC was cancelled just before final mark (but after the preceding cancellation check),
 145   // then the safepoint operation will do nothing and the concurrent mark will still be in progress.
 146   // In this case it is safe (and necessary) to resume the degenerated cycle from the marking phase.
 147   //
 148   // On the other hand, if the GC is cancelled after final mark (but before this check), then the
 149   // final mark safepoint operation will have finished the mark (setting concurrent mark in progress
 150   // to false). In this case (final mark has completed), we need control to fall past the next
 151   // cancellation check and resume the degenerated cycle from the evacuation phase.
 152   if (_generation->is_concurrent_mark_in_progress()) {
 153     // If the concurrent mark is still in progress after the final mark safepoint, then the GC has
 154     // been cancelled. The degenerated cycle must resume from the marking phase. Without this check,
 155     // the non-generational mode may fall all the way to the end of this collect routine without
 156     // having done anything (besides mark most of the heap). Without having collected anything, we
 157     // can expect an 'out of cycle' degenerated GC which will again mark the entire heap. This is
 158     // not optimal.
 159     // For the generational mode, we cannot allow this. The generational mode relies on marking
 160     // (including the final mark) to rebuild portions of the card table. If the generational mode does
 161     // not complete marking after it has swapped the card tables, the root set on subsequent GCs will
    // be incomplete, and heap corruption may follow.
 163     bool cancelled = check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark);
 164     assert(cancelled, "GC must have been cancelled between concurrent and final mark");
 165     return false;
 166   }
 167 
 168   // Concurrent stack processing
 169   if (heap->is_evacuation_in_progress()) {
 170     entry_thread_roots();
 171   }
 172 
 173   // Process weak roots that might still point to regions that would be broken by cleanup
 174   if (heap->is_concurrent_weak_root_in_progress()) {
 175     entry_weak_refs();
 176     entry_weak_roots();
 177   }
 178 
  // Final mark might have reclaimed some immediate garbage, so kick off cleanup to reclaim
  // the space. This would be the last action if there is nothing to evacuate. Note that
  // we will not age young-gen objects if we skip evacuation.
 182   entry_cleanup_early();
 183 
 184   {
 185     // TODO: Not sure there is value in logging free-set status right here.  Note that whenever the free set is rebuilt,
 186     // it logs the newly rebuilt status.
 187     ShenandoahHeapLocker locker(heap->lock());
 188     heap->free_set()->log_status();
 189   }
 190 
 191   // Perform concurrent class unloading
 192   if (heap->unload_classes() &&
 193       heap->is_concurrent_weak_root_in_progress()) {
 194     entry_class_unloading();
 195   }
 196 
 197   // Processing strong roots
 198   // This may be skipped if there is nothing to update/evacuate.
 199   // If so, strong_root_in_progress would be unset.
 200   if (heap->is_concurrent_strong_root_in_progress()) {
 201     entry_strong_roots();
 202   }
 203 
 204   // Continue the cycle with evacuation and optional update-refs.
 205   // This may be skipped if there is nothing to evacuate.
 206   // If so, evac_in_progress would be unset by collection set preparation code.
 207   if (heap->is_evacuation_in_progress()) {
 208     // Concurrently evacuate
 209     entry_evacuate();
 210     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) {
 211       return false;
 212     }
 213   }
 214 
 215   if (heap->has_forwarded_objects()) {
 216     // Perform update-refs phase.
 217     vmop_entry_init_updaterefs();
 218     entry_updaterefs();
 219     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) {
 220       return false;
 221     }
 222 
 223     // Concurrent update thread roots
 224     entry_update_thread_roots();
 225     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) {
 226       return false;
 227     }
 228 
 229     vmop_entry_final_updaterefs();
 230 
    // The update-references phase has freed up the collection set; kick off cleanup to reclaim the space.
 232     entry_cleanup_complete();
 233   } else {
 234     // We chose not to evacuate because we found sufficient immediate garbage. Note that we
 235     // do not check for cancellation here because, at this point, the cycle is effectively
 236     // complete. If the cycle has been cancelled here, the control thread will detect it
 237     // on its next iteration and run a degenerated young cycle.
 238     vmop_entry_final_roots();
 239     _abbreviated = true;
 240   }
 241 
 242   // We defer generation resizing actions until after cset regions have been recycled.  We do this even following an
 243   // abbreviated cycle.
 244   if (heap->mode()->is_generational()) {
 245     ShenandoahGenerationalHeap::heap()->complete_concurrent_cycle();
 246   }
 247   return true;
 248 }
 249 
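// Each vmop_entry_* helper below records the "gross" pause timing, optionally injects an allocation
// failure for testing, and submits a VM operation that re-enters this object through the matching
// entry_* method at a safepoint.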
 250 void ShenandoahConcurrentGC::vmop_entry_init_mark() {
 251   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 252   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 253   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);
 254 
 255   heap->try_inject_alloc_failure();
 256   VM_ShenandoahInitMark op(this);
 257   VMThread::execute(&op); // jump to entry_init_mark() under safepoint
 258 }
 259 
 260 void ShenandoahConcurrentGC::vmop_entry_final_mark() {
 261   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 262   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 263   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);
 264 
 265   heap->try_inject_alloc_failure();
 266   VM_ShenandoahFinalMarkStartEvac op(this);
 267   VMThread::execute(&op); // jump to entry_final_mark under safepoint
 268 }
 269 
 270 void ShenandoahConcurrentGC::vmop_entry_init_updaterefs() {
 271   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 272   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 273   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_update_refs_gross);
 274 
 275   heap->try_inject_alloc_failure();
 276   VM_ShenandoahInitUpdateRefs op(this);
 277   VMThread::execute(&op);
 278 }
 279 
 280 void ShenandoahConcurrentGC::vmop_entry_final_updaterefs() {
 281   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 282   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 283   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_gross);
 284 
 285   heap->try_inject_alloc_failure();
 286   VM_ShenandoahFinalUpdateRefs op(this);
 287   VMThread::execute(&op);
 288 }
 289 
 290 void ShenandoahConcurrentGC::vmop_entry_final_roots() {
 291   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 292   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 293   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_roots_gross);
 294 
 295   // This phase does not use workers, no need for setup
 296   heap->try_inject_alloc_failure();
 297   VM_ShenandoahFinalRoots op(this);
 298   VMThread::execute(&op);
 299 }
 300 
 301 void ShenandoahConcurrentGC::entry_init_mark() {
 302   const char* msg = init_mark_event_message();
 303   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_mark);
 304   EventMark em("%s", msg);
 305 
 306   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
 307                               ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
 308                               "init marking");
 309 
 310   op_init_mark();
 311 }
 312 
 313 void ShenandoahConcurrentGC::entry_final_mark() {
 314   const char* msg = final_mark_event_message();
 315   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark);
 316   EventMark em("%s", msg);
 317 
 318   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
 319                               ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
 320                               "final marking");
 321 
 322   op_final_mark();
 323 }
 324 
 325 void ShenandoahConcurrentGC::entry_init_updaterefs() {
 326   static const char* msg = "Pause Init Update Refs";
 327   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs);
 328   EventMark em("%s", msg);
 329 
 330   // No workers used in this phase, no setup required
 331   op_init_updaterefs();
 332 }
 333 
 334 void ShenandoahConcurrentGC::entry_final_updaterefs() {
 335   static const char* msg = "Pause Final Update Refs";
 336   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
 337   EventMark em("%s", msg);
 338 
 339   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
 340                               ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
 341                               "final reference update");
 342 
 343   op_final_updaterefs();
 344 }
 345 
 346 void ShenandoahConcurrentGC::entry_final_roots() {
 347   static const char* msg = "Pause Final Roots";
 348   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots);
 349   EventMark em("%s", msg);
 350 
 351   op_final_roots();
 352 }
 353 
 354 void ShenandoahConcurrentGC::entry_reset() {
 355   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 356   heap->try_inject_alloc_failure();
 357 
 358   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 359   {
 360     static const char* msg = "Concurrent reset";
 361     ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
 362     EventMark em("%s", msg);
 363 
 364     ShenandoahWorkerScope scope(heap->workers(),
 365                                 ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
 366                                 msg);
 367     op_reset();
 368   }
 369 
 370   if (_do_old_gc_bootstrap) {
 371     static const char* msg = "Concurrent reset (OLD)";
 372     ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset_old);
 373     ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
 374                                 ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
 375                                 msg);
 376     EventMark em("%s", msg);
 377 
 378     heap->old_generation()->prepare_gc();
 379   }
 380 }
 381 
 382 void ShenandoahConcurrentGC::entry_scan_remembered_set() {
 383   if (_generation->is_young()) {
 384     ShenandoahHeap* const heap = ShenandoahHeap::heap();
 385     TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 386     const char* msg = "Concurrent remembered set scanning";
 387     ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::init_scan_rset);
 388     EventMark em("%s", msg);
 389 
 390     ShenandoahWorkerScope scope(heap->workers(),
 391                                 ShenandoahWorkerPolicy::calc_workers_for_rs_scanning(),
 392                                 msg);
 393 
 394     heap->try_inject_alloc_failure();
 395     _generation->scan_remembered_set(true /* is_concurrent */);
 396   }
 397 }
 398 
 399 void ShenandoahConcurrentGC::entry_mark_roots() {
 400   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 401   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 402   const char* msg = "Concurrent marking roots";
 403   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
 404   EventMark em("%s", msg);
 405 
 406   ShenandoahWorkerScope scope(heap->workers(),
 407                               ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
 408                               "concurrent marking roots");
 409 
 410   heap->try_inject_alloc_failure();
 411   op_mark_roots();
 412 }
 413 
 414 void ShenandoahConcurrentGC::entry_mark() {
 415   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 416   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 417   const char* msg = conc_mark_event_message();
 418   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark);
 419   EventMark em("%s", msg);
 420 
 421   ShenandoahWorkerScope scope(heap->workers(),
 422                               ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
 423                               "concurrent marking");
 424 
 425   heap->try_inject_alloc_failure();
 426   op_mark();
 427 }
 428 
 429 void ShenandoahConcurrentGC::entry_thread_roots() {
 430   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 431   static const char* msg = "Concurrent thread roots";
 432   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_thread_roots);
 433   EventMark em("%s", msg);
 434 
 435   ShenandoahWorkerScope scope(heap->workers(),
 436                               ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
 437                               msg);
 438 
 439   heap->try_inject_alloc_failure();
 440   op_thread_roots();
 441 }
 442 
 443 void ShenandoahConcurrentGC::entry_weak_refs() {
 444   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 445   static const char* msg = "Concurrent weak references";
 446   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_refs);
 447   EventMark em("%s", msg);
 448 
 449   ShenandoahWorkerScope scope(heap->workers(),
 450                               ShenandoahWorkerPolicy::calc_workers_for_conc_refs_processing(),
 451                               "concurrent weak references");
 452 
 453   heap->try_inject_alloc_failure();
 454   op_weak_refs();
 455 }
 456 
 457 void ShenandoahConcurrentGC::entry_weak_roots() {
 458   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 459   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 460   static const char* msg = "Concurrent weak roots";
 461   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_roots);
 462   EventMark em("%s", msg);
 463 
 464   ShenandoahWorkerScope scope(heap->workers(),
 465                               ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
 466                               "concurrent weak root");
 467 
 468   heap->try_inject_alloc_failure();
 469   op_weak_roots();
 470 }
 471 
 472 void ShenandoahConcurrentGC::entry_class_unloading() {
 473   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 474   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 475   static const char* msg = "Concurrent class unloading";
 476   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_class_unload);
 477   EventMark em("%s", msg);
 478 
 479   ShenandoahWorkerScope scope(heap->workers(),
 480                               ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
 481                               "concurrent class unloading");
 482 
 483   heap->try_inject_alloc_failure();
 484   op_class_unloading();
 485 }
 486 
 487 void ShenandoahConcurrentGC::entry_strong_roots() {
 488   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 489   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 490   static const char* msg = "Concurrent strong roots";
 491   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_strong_roots);
 492   EventMark em("%s", msg);
 493 
 494   ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_strong_roots);
 495 
 496   ShenandoahWorkerScope scope(heap->workers(),
 497                               ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
 498                               "concurrent strong root");
 499 
 500   heap->try_inject_alloc_failure();
 501   op_strong_roots();
 502 }
 503 
 504 void ShenandoahConcurrentGC::entry_cleanup_early() {
 505   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 506   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 507   static const char* msg = "Concurrent cleanup";
 508   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_early, true /* log_heap_usage */);
 509   EventMark em("%s", msg);
 510 
 511   // This phase does not use workers, no need for setup
 512   heap->try_inject_alloc_failure();
 513   op_cleanup_early();
 514 }
 515 
 516 void ShenandoahConcurrentGC::entry_evacuate() {
 517   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 518   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 519 
 520   static const char* msg = "Concurrent evacuation";
 521   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_evac);
 522   EventMark em("%s", msg);
 523 
 524   ShenandoahWorkerScope scope(heap->workers(),
 525                               ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
 526                               "concurrent evacuation");
 527 
 528   heap->try_inject_alloc_failure();
 529   op_evacuate();
 530 }
 531 
 532 void ShenandoahConcurrentGC::entry_update_thread_roots() {
 533   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 534   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 535 
 536   static const char* msg = "Concurrent update thread roots";
 537   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_thread_roots);
 538   EventMark em("%s", msg);
 539 
 540   // No workers used in this phase, no setup required
 541   heap->try_inject_alloc_failure();
 542   op_update_thread_roots();
 543 }
 544 
 545 void ShenandoahConcurrentGC::entry_updaterefs() {
 546   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 547   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 548   static const char* msg = "Concurrent update references";
 549   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs);
 550   EventMark em("%s", msg);
 551 
 552   ShenandoahWorkerScope scope(heap->workers(),
 553                               ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
 554                               "concurrent reference update");
 555 
 556   heap->try_inject_alloc_failure();
 557   op_updaterefs();
 558 }
 559 
 560 void ShenandoahConcurrentGC::entry_cleanup_complete() {
 561   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 562   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 563   static const char* msg = "Concurrent cleanup";
 564   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
 565   EventMark em("%s", msg);
 566 
 567   // This phase does not use workers, no need for setup
 568   heap->try_inject_alloc_failure();
 569   op_cleanup_complete();
 570 }
 571 
 572 void ShenandoahConcurrentGC::op_reset() {
 573   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 574   if (ShenandoahPacing) {
 575     heap->pacer()->setup_for_reset();
 576   }
 577   _generation->prepare_gc();
 578 }
 579 
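// Captures top-at-mark-start (TAMS) for active regions whose top has moved since it was last
// captured; inactive regions are expected to already have a correct TAMS.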
 580 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
 581 private:
 582   ShenandoahMarkingContext* const _ctx;
 583 public:
 584   ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
 585 
 586   void heap_region_do(ShenandoahHeapRegion* r) {
 587     assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
 588     if (r->is_active()) {
      // Check whether the region's TAMS needs updating. We have already updated it during concurrent
      // reset, so it is very likely we don't need to do another write here.  Since most regions
      // are not "active", this path is relatively rare.
 592       if (_ctx->top_at_mark_start(r) != r->top()) {
 593         _ctx->capture_top_at_mark_start(r);
 594       }
 595     } else {
 596       assert(_ctx->top_at_mark_start(r) == r->top(),
 597              "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
 598     }
 599   }
 600 
 601   bool is_thread_safe() { return true; }
 602 };
 603 
 604 void ShenandoahConcurrentGC::start_mark() {
 605   _mark.start_mark();
 606 }
 607 
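// Executed at the init-mark safepoint. In generational mode this swaps the card tables and either
// cancels old-gen GC (for a global cycle) or flushes SATB buffers to the old-gen mark queues (if old
// marking is in progress). It then captures TAMS for the relevant regions, resets reference-processor
// state, arms nmethods for concurrent marking, bumps the stack watermark epoch and sets up pacing.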
 608 void ShenandoahConcurrentGC::op_init_mark() {
 609   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 610   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
 611   assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
 612 
 613   assert(_generation->is_bitmap_clear(), "need clear marking bitmap");
 614   assert(!_generation->is_mark_complete(), "should not be complete");
 615   assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
 616 
 618   if (heap->mode()->is_generational()) {
 619     if (_generation->is_young() || (_generation->is_global() && ShenandoahVerify)) {
 620       // The current implementation of swap_remembered_set() copies the write-card-table
 621       // to the read-card-table. The remembered sets are also swapped for GLOBAL collections
 622       // so that the verifier works with the correct copy of the card table when verifying.
 623       // TODO: This path should not really depend on ShenandoahVerify.
 624       ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_swap_rset);
 625       _generation->swap_remembered_set();
 626     }
 627 
 628     if (_generation->is_global()) {
 629       heap->old_generation()->cancel_gc();
 630     } else if (heap->is_concurrent_old_mark_in_progress()) {
 631       // Purge the SATB buffers, transferring any valid, old pointers to the
 632       // old generation mark queue. Any pointers in a young region will be
 633       // abandoned.
 634       ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_transfer_satb);
 635       heap->old_generation()->transfer_pointers_from_satb();
 636     }
 637   }
 638 
 639   if (ShenandoahVerify) {
 640     heap->verifier()->verify_before_concmark();
 641   }
 642 
 643   if (VerifyBeforeGC) {
 644     Universe::verify();
 645   }
 646 
 647   _generation->set_concurrent_mark_in_progress(true);
 648 
 649   start_mark();
 650 
 651   if (_do_old_gc_bootstrap) {
 652     shenandoah_assert_generational();
 653     // Update region state for both young and old regions
 654     // TODO: We should be able to pull this out of the safepoint for the bootstrap
 655     // cycle. The top of an old region will only move when a GC cycle evacuates
 656     // objects into it. When we start an old cycle, we know that nothing can touch
 657     // the top of old regions.
 658     ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
 659     ShenandoahInitMarkUpdateRegionStateClosure cl;
 660     heap->parallel_heap_region_iterate(&cl);
 661     heap->old_generation()->ref_processor()->reset_thread_locals();
 662   } else {
 663     // Update region state for only young regions
 664     ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
 665     ShenandoahInitMarkUpdateRegionStateClosure cl;
 666     _generation->parallel_heap_region_iterate(&cl);
 667   }
 668 
 669   // Weak reference processing
 670   ShenandoahReferenceProcessor* rp = _generation->ref_processor();
 671   rp->reset_thread_locals();
 672   rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());
 673 
 674   // Make above changes visible to worker threads
 675   OrderAccess::fence();
 676 
 677   // Arm nmethods for concurrent mark
 678   ShenandoahCodeRoots::arm_nmethods_for_mark();
 679 
 680   ShenandoahStackWatermark::change_epoch_id();
 681   if (ShenandoahPacing) {
 682     heap->pacer()->setup_for_mark();
 683   }
 684 }
 685 
 686 void ShenandoahConcurrentGC::op_mark_roots() {
 687   _mark.mark_concurrent_roots();
 688 }
 689 
 690 void ShenandoahConcurrentGC::op_mark() {
 691   _mark.concurrent_mark();
 692 }
 693 
 694 void ShenandoahConcurrentGC::op_final_mark() {
 695   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 696   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
 697   assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
 698 
 699   if (ShenandoahVerify) {
 700     heap->verifier()->verify_roots_no_forwarded();
 701   }
 702 
 703   if (!heap->cancelled_gc()) {
 704     _mark.finish_mark();
 705     assert(!heap->cancelled_gc(), "STW mark cannot OOM");
 706 
 707     // Notify JVMTI that the tagmap table will need cleaning.
 708     JvmtiTagMap::set_needs_cleaning();
 709 
 710     // The collection set is chosen by prepare_regions_and_collection_set().
 711     //
    // TODO: Under severe memory overload conditions that can be checked here, we may want to limit
    // the inclusion of old-gen candidates within the collection set.  This would allow us to prioritize efforts on
    // evacuating young-gen.  This remediation is most appropriate when old-gen availability is very high (so there
    // are negligible negative impacts from delaying completion of old-gen evacuation) and when young-gen collections
    // are "under duress" (as signalled by very low availability of memory within young-gen, indicating that young-gen
    // collections are not triggering frequently enough).
 718     _generation->prepare_regions_and_collection_set(true /*concurrent*/);
 719 
 720     // Upon return from prepare_regions_and_collection_set(), certain parameters have been established to govern the
 721     // evacuation efforts that are about to begin.  In particular:
 722     //
 723     // heap->get_promoted_reserve() represents the amount of memory within old-gen's available memory that has
 724     //   been set aside to hold objects promoted from young-gen memory.  This represents an estimated percentage
 725     //   of the live young-gen memory within the collection set.  If there is more data ready to be promoted than
 726     //   can fit within this reserve, the promotion of some objects will be deferred until a subsequent evacuation
 727     //   pass.
 728     //
 729     // heap->get_old_evac_reserve() represents the amount of memory within old-gen's available memory that has been
 730     //  set aside to hold objects evacuated from the old-gen collection set.
 731     //
 732     // heap->get_young_evac_reserve() represents the amount of memory within young-gen's available memory that has
 733     //  been set aside to hold objects evacuated from the young-gen collection set.  Conservatively, this value
 734     //  equals the entire amount of live young-gen memory within the collection set, even though some of this memory
 735     //  will likely be promoted.
 736 
 737     // Has to be done after cset selection
 738     heap->prepare_concurrent_roots();
 739 
 740     if (!heap->collection_set()->is_empty() || has_in_place_promotions(heap)) {
 741       // Even if the collection set is empty, we need to do evacuation if there are regions to be promoted in place.
 742       // Concurrent evacuation takes responsibility for registering objects and setting the remembered set cards to dirty.
 743 
 744       LogTarget(Debug, gc, cset) lt;
 745       if (lt.is_enabled()) {
 746         ResourceMark rm;
 747         LogStream ls(lt);
 748         heap->collection_set()->print_on(&ls);
 749       }
 750 
 751       if (ShenandoahVerify) {
 752         heap->verifier()->verify_before_evacuation();
 753       }
 754 
 755       // TODO: Do we need to set this if we are only promoting regions in place? We don't need the barriers on for that.
 756       heap->set_evacuation_in_progress(true);
 757 
 758       // Verify before arming for concurrent processing.
 759       // Otherwise, verification can trigger stack processing.
 760       if (ShenandoahVerify) {
 761         heap->verifier()->verify_during_evacuation();
 762       }
 763 
 764       // Generational mode may promote objects in place during the evacuation phase.
 765       // If that is the only reason we are evacuating, we don't need to update references
 766       // and there will be no forwarded objects on the heap.
 767       heap->set_has_forwarded_objects(!heap->collection_set()->is_empty());
 768 
 769       // Arm nmethods/stack for concurrent processing
 770       if (!heap->collection_set()->is_empty()) {
        // Iff objects will be evacuated, arm the nmethod barriers. These will be disarmed
 772         // under the same condition (established in prepare_concurrent_roots) after strong
 773         // root evacuation has completed (see op_strong_roots).
 774         ShenandoahCodeRoots::arm_nmethods_for_evac();
 775         ShenandoahStackWatermark::change_epoch_id();
 776       }
 777 
 778       if (ShenandoahPacing) {
 779         heap->pacer()->setup_for_evac();
 780       }
 781     } else {
 782       if (ShenandoahVerify) {
 783         heap->verifier()->verify_after_concmark();
 784       }
 785 
 786       if (VerifyAfterGC) {
 787         Universe::verify();
 788       }
 789     }
 790   }
 791 }
 792 
 793 bool ShenandoahConcurrentGC::has_in_place_promotions(ShenandoahHeap* heap) {
 794   return heap->mode()->is_generational() && heap->old_generation()->has_in_place_promotions();
 795 }
 796 
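// Thread closure used while evacuation is in progress: finishes stack watermark processing for each
// Java thread and, in generational mode, enables PLAB promotions for that thread.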
 797 template<bool GENERATIONAL>
 798 class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
 799 private:
 800   OopClosure* const _oops;
 801 public:
 802   explicit ShenandoahConcurrentEvacThreadClosure(OopClosure* oops) : _oops(oops) {}
 803 
 804   void do_thread(Thread* thread) override {
 805     JavaThread* const jt = JavaThread::cast(thread);
 806     StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc);
 807     if (GENERATIONAL) {
 808       ShenandoahThreadLocalData::enable_plab_promotions(thread);
 809     }
 810   }
 811 };
 812 
 813 template<bool GENERATIONAL>
 814 class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask {
 815 private:
 816   ShenandoahJavaThreadsIterator _java_threads;
 817 
 818 public:
 819   explicit ShenandoahConcurrentEvacUpdateThreadTask(uint n_workers) :
 820     WorkerTask("Shenandoah Evacuate/Update Concurrent Thread Roots"),
 821     _java_threads(ShenandoahPhaseTimings::conc_thread_roots, n_workers) {
 822   }
 823 
 824   void work(uint worker_id) override {
 825     if (GENERATIONAL) {
 826       Thread* worker_thread = Thread::current();
 827       ShenandoahThreadLocalData::enable_plab_promotions(worker_thread);
 828     }
 829 
    // ShenandoahEvacOOMScope has to be set up by ShenandoahContextEvacuateUpdateRootsClosure.
    // Otherwise, it may deadlock with the watermark lock.
 832     ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
 833     ShenandoahConcurrentEvacThreadClosure<GENERATIONAL> thr_cl(&oops_cl);
 834     _java_threads.threads_do(&thr_cl, worker_id);
 835   }
 836 };
 837 
 838 void ShenandoahConcurrentGC::op_thread_roots() {
 839   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 840   assert(heap->is_evacuation_in_progress(), "Checked by caller");
 841   ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
 842   if (heap->mode()->is_generational()) {
 843     ShenandoahConcurrentEvacUpdateThreadTask<true> task(heap->workers()->active_workers());
 844     heap->workers()->run_task(&task);
 845   } else {
 846     ShenandoahConcurrentEvacUpdateThreadTask<false> task(heap->workers()->active_workers());
 847     heap->workers()->run_task(&task);
 848   }
 849 }
 850 
 851 void ShenandoahConcurrentGC::op_weak_refs() {
 852   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 853   assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
 854   // Concurrent weak refs processing
 855   ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
 856   if (heap->gc_cause() == GCCause::_wb_breakpoint) {
 857     ShenandoahBreakpoint::at_after_reference_processing_started();
 858   }
 859   _generation->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
 860 }
 861 
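// Closure for OopStorage-backed (weak) roots: clears slots that reference unmarked (dead) objects in
// the active generation, and evacuates/updates slots that still reference collection-set objects
// while evacuation is in progress.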
 862 class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
 863 private:
 864   ShenandoahHeap* const _heap;
 865   ShenandoahMarkingContext* const _mark_context;
 866   bool  _evac_in_progress;
 867   Thread* const _thread;
 868 
 869 public:
 870   ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
 871   void do_oop(oop* p);
 872   void do_oop(narrowOop* p);
 873 };
 874 
 875 ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
 876   _heap(ShenandoahHeap::heap()),
 877   _mark_context(ShenandoahHeap::heap()->marking_context()),
 878   _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
 879   _thread(Thread::current()) {
 880 }
 881 
 882 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
 883   const oop obj = RawAccess<>::oop_load(p);
 884   if (!CompressedOops::is_null(obj)) {
 885     if (!_mark_context->is_marked(obj)) {
 886       shenandoah_assert_generations_reconciled();
 887       if (_heap->is_in_active_generation(obj)) {
 888         // TODO: This worries me. Here we are asserting that an unmarked from-space object is 'correct'.
 889         // Normally, I would call this a bogus assert, but there seems to be a legitimate use-case for
 890         // accessing from-space objects during class unloading. However, the from-space object may have
 891         // been "filled". We've made no effort to prevent old generation classes being unloaded by young
 892         // gen (and vice-versa).
 893         shenandoah_assert_correct(p, obj);
 894         ShenandoahHeap::atomic_clear_oop(p, obj);
 895       }
 896     } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
 897       oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 898       if (resolved == obj) {
 899         resolved = _heap->evacuate_object(obj, _thread);
 900       }
 901       shenandoah_assert_not_in_cset_except(p, resolved, _heap->cancelled_gc());
 902       ShenandoahHeap::atomic_update_oop(resolved, p, obj);
 903     }
 904   }
 905 }
 906 
 907 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
 908   ShouldNotReachHere();
 909 }
 910 
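// The two closures below simply invoke is_alive()/is_unloading() on CLDs and nmethods so that the
// side effects of those queries (barrier-mediated processing of the CLD holder, caching of the
// nmethod unloading state) happen concurrently; the actual unloading work happens later.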
 911 class ShenandoahIsCLDAliveClosure : public CLDClosure {
 912 public:
 913   void do_cld(ClassLoaderData* cld) {
 914     cld->is_alive();
 915   }
 916 };
 917 
 918 class ShenandoahIsNMethodAliveClosure: public NMethodClosure {
 919 public:
 920   void do_nmethod(nmethod* n) {
 921     n->is_unloading();
 922   }
 923 };
 924 
// This task not only evacuates/updates marked weak roots, but also "nulls"
// dead weak roots.
 927 class ShenandoahConcurrentWeakRootsEvacUpdateTask : public WorkerTask {
 928 private:
 929   ShenandoahVMWeakRoots<true /*concurrent*/> _vm_roots;
 930 
 931   // Roots related to concurrent class unloading
 932   ShenandoahClassLoaderDataRoots<true /* concurrent */>
 933                                              _cld_roots;
 934   ShenandoahConcurrentNMethodIterator        _nmethod_itr;
 935   ShenandoahPhaseTimings::Phase              _phase;
 936 
 937 public:
 938   ShenandoahConcurrentWeakRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
 939     WorkerTask("Shenandoah Evacuate/Update Concurrent Weak Roots"),
 940     _vm_roots(phase),
 941     _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers(), false /*heap iteration*/),
 942     _nmethod_itr(ShenandoahCodeRoots::table()),
 943     _phase(phase) {
 944     if (ShenandoahHeap::heap()->unload_classes()) {
 945       MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 946       _nmethod_itr.nmethods_do_begin();
 947     }
 948   }
 949 
 950   ~ShenandoahConcurrentWeakRootsEvacUpdateTask() {
 951     if (ShenandoahHeap::heap()->unload_classes()) {
 952       MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 953       _nmethod_itr.nmethods_do_end();
 954     }
 955     // Notify runtime data structures of potentially dead oops
 956     _vm_roots.report_num_dead();
 957   }
 958 
 959   void work(uint worker_id) {
 960     ShenandoahConcurrentWorkerSession worker_session(worker_id);
 961     ShenandoahSuspendibleThreadSetJoiner sts_join;
 962     {
 963       ShenandoahEvacOOMScope oom;
      // jni_roots and weak_roots are OopStorage-backed roots; concurrent iteration
      // may race against OopStorage::release() calls.
 966       ShenandoahEvacUpdateCleanupOopStorageRootsClosure cl;
 967       _vm_roots.oops_do(&cl, worker_id);
 968     }
 969 
    // If we are going to perform concurrent class unloading later on, we need to
    // clean up the weak oops in CLDs and determine each nmethod's unloading state, so that we
    // can clean up immediate garbage sooner.
 973     if (ShenandoahHeap::heap()->unload_classes()) {
      // Apply the ShenandoahIsCLDAliveClosure to CLDs; the native barrier will either null the
      // CLD's holder or evacuate it.
 976       {
 977         ShenandoahIsCLDAliveClosure is_cld_alive;
 978         _cld_roots.cld_do(&is_cld_alive, worker_id);
 979       }
 980 
      // Apply the ShenandoahIsNMethodAliveClosure to registered nmethods.
      // The closure calls nmethod->is_unloading(). The is_unloading
      // state is cached, so during the concurrent class unloading phase
      // we will not touch the metadata of unloading nmethods.
 985       {
 986         ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
 987         ShenandoahIsNMethodAliveClosure is_nmethod_alive;
 988         _nmethod_itr.nmethods_do(&is_nmethod_alive);
 989       }
 990     }
 991   }
 992 };
 993 
 994 void ShenandoahConcurrentGC::op_weak_roots() {
 995   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 996   assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
 997   // Concurrent weak root processing
 998   {
 999     ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_work);
1000     ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots_work);
1001     ShenandoahConcurrentWeakRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_weak_roots_work);
1002     heap->workers()->run_task(&task);
1003   }
1004 
1005   // Perform handshake to flush out dead oops
1006   {
1007     ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_rendezvous);
1008     heap->rendezvous_threads();
1009   }
  // We can only toggle the concurrent_weak_root_in_progress flag
  // at a safepoint, so that mutators see a consistent
  // value. The flag will be cleared at the next safepoint.
1013 }
1014 
1015 void ShenandoahConcurrentGC::op_class_unloading() {
1016   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1017   assert (heap->is_concurrent_weak_root_in_progress() &&
1018           heap->unload_classes(),
1019           "Checked by caller");
1020   heap->do_class_unloading();
1021 }
1022 
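// Evacuates/updates the oops embedded in an nmethod and then disarms its entry barrier, holding the
// per-nmethod reentrant lock for the duration.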
1023 class ShenandoahEvacUpdateCodeCacheClosure : public NMethodClosure {
1024 private:
1025   BarrierSetNMethod* const                  _bs;
1026   ShenandoahEvacuateUpdateMetadataClosure   _cl;
1027 
1028 public:
1029   ShenandoahEvacUpdateCodeCacheClosure() :
1030     _bs(BarrierSet::barrier_set()->barrier_set_nmethod()),
1031     _cl() {
1032   }
1033 
1034   void do_nmethod(nmethod* n) {
1035     ShenandoahNMethod* data = ShenandoahNMethod::gc_data(n);
1036     ShenandoahReentrantLocker locker(data->lock());
    // Set up the EvacOOM scope below the reentrant lock to avoid deadlock with
    // nmethod_entry_barrier.
1039     ShenandoahEvacOOMScope oom;
1040     data->oops_do(&_cl, true/*fix relocation*/);
1041     _bs->disarm(n);
1042   }
1043 };
1044 
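// Evacuates/updates concurrently-processed strong roots: VM roots, CLDs and, when classes are not
// being unloaded, the oops embedded in nmethods.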
1045 class ShenandoahConcurrentRootsEvacUpdateTask : public WorkerTask {
1046 private:
1047   ShenandoahPhaseTimings::Phase                 _phase;
1048   ShenandoahVMRoots<true /*concurrent*/>        _vm_roots;
1049   ShenandoahClassLoaderDataRoots<true /*concurrent*/>
1050                                                 _cld_roots;
1051   ShenandoahConcurrentNMethodIterator           _nmethod_itr;
1052 
1053 public:
1054   ShenandoahConcurrentRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
1055     WorkerTask("Shenandoah Evacuate/Update Concurrent Strong Roots"),
1056     _phase(phase),
1057     _vm_roots(phase),
1058     _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers(), false /*heap iteration*/),
1059     _nmethod_itr(ShenandoahCodeRoots::table()) {
1060     if (!ShenandoahHeap::heap()->unload_classes()) {
1061       MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1062       _nmethod_itr.nmethods_do_begin();
1063     }
1064   }
1065 
1066   ~ShenandoahConcurrentRootsEvacUpdateTask() {
1067     if (!ShenandoahHeap::heap()->unload_classes()) {
1068       MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1069       _nmethod_itr.nmethods_do_end();
1070     }
1071   }
1072 
1073   void work(uint worker_id) {
1074     ShenandoahConcurrentWorkerSession worker_session(worker_id);
1075     {
1076       ShenandoahEvacOOMScope oom;
1077       {
1078         // vm_roots and weak_roots are OopStorage backed roots, concurrent iteration
1079         // may race against OopStorage::release() calls.
1080         ShenandoahContextEvacuateUpdateRootsClosure cl;
1081         _vm_roots.oops_do<ShenandoahContextEvacuateUpdateRootsClosure>(&cl, worker_id);
1082       }
1083 
1084       {
1085         ShenandoahEvacuateUpdateMetadataClosure cl;
1086         CLDToOopClosure clds(&cl, ClassLoaderData::_claim_strong);
1087         _cld_roots.cld_do(&clds, worker_id);
1088       }
1089     }
1090 
    // Cannot set up ShenandoahEvacOOMScope here, due to potential deadlock with nmethod_entry_barrier.
1092     if (!ShenandoahHeap::heap()->unload_classes()) {
1093       ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
1094       ShenandoahEvacUpdateCodeCacheClosure cl;
1095       _nmethod_itr.nmethods_do(&cl);
1096     }
1097   }
1098 };
1099 
1100 void ShenandoahConcurrentGC::op_strong_roots() {
1101   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1102   assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
1103   ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
1104   heap->workers()->run_task(&task);
1105   heap->set_concurrent_strong_root_in_progress(false);
1106 }
1107 
1108 void ShenandoahConcurrentGC::op_cleanup_early() {
1109   ShenandoahHeap::heap()->free_set()->recycle_trash();
1110 }
1111 
1112 void ShenandoahConcurrentGC::op_evacuate() {
1113   ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
1114 }
1115 
1116 void ShenandoahConcurrentGC::op_init_updaterefs() {
1117   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1118   heap->set_evacuation_in_progress(false);
1119   heap->set_concurrent_weak_root_in_progress(false);
1120   heap->prepare_update_heap_references(true /*concurrent*/);
1121   heap->set_update_refs_in_progress(true);
1122   if (ShenandoahVerify) {
1123     heap->verifier()->verify_before_updaterefs();
1124   }
1125   if (ShenandoahPacing) {
1126     heap->pacer()->setup_for_updaterefs();
1127   }
1128 }
1129 
1130 void ShenandoahConcurrentGC::op_updaterefs() {
1131   ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
1132 }
1133 
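// Handshake closure that updates the oops held by each Java thread using ShenandoahUpdateRefsClosure.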
1134 class ShenandoahUpdateThreadClosure : public HandshakeClosure {
1135 private:
1136   ShenandoahUpdateRefsClosure _cl;
1137 public:
1138   ShenandoahUpdateThreadClosure();
1139   void do_thread(Thread* thread);
1140 };
1141 
1142 ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
1143   HandshakeClosure("Shenandoah Update Thread Roots") {
1144 }
1145 
1146 void ShenandoahUpdateThreadClosure::do_thread(Thread* thread) {
1147   if (thread->is_Java_thread()) {
1148     JavaThread* jt = JavaThread::cast(thread);
1149     ResourceMark rm;
1150     jt->oops_do(&_cl, nullptr);
1151   }
1152 }
1153 
1154 void ShenandoahConcurrentGC::op_update_thread_roots() {
1155   ShenandoahUpdateThreadClosure cl;
1156   Handshake::execute(&cl);
1157 }
1158 
1159 void ShenandoahConcurrentGC::op_final_updaterefs() {
1160   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1161   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
1162   assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");
1163 
1164   heap->finish_concurrent_roots();
1165 
1166   // Clear cancelled GC, if set. On cancellation path, the block before would handle
1167   // everything.
1168   if (heap->cancelled_gc()) {
1169     heap->clear_cancelled_gc(true /* clear oom handler */);
1170   }
1171 
  // Has to be done before the cset is cleared
1173   if (ShenandoahVerify) {
1174     heap->verifier()->verify_roots_in_to_space();
1175   }
1176 
1177   // If we are running in generational mode and this is an aging cycle, this will also age active
1178   // regions that haven't been used for allocation.
1179   heap->update_heap_region_states(true /*concurrent*/);
1180 
1181   heap->set_update_refs_in_progress(false);
1182   heap->set_has_forwarded_objects(false);
1183 
1184   if (heap->mode()->is_generational() && heap->is_concurrent_old_mark_in_progress()) {
1185     // When the SATB barrier is left on to support concurrent old gen mark, it may pick up writes to
1186     // objects in the collection set. After those objects are evacuated, the pointers in the
1187     // SATB are no longer safe. Once we have finished update references, we are guaranteed that
1188     // no more writes to the collection set are possible.
1189     //
1190     // This will transfer any old pointers in _active_ regions from the SATB to the old gen
1191     // mark queues. All other pointers will be discarded. This would also discard any pointers
1192     // in old regions that were included in a mixed evacuation. We aren't using the SATB filter
1193     // methods here because we cannot control when they execute. If the SATB filter runs _after_
1194     // a region has been recycled, we will not be able to detect the bad pointer.
1195     //
1196     // We are not concerned about skipping this step in abbreviated cycles because regions
1197     // with no live objects cannot have been written to and so cannot have entries in the SATB
1198     // buffers.
1199     heap->old_generation()->transfer_pointers_from_satb();
1200 
    // The aging cycle is only relevant during the evacuation phase for individual objects and during final mark
    // for entire regions.  Both of these operations occur before final update refs.
1203     ShenandoahGenerationalHeap::heap()->set_aging_cycle(false);
1204   }
1205 
1206   if (ShenandoahVerify) {
1207     heap->verifier()->verify_after_updaterefs();
1208   }
1209 
1210   if (VerifyAfterGC) {
1211     Universe::verify();
1212   }
1213 
1214   heap->rebuild_free_set(true /*concurrent*/);
1215 }
1216 
1217 void ShenandoahConcurrentGC::op_final_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
1220   heap->set_concurrent_weak_root_in_progress(false);
1221   heap->set_evacuation_in_progress(false);
1222 
1223   if (heap->mode()->is_generational()) {
1224     // If the cycle was shortened for having enough immediate garbage, this could be
1225     // the last GC safepoint before concurrent marking of old resumes. We must be sure
1226     // that old mark threads don't see any pointers to garbage in the SATB buffers.
1227     if (heap->is_concurrent_old_mark_in_progress()) {
1228       heap->old_generation()->transfer_pointers_from_satb();
1229     }
1230 
1231     if (!_generation->is_old()) {
1232       ShenandoahGenerationalHeap::heap()->update_region_ages(_generation->complete_marking_context());
1233     }
1234   }
1235 }
1236 
1237 void ShenandoahConcurrentGC::op_cleanup_complete() {
1238   ShenandoahHeap::heap()->free_set()->recycle_trash();
1239 }
1240 
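// If the GC has been cancelled, record the phase from which a degenerated cycle should resume and
// report true so the caller can abort the concurrent cycle.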
1241 bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
1242   if (ShenandoahHeap::heap()->cancelled_gc()) {
1243     _degen_point = point;
1244     return true;
1245   }
1246   return false;
1247 }
1248 
1249 const char* ShenandoahConcurrentGC::init_mark_event_message() const {
1250   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1251   assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1252   if (heap->unload_classes()) {
1253     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Init Mark", " (unload classes)");
1254   } else {
1255     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Init Mark", "");
1256   }
1257 }
1258 
1259 const char* ShenandoahConcurrentGC::final_mark_event_message() const {
1260   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1261   assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
1262          "Should not have forwarded objects during final mark, unless old gen concurrent mark is running");
1263 
1264   if (heap->unload_classes()) {
1265     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Mark", " (unload classes)");
1266   } else {
1267     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Mark", "");
1268   }
1269 }
1270 
1271 const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
1272   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1273   assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
         "Should not have forwarded objects during concurrent mark, unless old gen concurrent mark is running");
1275   if (heap->unload_classes()) {
1276     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent marking", " (unload classes)");
1277   } else {
1278     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent marking", "");
1279   }
1280 }