/*
 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shenandoah/shenandoahBreakpoint.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahLock.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahStackWatermark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "memory/allocation.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/events.hpp"

// Breakpoint support
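// These StackObj scopes drive the whitebox test breakpoints: the GC scope
// brackets the whole cycle, the mark scope brackets concurrent marking.
// They only fire when the cycle was triggered by GCCause::_wb_breakpoint.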
class ShenandoahBreakpointGCScope : public StackObj {
private:
  const GCCause::Cause _cause;
public:
  ShenandoahBreakpointGCScope(GCCause::Cause cause) : _cause(cause) {
    if (cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::start_gc();
      ShenandoahBreakpoint::at_before_gc();
    }
  }

  ~ShenandoahBreakpointGCScope() {
    if (_cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::at_after_gc();
    }
  }
};

class ShenandoahBreakpointMarkScope : public StackObj {
private:
  const GCCause::Cause _cause;
public:
  ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
    if (_cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::at_after_marking_started();
    }
  }

  ~ShenandoahBreakpointMarkScope() {
    if (_cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::at_before_marking_completed();
    }
  }
};

ShenandoahConcurrentGC::ShenandoahConcurrentGC() :
  _mark(),
  _degen_point(ShenandoahDegenPoint::_degenerated_unset),
  _abbreviated(false) {
}

ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
  return _degen_point;
}

void ShenandoahConcurrentGC::cancel() {
  ShenandoahConcurrentMark::cancel();
}

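// Run one complete concurrent cycle:
//   reset -> init mark (STW) -> concurrent mark -> final mark (STW) ->
//   concurrent evacuation -> update-refs -> final update-refs (STW) -> cleanup.
// Returns false if the cycle is cancelled at any of the checks below; the
// recorded degeneration point tells the degenerated GC where to resume.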
bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);

  // Reset for upcoming marking
  entry_reset();

  // Start initial mark under STW
  vmop_entry_init_mark();

  {
    ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);
    // Concurrent mark roots
    entry_mark_roots();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_outside_cycle)) {
      return false;
    }

    // Continue concurrent mark
    entry_mark();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) {
      return false;
    }
  }

  // Complete marking under STW, and start evacuation
  vmop_entry_final_mark();

  // If the GC was cancelled before final mark, nothing happens on the safepoint. We are still
  // in the marking phase and must resume the degenerated cycle from there. If the GC was cancelled
  // after final mark, then we've entered the evacuation phase and must resume the degenerated cycle
  // from that phase.
  if (heap->is_concurrent_mark_in_progress()) {
    bool cancelled = check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark);
    assert(cancelled, "GC must have been cancelled between concurrent and final mark");
    return false;
  }

  // Concurrent stack processing
  if (heap->is_evacuation_in_progress()) {
    entry_thread_roots();
  }

  // Process weak roots that might still point to regions that would be broken by cleanup
  if (heap->is_concurrent_weak_root_in_progress()) {
    entry_weak_refs();
    entry_weak_roots();
  }

  // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
  // the space. This would be the last action if there is nothing to evacuate.
  entry_cleanup_early();

  heap->free_set()->log_status_under_lock();

  // Perform concurrent class unloading
  if (heap->unload_classes() &&
      heap->is_concurrent_weak_root_in_progress()) {
    entry_class_unloading();
  }

  // Processing strong roots
  // This may be skipped if there is nothing to update/evacuate.
  // If so, strong_root_in_progress would be unset.
  if (heap->is_concurrent_strong_root_in_progress()) {
    entry_strong_roots();
  }

  // Continue the cycle with evacuation and optional update-refs.
  // This may be skipped if there is nothing to evacuate.
  // If so, evac_in_progress would be unset by collection set preparation code.
  if (heap->is_evacuation_in_progress()) {
    // Concurrently evacuate
    entry_evacuate();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) {
      return false;
    }

    // Perform update-refs phase.
    vmop_entry_init_updaterefs();
    entry_updaterefs();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) {
      return false;
    }

    // Concurrent update thread roots
    entry_update_thread_roots();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) {
      return false;
    }

    vmop_entry_final_updaterefs();

    // Update-refs has freed up the collection set; kick off cleanup to reclaim the space.
    entry_cleanup_complete();
  } else {
    vmop_entry_final_roots();
    _abbreviated = true;
  }

  return true;
}

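// Naming convention for the phases below: vmop_entry_* executes a VM operation
// that brings all threads to a safepoint and then calls the matching entry_*
// method; entry_* sets up timings, event reporting and worker scopes, and then
// calls op_*, which does the actual work.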
void ShenandoahConcurrentGC::vmop_entry_init_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahInitMark op(this);
  VMThread::execute(&op); // jump to entry_init_mark() under safepoint
}

void ShenandoahConcurrentGC::vmop_entry_final_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalMarkStartEvac op(this);
  VMThread::execute(&op); // jump to entry_final_mark() under safepoint
}

void ShenandoahConcurrentGC::vmop_entry_init_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_update_refs_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahInitUpdateRefs op(this);
  VMThread::execute(&op);
}

void ShenandoahConcurrentGC::vmop_entry_final_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalUpdateRefs op(this);
  VMThread::execute(&op);
}

void ShenandoahConcurrentGC::vmop_entry_final_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_roots_gross);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalRoots op(this);
  VMThread::execute(&op);
}

void ShenandoahConcurrentGC::entry_init_mark() {
  const char* msg = init_mark_event_message();
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
                              "init marking");

  op_init_mark();
}

void ShenandoahConcurrentGC::entry_final_mark() {
  const char* msg = final_mark_event_message();
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
                              "final marking");

  op_final_mark();
}

void ShenandoahConcurrentGC::entry_init_updaterefs() {
  static const char* msg = "Pause Init Update Refs";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs);
  EventMark em("%s", msg);

  // No workers used in this phase, no setup required
  op_init_updaterefs();
}

void ShenandoahConcurrentGC::entry_final_updaterefs() {
  static const char* msg = "Pause Final Update Refs";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
                              "final reference update");

  op_final_updaterefs();
}

void ShenandoahConcurrentGC::entry_final_roots() {
  static const char* msg = "Pause Final Roots";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots);
  EventMark em("%s", msg);

  op_final_roots();
}

void ShenandoahConcurrentGC::entry_reset() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent reset";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
                              "concurrent reset");

  heap->try_inject_alloc_failure();
  op_reset();
}

void ShenandoahConcurrentGC::entry_mark_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = "Concurrent marking roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent marking roots");

  heap->try_inject_alloc_failure();
  op_mark_roots();
}

void ShenandoahConcurrentGC::entry_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = conc_mark_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent marking");

  heap->try_inject_alloc_failure();
  op_mark();
}

void ShenandoahConcurrentGC::entry_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  static const char* msg = "Concurrent thread roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_thread_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              msg);

  heap->try_inject_alloc_failure();
  op_thread_roots();
}

void ShenandoahConcurrentGC::entry_weak_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  static const char* msg = "Concurrent weak references";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_refs_processing(),
                              "concurrent weak references");

  heap->try_inject_alloc_failure();
  op_weak_refs();
}

void ShenandoahConcurrentGC::entry_weak_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent weak roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent weak root");

  heap->try_inject_alloc_failure();
  op_weak_roots();
}

void ShenandoahConcurrentGC::entry_class_unloading() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent class unloading";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_class_unload);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent class unloading");

  heap->try_inject_alloc_failure();
  op_class_unloading();
}

void ShenandoahConcurrentGC::entry_strong_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent strong roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_strong_roots);
  EventMark em("%s", msg);

  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_strong_roots);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent strong root");

  heap->try_inject_alloc_failure();
  op_strong_roots();
}

void ShenandoahConcurrentGC::entry_cleanup_early() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent cleanup";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_early, true /* log_heap_usage */);
  EventMark em("%s", msg);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  op_cleanup_early();
}

void ShenandoahConcurrentGC::entry_evacuate() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  static const char* msg = "Concurrent evacuation";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_evac);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
                              "concurrent evacuation");

  heap->try_inject_alloc_failure();
  op_evacuate();
}

void ShenandoahConcurrentGC::entry_update_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  static const char* msg = "Concurrent update thread roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_thread_roots);
  EventMark em("%s", msg);

  // No workers used in this phase, no setup required
  heap->try_inject_alloc_failure();
  op_update_thread_roots();
}

void ShenandoahConcurrentGC::entry_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent update references";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
                              "concurrent reference update");

  heap->try_inject_alloc_failure();
  op_updaterefs();
}

void ShenandoahConcurrentGC::entry_cleanup_complete() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent cleanup";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
  EventMark em("%s", msg);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  op_cleanup_complete();
}

void ShenandoahConcurrentGC::op_reset() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_reset();
  }

  heap->prepare_gc();
}

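// Init-mark region closure: capture top-at-mark-start (TAMS) for each active
// region, skipping the write when concurrent reset has already left TAMS at top.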
class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
public:
  ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
    if (r->is_active()) {
      // Check if the region needs its TAMS updated. We have already updated it
      // during concurrent reset, so it is very likely we don't need another write here.
      if (_ctx->top_at_mark_start(r) != r->top()) {
        _ctx->capture_top_at_mark_start(r);
      }
    } else {
      assert(_ctx->top_at_mark_start(r) == r->top(),
             "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
    }
  }

  bool is_thread_safe() { return true; }
};

void ShenandoahConcurrentGC::start_mark() {
  _mark.start_mark();
}

void ShenandoahConcurrentGC::op_init_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");

  assert(heap->marking_context()->is_bitmap_clear(), "need clear marking bitmap");
  assert(!heap->marking_context()->is_complete(), "should not be complete");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");

  if (ShenandoahVerify) {
    heap->verifier()->verify_before_concmark();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  heap->set_concurrent_mark_in_progress(true);

  start_mark();

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
    ShenandoahInitMarkUpdateRegionStateClosure cl;
    heap->parallel_heap_region_iterate(&cl);
  }

  // Weak reference processing
  ShenandoahReferenceProcessor* rp = heap->ref_processor();
  rp->reset_thread_locals();
  rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());

  // Make above changes visible to worker threads
  OrderAccess::fence();

  // Arm nmethods for concurrent mark
  ShenandoahCodeRoots::arm_nmethods_for_mark();

  ShenandoahStackWatermark::change_epoch_id();
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_mark();
  }
}

void ShenandoahConcurrentGC::op_mark_roots() {
  _mark.mark_concurrent_roots();
}

void ShenandoahConcurrentGC::op_mark() {
  _mark.concurrent_mark();
}

void ShenandoahConcurrentGC::op_final_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");

  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_no_forwarded();
  }

  if (!heap->cancelled_gc()) {
    _mark.finish_mark();
    assert(!heap->cancelled_gc(), "STW mark cannot OOM");

    // Notify JVMTI that the tagmap table will need cleaning.
    JvmtiTagMap::set_needs_cleaning();

    heap->prepare_regions_and_collection_set(true /*concurrent*/);

    // Has to be done after cset selection
    heap->prepare_concurrent_roots();

    if (!heap->collection_set()->is_empty()) {
      if (ShenandoahVerify) {
        heap->verifier()->verify_before_evacuation();
      }

      heap->set_evacuation_in_progress(true);
      // From here on, we need to update references.
      heap->set_has_forwarded_objects(true);

      // Arm nmethods/stack for concurrent processing
      ShenandoahCodeRoots::arm_nmethods_for_evac();
      ShenandoahStackWatermark::change_epoch_id();

      if (ShenandoahPacing) {
        heap->pacer()->setup_for_evac();
      }
    } else {
      if (ShenandoahVerify) {
        heap->verifier()->verify_after_concmark();
      }

      if (VerifyAfterGC) {
        Universe::verify();
      }
    }
  }
}

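// Finishes stack watermark processing for a Java thread, applying the given
// oop closure to any thread roots the watermark has not yet processed.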
class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
private:
  OopClosure* const _oops;

public:
  ShenandoahConcurrentEvacThreadClosure(OopClosure* oops);
  void do_thread(Thread* thread);
};

ShenandoahConcurrentEvacThreadClosure::ShenandoahConcurrentEvacThreadClosure(OopClosure* oops) :
  _oops(oops) {
}

void ShenandoahConcurrentEvacThreadClosure::do_thread(Thread* thread) {
  JavaThread* const jt = JavaThread::cast(thread);
  StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc);
}

class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask {
private:
  ShenandoahJavaThreadsIterator _java_threads;

public:
  ShenandoahConcurrentEvacUpdateThreadTask(uint n_workers) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Thread Roots"),
    _java_threads(ShenandoahPhaseTimings::conc_thread_roots, n_workers) {
  }

  void work(uint worker_id) {
    // ShenandoahEvacOOMScope has to be set up by ShenandoahContextEvacuateUpdateRootsClosure.
    // Otherwise, it may deadlock with the watermark lock.
    ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
    ShenandoahConcurrentEvacThreadClosure thr_cl(&oops_cl);
    _java_threads.threads_do(&thr_cl, worker_id);
  }
};

void ShenandoahConcurrentGC::op_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_evacuation_in_progress(), "Checked by caller");
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
  ShenandoahConcurrentEvacUpdateThreadTask task(heap->workers()->active_workers());
  heap->workers()->run_task(&task);
}

void ShenandoahConcurrentGC::op_weak_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
  // Concurrent weak refs processing
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
  if (heap->gc_cause() == GCCause::_wb_breakpoint) {
    ShenandoahBreakpoint::at_after_reference_processing_started();
  }
  heap->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
}

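// Closure over OopStorage-backed (weak) roots: clears slots that refer to
// unmarked (dead) objects, and evacuates/updates slots that point into the
// collection set while evacuation is still in progress.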
class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _mark_context;
  bool  _evac_in_progress;
  Thread* const _thread;

public:
  ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
  void do_oop(oop* p);
  void do_oop(narrowOop* p);
};

ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
  _heap(ShenandoahHeap::heap()),
  _mark_context(ShenandoahHeap::heap()->marking_context()),
  _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
  _thread(Thread::current()) {
}

void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
  const oop obj = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(obj)) {
    if (!_mark_context->is_marked(obj)) {
      // Note: The obj is dead here. Do not touch it, just clear.
      ShenandoahHeap::atomic_clear_oop(p, obj);
    } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
      oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      if (resolved == obj) {
        resolved = _heap->evacuate_object(obj, _thread);
      }
      shenandoah_assert_not_in_cset_except(p, resolved, _heap->cancelled_gc());
      ShenandoahHeap::atomic_update_oop(resolved, p, obj);
    }
  }
}

void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
  ShouldNotReachHere();
}

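// The two closures below are invoked purely for their side effects: is_alive()
// applies the native load barrier to the CLD holder, and is_unloading()
// computes and caches the nmethod's unloading state.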
class ShenandoahIsCLDAliveClosure : public CLDClosure {
public:
  void do_cld(ClassLoaderData* cld) {
    cld->is_alive();
  }
};

class ShenandoahIsNMethodAliveClosure : public NMethodClosure {
public:
  void do_nmethod(nmethod* n) {
    n->is_unloading();
  }
};

// This task not only evacuates/updates marked weak roots, but also nulls out
// dead weak roots.
class ShenandoahConcurrentWeakRootsEvacUpdateTask : public WorkerTask {
private:
  ShenandoahVMWeakRoots<true /*concurrent*/> _vm_roots;

  // Roots related to concurrent class unloading
  ShenandoahClassLoaderDataRoots<true /* concurrent */>
                                             _cld_roots;
  ShenandoahConcurrentNMethodIterator        _nmethod_itr;
  ShenandoahPhaseTimings::Phase              _phase;

public:
  ShenandoahConcurrentWeakRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Weak Roots"),
    _vm_roots(phase),
    _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers(), false /*heap iteration*/),
    _nmethod_itr(ShenandoahCodeRoots::table()),
    _phase(phase) {}

  ~ShenandoahConcurrentWeakRootsEvacUpdateTask() {
    // Notify runtime data structures of potentially dead oops
    _vm_roots.report_num_dead();
  }

  void work(uint worker_id) {
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    ShenandoahSuspendibleThreadSetJoiner sts_join;
    {
      ShenandoahEvacOOMScope oom;
      // vm_roots and weak_roots are OopStorage-backed roots; concurrent iteration
      // may race against OopStorage::release() calls.
      ShenandoahEvacUpdateCleanupOopStorageRootsClosure cl;
      _vm_roots.oops_do(&cl, worker_id);
    }

    // If we are going to perform concurrent class unloading later on, we need to
    // clean up the weak oops in CLDs and determine the nmethods' unloading state,
    // so that we can clean up immediate garbage sooner.
    if (ShenandoahHeap::heap()->unload_classes()) {
      // Apply the ShenandoahIsCLDAliveClosure to CLDs; the native barrier will
      // either null the CLD's holder or evacuate it.
      {
        ShenandoahIsCLDAliveClosure is_cld_alive;
        _cld_roots.cld_do(&is_cld_alive, worker_id);
      }

      // Apply ShenandoahIsNMethodAliveClosure to registered nmethods.
      // The closure calls nmethod->is_unloading(). The is_unloading
      // state is cached; therefore, during the concurrent class unloading phase,
      // we will not touch the metadata of unloading nmethods.
      {
        ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
        ShenandoahIsNMethodAliveClosure is_nmethod_alive;
        _nmethod_itr.nmethods_do(&is_nmethod_alive);
      }
    }
  }
};

void ShenandoahConcurrentGC::op_weak_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
  // Concurrent weak root processing
  {
    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_work);
    ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots_work);
    ShenandoahConcurrentWeakRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_weak_roots_work);
    heap->workers()->run_task(&task);
  }

  // Perform handshake to flush out dead oops
  {
    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_rendezvous);
    heap->rendezvous_threads("Shenandoah Concurrent Weak Roots");
  }
}

void ShenandoahConcurrentGC::op_class_unloading() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert (heap->is_concurrent_weak_root_in_progress() &&
          heap->unload_classes(),
          "Checked by caller");
  heap->do_class_unloading();
}

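// Evacuates/updates the oops embedded in an nmethod and then disarms its entry
// barrier, all under the nmethod's reentrant GC lock.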
class ShenandoahEvacUpdateCodeCacheClosure : public NMethodClosure {
private:
  BarrierSetNMethod* const                  _bs;
  ShenandoahEvacuateUpdateMetadataClosure   _cl;

public:
  ShenandoahEvacUpdateCodeCacheClosure() :
    _bs(BarrierSet::barrier_set()->barrier_set_nmethod()),
    _cl() {
  }

  void do_nmethod(nmethod* n) {
    ShenandoahNMethod* data = ShenandoahNMethod::gc_data(n);
    ShenandoahReentrantLocker locker(data->lock());
    // Set up the EvacOOM scope below the reentrant lock to avoid a deadlock with
    // nmethod_entry_barrier
    ShenandoahEvacOOMScope oom;
    data->oops_do(&_cl, true/*fix relocation*/);
    _bs->disarm(n);
  }
};

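// Task for the concurrent strong roots: evacuate/update VM roots and CLDs and,
// when class unloading is off, the code cache roots as well.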
class ShenandoahConcurrentRootsEvacUpdateTask : public WorkerTask {
private:
  ShenandoahPhaseTimings::Phase                 _phase;
  ShenandoahVMRoots<true /*concurrent*/>        _vm_roots;
  ShenandoahClassLoaderDataRoots<true /*concurrent*/>
                                                _cld_roots;
  ShenandoahConcurrentNMethodIterator           _nmethod_itr;

public:
  ShenandoahConcurrentRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Strong Roots"),
    _phase(phase),
    _vm_roots(phase),
    _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers(), false /*heap iteration*/),
    _nmethod_itr(ShenandoahCodeRoots::table()) {}

  void work(uint worker_id) {
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    {
      ShenandoahEvacOOMScope oom;
      {
        // vm_roots and weak_roots are OopStorage-backed roots; concurrent iteration
        // may race against OopStorage::release() calls.
        ShenandoahContextEvacuateUpdateRootsClosure cl;
        _vm_roots.oops_do<ShenandoahContextEvacuateUpdateRootsClosure>(&cl, worker_id);
      }

      {
        ShenandoahEvacuateUpdateMetadataClosure cl;
        CLDToOopClosure clds(&cl, ClassLoaderData::_claim_strong);
        _cld_roots.cld_do(&clds, worker_id);
      }
    }

    // Cannot set up ShenandoahEvacOOMScope here, due to a potential deadlock with nmethod_entry_barrier.
    if (!ShenandoahHeap::heap()->unload_classes()) {
      ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
      ShenandoahEvacUpdateCodeCacheClosure cl;
      _nmethod_itr.nmethods_do(&cl);
    }
  }
};

void ShenandoahConcurrentGC::op_strong_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
  ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
  heap->workers()->run_task(&task);
  heap->set_concurrent_strong_root_in_progress(false);
}

void ShenandoahConcurrentGC::op_cleanup_early() {
  ShenandoahHeap::heap()->free_set()->recycle_trash();
}

void ShenandoahConcurrentGC::op_evacuate() {
  ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
}

void ShenandoahConcurrentGC::op_init_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->set_evacuation_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);
  heap->prepare_update_heap_references(true /*concurrent*/);
  if (ShenandoahVerify) {
    heap->verifier()->verify_before_updaterefs();
  }

  heap->set_update_refs_in_progress(true);
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_updaterefs();
  }
}

void ShenandoahConcurrentGC::op_updaterefs() {
  ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
}

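// Handshake closure that updates references in the roots of each Java thread,
// used by the concurrent update-thread-roots phase.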
class ShenandoahUpdateThreadClosure : public HandshakeClosure {
private:
  ShenandoahUpdateRefsClosure _cl;
public:
  ShenandoahUpdateThreadClosure();
  void do_thread(Thread* thread);
};

ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
  HandshakeClosure("Shenandoah Update Thread Roots") {
}

void ShenandoahUpdateThreadClosure::do_thread(Thread* thread) {
  if (thread->is_Java_thread()) {
    JavaThread* jt = JavaThread::cast(thread);
    ResourceMark rm;
    jt->oops_do(&_cl, nullptr);
  }
}

void ShenandoahConcurrentGC::op_update_thread_roots() {
  ShenandoahUpdateThreadClosure cl;
  Handshake::execute(&cl);
}

void ShenandoahConcurrentGC::op_final_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
  assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");

  heap->finish_concurrent_roots();

  // Clear the cancelled-GC flag, if set. On the cancellation path, the block
  // before would have handled everything.
  if (heap->cancelled_gc()) {
    heap->clear_cancelled_gc();
  }

  // Has to be done before the cset is cleared
  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_in_to_space();
  }

  heap->update_heap_region_states(true /*concurrent*/);

  heap->set_update_refs_in_progress(false);
  heap->set_has_forwarded_objects(false);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_updaterefs();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  heap->rebuild_free_set(true /*concurrent*/);
}

void ShenandoahConcurrentGC::op_final_roots() {
  ShenandoahHeap::heap()->set_concurrent_weak_root_in_progress(false);
}

void ShenandoahConcurrentGC::op_cleanup_complete() {
  ShenandoahHeap::heap()->free_set()->recycle_trash();
}

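// Record where the cycle was cancelled, so the caller can bail out and a
// degenerated GC can resume the work from that point.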
bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
  if (ShenandoahHeap::heap()->cancelled_gc()) {
    _degen_point = point;
    return true;
  }
  return false;
}

const char* ShenandoahConcurrentGC::init_mark_event_message() const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
  if (heap->unload_classes()) {
    return "Pause Init Mark (unload classes)";
  } else {
    return "Pause Init Mark";
  }
}

const char* ShenandoahConcurrentGC::final_mark_event_message() const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
  if (heap->unload_classes()) {
    return "Pause Final Mark (unload classes)";
  } else {
    return "Pause Final Mark";
  }
}

const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
  if (heap->unload_classes()) {
    return "Concurrent marking (unload classes)";
  } else {
    return "Concurrent marking";
  }
}