/*
 * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shenandoah/shenandoahBreakpoint.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahLock.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahStackWatermark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "memory/allocation.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/events.hpp"

// Breakpoint support
class ShenandoahBreakpointGCScope : public StackObj {
private:
  const GCCause::Cause _cause;
public:
  ShenandoahBreakpointGCScope(GCCause::Cause cause) : _cause(cause) {
    if (cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::start_gc();
      ShenandoahBreakpoint::at_before_gc();
    }
  }

  ~ShenandoahBreakpointGCScope() {
    if (_cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::at_after_gc();
    }
  }
};

class ShenandoahBreakpointMarkScope : public StackObj {
private:
  const GCCause::Cause _cause;
public:
  ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
    if (_cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::at_after_marking_started();
    }
  }

  ~ShenandoahBreakpointMarkScope() {
    if (_cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::at_before_marking_completed();
    }
  }
};

ShenandoahConcurrentGC::ShenandoahConcurrentGC() :
  _mark(),
  _degen_point(ShenandoahDegenPoint::_degenerated_unset),
  _abbreviated(false) {
}

ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
  return _degen_point;
}

void ShenandoahConcurrentGC::cancel() {
  ShenandoahConcurrentMark::cancel();
}

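// Drive one complete concurrent collection cycle: reset, concurrent mark,
// final mark (which starts evacuation if needed), concurrent root processing,
// evacuation, update-refs, and cleanup. Returns true if the cycle ran to
// completion, false if it was cancelled and should be finished by a
// degenerated (stop-the-world) cycle from the recorded degeneration point.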
bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);

  // Reset for upcoming marking
  entry_reset();

  // Start initial mark under STW
  vmop_entry_init_mark();

  {
    ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);
    // Concurrent mark roots
    entry_mark_roots();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_outside_cycle)) {
      return false;
    }

    // Continue concurrent mark
    entry_mark();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) {
      return false;
    }
  }

  // Complete marking under STW, and start evacuation
  vmop_entry_final_mark();

  // Concurrent stack processing
  if (heap->is_evacuation_in_progress()) {
    entry_thread_roots();
  }

  // Process weak roots that might still point to regions that would be broken by cleanup
  if (heap->is_concurrent_weak_root_in_progress()) {
    entry_weak_refs();
    entry_weak_roots();
  }

  // Final mark might have reclaimed some immediate garbage; kick off cleanup to reclaim
  // the space. This is the last action if there is nothing to evacuate.
  entry_cleanup_early();

  {
    ShenandoahHeapLocker locker(heap->lock());
    heap->free_set()->log_status();
  }

  // Perform concurrent class unloading
  if (heap->unload_classes() &&
      heap->is_concurrent_weak_root_in_progress()) {
    entry_class_unloading();
  }

  // Process strong roots.
  // This may be skipped if there is nothing to update/evacuate;
  // if so, strong_root_in_progress would be unset.
  if (heap->is_concurrent_strong_root_in_progress()) {
    entry_strong_roots();
  }

  // Continue the cycle with evacuation and optional update-refs.
  // This may be skipped if there is nothing to evacuate;
  // if so, evac_in_progress would be unset by collection set preparation code.
  if (heap->is_evacuation_in_progress()) {
    // Concurrently evacuate
    entry_evacuate();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) {
      return false;
    }

    // Perform update-refs phase.
    vmop_entry_init_updaterefs();
    entry_updaterefs();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) {
      return false;
    }

    // Concurrently update thread roots
    entry_update_thread_roots();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) {
      return false;
    }

    vmop_entry_final_updaterefs();

    // Update-refs has freed up the collection set; kick off cleanup to reclaim the space.
    entry_cleanup_complete();
  } else {
    vmop_entry_final_roots();
    _abbreviated = true;
  }

  return true;
}

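// The methods below follow a common structure: vmop_entry_* methods schedule a
// VM operation, so the corresponding work runs in a safepoint pause, while
// entry_* methods set up timing trackers, event messages and worker threads
// before delegating the actual work to their op_* counterpart.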
void ShenandoahConcurrentGC::vmop_entry_init_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahInitMark op(this);
  VMThread::execute(&op); // jump to entry_init_mark() under safepoint
}

void ShenandoahConcurrentGC::vmop_entry_final_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalMarkStartEvac op(this);
  VMThread::execute(&op); // jump to entry_final_mark under safepoint
}

void ShenandoahConcurrentGC::vmop_entry_init_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_update_refs_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahInitUpdateRefs op(this);
  VMThread::execute(&op);
}

void ShenandoahConcurrentGC::vmop_entry_final_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalUpdateRefs op(this);
  VMThread::execute(&op);
}

void ShenandoahConcurrentGC::vmop_entry_final_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_roots_gross);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalRoots op(this);
  VMThread::execute(&op);
}

void ShenandoahConcurrentGC::entry_init_mark() {
  const char* msg = init_mark_event_message();
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
                              "init marking");

  op_init_mark();
}

void ShenandoahConcurrentGC::entry_final_mark() {
  const char* msg = final_mark_event_message();
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
                              "final marking");

  op_final_mark();
}

void ShenandoahConcurrentGC::entry_init_updaterefs() {
  static const char* msg = "Pause Init Update Refs";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs);
  EventMark em("%s", msg);

  // No workers used in this phase, no setup required
  op_init_updaterefs();
}

void ShenandoahConcurrentGC::entry_final_updaterefs() {
  static const char* msg = "Pause Final Update Refs";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
                              "final reference update");

  op_final_updaterefs();
}

void ShenandoahConcurrentGC::entry_final_roots() {
  static const char* msg = "Pause Final Roots";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots);
  EventMark em("%s", msg);

  op_final_roots();
}

void ShenandoahConcurrentGC::entry_reset() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent reset";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
                              "concurrent reset");

  heap->try_inject_alloc_failure();
  op_reset();
}

void ShenandoahConcurrentGC::entry_mark_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = "Concurrent marking roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent marking roots");

  heap->try_inject_alloc_failure();
  op_mark_roots();
}

void ShenandoahConcurrentGC::entry_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = conc_mark_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent marking");

  heap->try_inject_alloc_failure();
  op_mark();
}

void ShenandoahConcurrentGC::entry_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  static const char* msg = "Concurrent thread roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_thread_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              msg);

  heap->try_inject_alloc_failure();
  op_thread_roots();
}

void ShenandoahConcurrentGC::entry_weak_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  static const char* msg = "Concurrent weak references";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_refs_processing(),
                              "concurrent weak references");

  heap->try_inject_alloc_failure();
  op_weak_refs();
}

void ShenandoahConcurrentGC::entry_weak_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent weak roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent weak root");

  heap->try_inject_alloc_failure();
  op_weak_roots();
}

void ShenandoahConcurrentGC::entry_class_unloading() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent class unloading";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_class_unload);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent class unloading");

  heap->try_inject_alloc_failure();
  op_class_unloading();
}

void ShenandoahConcurrentGC::entry_strong_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent strong roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_strong_roots);
  EventMark em("%s", msg);

  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_strong_roots);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent strong root");

  heap->try_inject_alloc_failure();
  op_strong_roots();
}

void ShenandoahConcurrentGC::entry_cleanup_early() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent cleanup";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_early, true /* log_heap_usage */);
  EventMark em("%s", msg);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  op_cleanup_early();
}

void ShenandoahConcurrentGC::entry_evacuate() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  static const char* msg = "Concurrent evacuation";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_evac);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
                              "concurrent evacuation");

  heap->try_inject_alloc_failure();
  op_evacuate();
}

void ShenandoahConcurrentGC::entry_update_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  static const char* msg = "Concurrent update thread roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_thread_roots);
  EventMark em("%s", msg);

  // No workers used in this phase, no setup required
  heap->try_inject_alloc_failure();
  op_update_thread_roots();
}

void ShenandoahConcurrentGC::entry_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent update references";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
                              "concurrent reference update");

  heap->try_inject_alloc_failure();
  op_updaterefs();
}

void ShenandoahConcurrentGC::entry_cleanup_complete() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent cleanup";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
  EventMark em("%s", msg);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  op_cleanup_complete();
}

void ShenandoahConcurrentGC::op_reset() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_reset();
  }

  heap->prepare_gc();
}

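// Init-mark closure: sets up per-region state for the upcoming marking by
// capturing top-at-mark-start (TAMS) for each active region. Objects allocated
// past TAMS during the cycle are then treated as implicitly live.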
class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
public:
  ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
    if (r->is_active()) {
      // Check if the region needs its TAMS updated. We have already updated it
      // during concurrent reset, so it is very likely we do not need another write here.
      if (_ctx->top_at_mark_start(r) != r->top()) {
        _ctx->capture_top_at_mark_start(r);
      }
    } else {
      assert(_ctx->top_at_mark_start(r) == r->top(),
             "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
    }
  }

  bool is_thread_safe() { return true; }
};

void ShenandoahConcurrentGC::start_mark() {
  _mark.start_mark();
}

void ShenandoahConcurrentGC::op_init_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");

  assert(heap->marking_context()->is_bitmap_clear(), "need clear marking bitmap");
  assert(!heap->marking_context()->is_complete(), "should not be complete");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");

  if (ShenandoahVerify) {
    heap->verifier()->verify_before_concmark();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  heap->set_concurrent_mark_in_progress(true);

  start_mark();

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
    ShenandoahInitMarkUpdateRegionStateClosure cl;
    heap->parallel_heap_region_iterate(&cl);
  }

  // Weak reference processing
  ShenandoahReferenceProcessor* rp = heap->ref_processor();
  rp->reset_thread_locals();
  rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());

  // Make above changes visible to worker threads
  OrderAccess::fence();

  // Arm nmethods for concurrent mark
  ShenandoahCodeRoots::arm_nmethods_for_mark();

  ShenandoahStackWatermark::change_epoch_id();
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_mark();
  }
}

void ShenandoahConcurrentGC::op_mark_roots() {
  _mark.mark_concurrent_roots();
}

void ShenandoahConcurrentGC::op_mark() {
  _mark.concurrent_mark();
}

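// Final mark: finish concurrent marking, select the collection set, and flip
// the heap into evacuation mode if there is anything to evacuate. If the
// collection set turns out empty, collect() finishes the cycle through the
// abbreviated vmop_entry_final_roots() path instead.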
void ShenandoahConcurrentGC::op_final_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");

  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_no_forwarded();
  }

  if (!heap->cancelled_gc()) {
    _mark.finish_mark();
    assert(!heap->cancelled_gc(), "STW mark cannot OOM");

    // Notify JVMTI that the tagmap table will need cleaning.
    JvmtiTagMap::set_needs_cleaning();

    heap->prepare_regions_and_collection_set(true /*concurrent*/);

    // Has to be done after cset selection
    heap->prepare_concurrent_roots();

    if (!heap->collection_set()->is_empty()) {
      if (ShenandoahVerify) {
        heap->verifier()->verify_before_evacuation();
      }

      heap->set_evacuation_in_progress(true);
      // From here on, we need to update references.
      heap->set_has_forwarded_objects(true);

      // Verify before arming for concurrent processing.
      // Otherwise, verification can trigger stack processing.
      if (ShenandoahVerify) {
        heap->verifier()->verify_during_evacuation();
      }

      // Arm nmethods/stack for concurrent processing
      ShenandoahCodeRoots::arm_nmethods_for_evac();
      ShenandoahStackWatermark::change_epoch_id();

      if (ShenandoahPacing) {
        heap->pacer()->setup_for_evac();
      }
    } else {
      if (ShenandoahVerify) {
        heap->verifier()->verify_after_concmark();
      }

      if (VerifyAfterGC) {
        Universe::verify();
      }
    }
  }
}

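// Thread closure for concurrent stack processing: finishes the (lazy) stack
// watermark processing of a Java thread, evacuating/updating any remaining
// oops on its stack.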
class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
private:
  OopClosure* const _oops;

public:
  ShenandoahConcurrentEvacThreadClosure(OopClosure* oops);
  void do_thread(Thread* thread);
};

ShenandoahConcurrentEvacThreadClosure::ShenandoahConcurrentEvacThreadClosure(OopClosure* oops) :
  _oops(oops) {
}

void ShenandoahConcurrentEvacThreadClosure::do_thread(Thread* thread) {
  JavaThread* const jt = JavaThread::cast(thread);
  StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc);
}

class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask {
private:
  ShenandoahJavaThreadsIterator _java_threads;

public:
  ShenandoahConcurrentEvacUpdateThreadTask(uint n_workers) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Thread Roots"),
    _java_threads(ShenandoahPhaseTimings::conc_thread_roots, n_workers) {
  }

  void work(uint worker_id) {
    // The ShenandoahEvacOOMScope has to be set up by ShenandoahContextEvacuateUpdateRootsClosure;
    // otherwise we may deadlock with the watermark lock.
    ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
    ShenandoahConcurrentEvacThreadClosure thr_cl(&oops_cl);
    _java_threads.threads_do(&thr_cl, worker_id);
  }
};

void ShenandoahConcurrentGC::op_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_evacuation_in_progress(), "Checked by caller");
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
  ShenandoahConcurrentEvacUpdateThreadTask task(heap->workers()->active_workers());
  heap->workers()->run_task(&task);
}

void ShenandoahConcurrentGC::op_weak_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
  // Concurrent weak refs processing
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
  if (heap->gc_cause() == GCCause::_wb_breakpoint) {
    ShenandoahBreakpoint::at_after_reference_processing_started();
  }
  heap->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
}

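// Weak root closure: nulls out references to unmarked (dead) objects, and
// evacuates/updates references that point into the collection set while
// evacuation is in progress.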
class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _mark_context;
  bool  _evac_in_progress;
  Thread* const _thread;

public:
  ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
  void do_oop(oop* p);
  void do_oop(narrowOop* p);
};

ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
  _heap(ShenandoahHeap::heap()),
  _mark_context(ShenandoahHeap::heap()->marking_context()),
  _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
  _thread(Thread::current()) {
}

void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
  const oop obj = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(obj)) {
    if (!_mark_context->is_marked(obj)) {
      shenandoah_assert_correct(p, obj);
      ShenandoahHeap::atomic_clear_oop(p, obj);
    } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
      oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      if (resolved == obj) {
        resolved = _heap->evacuate_object(obj, _thread);
      }
      shenandoah_assert_not_in_cset_except(p, resolved, _heap->cancelled_gc());
      ShenandoahHeap::atomic_update_oop(resolved, p, obj);
    }
  }
}

void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
  ShouldNotReachHere();
}

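// The two closures below are applied only for their side effects:
// cld->is_alive() exercises the native barrier on the CLD's holder (which
// nulls or evacuates it, see the comments in the task below), and
// nmethod::is_unloading() computes and caches the unloading state.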
class ShenandoahIsCLDAliveClosure : public CLDClosure {
public:
  void do_cld(ClassLoaderData* cld) {
    cld->is_alive();
  }
};

class ShenandoahIsNMethodAliveClosure : public NMethodClosure {
public:
  void do_nmethod(nmethod* n) {
    n->is_unloading();
  }
};

// This task not only evacuates/updates marked weak roots, but also nulls out
// dead weak roots.
class ShenandoahConcurrentWeakRootsEvacUpdateTask : public WorkerTask {
private:
  ShenandoahVMWeakRoots<true /*concurrent*/> _vm_roots;

  // Roots related to concurrent class unloading
  ShenandoahClassLoaderDataRoots<true /* concurrent */>
                                             _cld_roots;
  ShenandoahConcurrentNMethodIterator        _nmethod_itr;
  ShenandoahPhaseTimings::Phase              _phase;

public:
  ShenandoahConcurrentWeakRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Weak Roots"),
    _vm_roots(phase),
    _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers(), false /*heap iteration*/),
    _nmethod_itr(ShenandoahCodeRoots::table()),
    _phase(phase) {
    if (ShenandoahHeap::heap()->unload_classes()) {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      _nmethod_itr.nmethods_do_begin();
    }
  }

  ~ShenandoahConcurrentWeakRootsEvacUpdateTask() {
    if (ShenandoahHeap::heap()->unload_classes()) {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      _nmethod_itr.nmethods_do_end();
    }
    // Notify runtime data structures of potentially dead oops
    _vm_roots.report_num_dead();
  }

  void work(uint worker_id) {
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    ShenandoahSuspendibleThreadSetJoiner sts_join;
    {
      ShenandoahEvacOOMScope oom;
      // jni_roots and weak_roots are OopStorage backed roots; concurrent iteration
      // may race against OopStorage::release() calls.
      ShenandoahEvacUpdateCleanupOopStorageRootsClosure cl;
      _vm_roots.oops_do(&cl, worker_id);
    }

    // If we are going to perform concurrent class unloading later on, we need to
    // clean up the weak oops in CLDs and determine the nmethods' unloading state,
    // so that we can reclaim immediate garbage sooner.
    if (ShenandoahHeap::heap()->unload_classes()) {
      // Apply ShenandoahIsCLDAliveClosure to CLDs; the native barrier will either
      // null the CLD's holder or evacuate it.
      {
        ShenandoahIsCLDAliveClosure is_cld_alive;
        _cld_roots.cld_do(&is_cld_alive, worker_id);
      }

      // Apply ShenandoahIsNMethodAliveClosure to registered nmethods.
      // The closure calls nmethod->is_unloading(). The is_unloading
      // state is cached; therefore, during the concurrent class unloading phase,
      // we will not touch the metadata of unloading nmethods.
      {
        ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
        ShenandoahIsNMethodAliveClosure is_nmethod_alive;
        _nmethod_itr.nmethods_do(&is_nmethod_alive);
      }
    }
  }
};

void ShenandoahConcurrentGC::op_weak_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
  // Concurrent weak root processing
  {
    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_work);
    ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots_work);
    ShenandoahConcurrentWeakRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_weak_roots_work);
    heap->workers()->run_task(&task);
  }

  // Perform handshake to flush out dead oops
  {
    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_rendezvous);
    heap->rendezvous_threads();
  }
}

void ShenandoahConcurrentGC::op_class_unloading() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert (heap->is_concurrent_weak_root_in_progress() &&
          heap->unload_classes(),
          "Checked by caller");
  heap->do_class_unloading();
}

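// Evacuate/update the oops embedded in an nmethod, then disarm its entry
// barrier so that mutators can enter the method without triggering further
// GC work.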
class ShenandoahEvacUpdateCodeCacheClosure : public NMethodClosure {
private:
  BarrierSetNMethod* const                  _bs;
  ShenandoahEvacuateUpdateMetadataClosure   _cl;

public:
  ShenandoahEvacUpdateCodeCacheClosure() :
    _bs(BarrierSet::barrier_set()->barrier_set_nmethod()),
    _cl() {
  }

  void do_nmethod(nmethod* n) {
    ShenandoahNMethod* data = ShenandoahNMethod::gc_data(n);
    ShenandoahReentrantLocker locker(data->lock());
    // Set up the EvacOOM scope below the reentrant lock to avoid deadlock with
    // the nmethod entry barrier.
    ShenandoahEvacOOMScope oom;
    data->oops_do(&_cl, true/*fix relocation*/);
    _bs->disarm(n);
  }
};

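// Task for evacuating/updating concurrently-processed strong roots: VM roots,
// CLDs and, when class unloading is disabled, the code cache roots. With class
// unloading enabled, nmethods are handled by the unloading path instead
// (see op_class_unloading()).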
class ShenandoahConcurrentRootsEvacUpdateTask : public WorkerTask {
private:
  ShenandoahPhaseTimings::Phase                 _phase;
  ShenandoahVMRoots<true /*concurrent*/>        _vm_roots;
  ShenandoahClassLoaderDataRoots<true /*concurrent*/>
                                                _cld_roots;
  ShenandoahConcurrentNMethodIterator           _nmethod_itr;

public:
  ShenandoahConcurrentRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Strong Roots"),
    _phase(phase),
    _vm_roots(phase),
    _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers(), false /*heap iteration*/),
    _nmethod_itr(ShenandoahCodeRoots::table()) {
    if (!ShenandoahHeap::heap()->unload_classes()) {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      _nmethod_itr.nmethods_do_begin();
    }
  }

  ~ShenandoahConcurrentRootsEvacUpdateTask() {
    if (!ShenandoahHeap::heap()->unload_classes()) {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      _nmethod_itr.nmethods_do_end();
    }
  }

  void work(uint worker_id) {
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    {
      ShenandoahEvacOOMScope oom;
      {
        // vm_roots and weak_roots are OopStorage backed roots; concurrent iteration
        // may race against OopStorage::release() calls.
        ShenandoahContextEvacuateUpdateRootsClosure cl;
        _vm_roots.oops_do<ShenandoahContextEvacuateUpdateRootsClosure>(&cl, worker_id);
      }

      {
        ShenandoahEvacuateUpdateMetadataClosure cl;
        CLDToOopClosure clds(&cl, ClassLoaderData::_claim_strong);
        _cld_roots.cld_do(&clds, worker_id);
      }
    }

    // Cannot set up a ShenandoahEvacOOMScope here, due to potential deadlock with
    // the nmethod entry barrier.
    if (!ShenandoahHeap::heap()->unload_classes()) {
      ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
      ShenandoahEvacUpdateCodeCacheClosure cl;
      _nmethod_itr.nmethods_do(&cl);
    }
  }
};

void ShenandoahConcurrentGC::op_strong_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
  ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
  heap->workers()->run_task(&task);
  heap->set_concurrent_strong_root_in_progress(false);
}

void ShenandoahConcurrentGC::op_cleanup_early() {
  ShenandoahHeap::heap()->free_set()->recycle_trash();
}

void ShenandoahConcurrentGC::op_evacuate() {
  ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
}

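// Evacuation is complete; transition the heap into the update-refs phase,
// where references to collection-set objects are redirected to the evacuated
// copies.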
void ShenandoahConcurrentGC::op_init_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->set_evacuation_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);
  heap->prepare_update_heap_references(true /*concurrent*/);
  heap->set_update_refs_in_progress(true);

  if (ShenandoahPacing) {
    heap->pacer()->setup_for_updaterefs();
  }
}

void ShenandoahConcurrentGC::op_updaterefs() {
  ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
}

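// Handshake closure: updates the references held in a Java thread's stack
// and thread-local roots.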
class ShenandoahUpdateThreadClosure : public HandshakeClosure {
private:
  ShenandoahUpdateRefsClosure _cl;
public:
  ShenandoahUpdateThreadClosure();
  void do_thread(Thread* thread);
};

ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
  HandshakeClosure("Shenandoah Update Thread Roots") {
}

void ShenandoahUpdateThreadClosure::do_thread(Thread* thread) {
  if (thread->is_Java_thread()) {
    JavaThread* jt = JavaThread::cast(thread);
    ResourceMark rm;
    jt->oops_do(&_cl, nullptr);
  }
}

void ShenandoahConcurrentGC::op_update_thread_roots() {
  ShenandoahUpdateThreadClosure cl;
  Handshake::execute(&cl);
}

void ShenandoahConcurrentGC::op_final_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
  assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");

  heap->finish_concurrent_roots();

  // Clear cancelled GC, if set. On the cancellation path, the preceding block
  // has handled everything.
  if (heap->cancelled_gc()) {
    heap->clear_cancelled_gc();
  }

  // Has to be done before the cset is cleared
  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_in_to_space();
  }

  heap->update_heap_region_states(true /*concurrent*/);

  heap->set_update_refs_in_progress(false);
  heap->set_has_forwarded_objects(false);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_updaterefs();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  heap->rebuild_free_set(true /*concurrent*/);
}

void ShenandoahConcurrentGC::op_final_roots() {
  ShenandoahHeap::heap()->set_concurrent_weak_root_in_progress(false);
}

void ShenandoahConcurrentGC::op_cleanup_complete() {
  ShenandoahHeap::heap()->free_set()->recycle_trash();
}

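// On cancellation, record the point at which the concurrent cycle was
// interrupted, so that the collection can be continued by a degenerated
// cycle from that point.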
bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
  if (ShenandoahHeap::heap()->cancelled_gc()) {
    _degen_point = point;
    return true;
  }
  return false;
}

const char* ShenandoahConcurrentGC::init_mark_event_message() const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
  if (heap->unload_classes()) {
    return "Pause Init Mark (unload classes)";
  } else {
    return "Pause Init Mark";
  }
}

const char* ShenandoahConcurrentGC::final_mark_event_message() const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
  if (heap->unload_classes()) {
    return "Pause Final Mark (unload classes)";
  } else {
    return "Pause Final Mark";
  }
}

const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
  if (heap->unload_classes()) {
    return "Concurrent marking (unload classes)";
  } else {
    return "Concurrent marking";
  }
}