/*
 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shenandoah/shenandoahBreakpoint.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahLock.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahStackWatermark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "memory/allocation.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/events.hpp"

// Breakpoint support
class ShenandoahBreakpointGCScope : public StackObj {
private:
  const GCCause::Cause _cause;
public:
  ShenandoahBreakpointGCScope(GCCause::Cause cause) : _cause(cause) {
    if (cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::start_gc();
      ShenandoahBreakpoint::at_before_gc();
    }
  }

  ~ShenandoahBreakpointGCScope() {
    if (_cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::at_after_gc();
    }
  }
};

class ShenandoahBreakpointMarkScope : public StackObj {
private:
  const GCCause::Cause _cause;
public:
  ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
    if (_cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::at_after_marking_started();
    }
  }

  ~ShenandoahBreakpointMarkScope() {
    if (_cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::at_before_marking_completed();
    }
  }
};

ShenandoahConcurrentGC::ShenandoahConcurrentGC(ShenandoahGeneration* generation, bool do_old_gc_bootstrap) :
  _mark(generation),
  _generation(generation),
  _degen_point(ShenandoahDegenPoint::_degenerated_unset),
  _abbreviated(false),
  _do_old_gc_bootstrap(do_old_gc_bootstrap) {
}

ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
  return _degen_point;
}

void ShenandoahConcurrentGC::entry_concurrent_update_refs_prepare(ShenandoahHeap* const heap) {
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = conc_init_update_refs_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs_prepare);
  EventMark em("%s", msg);

  // Evacuation is complete, retire gc labs and change gc state
  heap->concurrent_prepare_for_update_refs();
}

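// Drives one complete concurrent collection cycle: concurrent reset, init mark (STW),
// concurrent marking, final mark (STW), then either concurrent evacuation and
// update-refs, or an abbreviated finish when there is nothing to evacuate. Returns
// false if the cycle is cancelled; _degen_point then records where the degenerated
// cycle should resume.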
bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);

  // Reset for upcoming marking
  entry_reset();

  // Start initial mark under STW
  vmop_entry_init_mark();

  {
    ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);

    // Reset task queue stats here, rather than in mark_concurrent_roots,
    // because remembered set scan will `push` oops into the queues and
    // resetting after this happens will lose those counts.
    TASKQUEUE_STATS_ONLY(_mark.task_queues()->reset_taskqueue_stats());

    // Concurrent remembered set scanning
    entry_scan_remembered_set();

    // Concurrent mark roots
    entry_mark_roots();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_roots)) {
      return false;
    }

    // Continue concurrent mark
    entry_mark();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) {
      return false;
    }
  }

  // Complete marking under STW, and start evacuation
  vmop_entry_final_mark();

  // If the GC was cancelled before final mark, nothing happens on the safepoint. We are still
  // in the marking phase and must resume the degenerated cycle from there. If the GC was cancelled
  // after final mark, then we've entered the evacuation phase and must resume the degenerated cycle
  // from that phase.
  if (_generation->is_concurrent_mark_in_progress()) {
    bool cancelled = check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark);
    assert(cancelled, "GC must have been cancelled between concurrent and final mark");
    return false;
  }

  assert(heap->is_concurrent_weak_root_in_progress(), "Must be doing weak roots now");

  // Concurrent stack processing
  if (heap->is_evacuation_in_progress()) {
    entry_thread_roots();
  }

  // Process weak roots that might still point to regions that would be broken by cleanup.
  // We cannot recycle regions because weak roots need to know what is marked in trashed regions.
  entry_weak_refs();
  entry_weak_roots();

  // Perform concurrent class unloading before any regions get recycled. Class unloading may
  // need to inspect unmarked objects in trashed regions.
  if (heap->unload_classes()) {
    entry_class_unloading();
  }

  // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
  // the space. This would be the last action if there is nothing to evacuate.  Note that
  // we will not age young-gen objects in the case that we skip evacuation.
  entry_cleanup_early();

  heap->free_set()->log_status_under_lock();

  // Processing strong roots
  // This may be skipped if there is nothing to update/evacuate.
  // If so, strong_root_in_progress would be unset.
  if (heap->is_concurrent_strong_root_in_progress()) {
    entry_strong_roots();
  }

  // Continue the cycle with evacuation and optional update-refs.
  // This may be skipped if there is nothing to evacuate.
  // If so, evac_in_progress would be unset by collection set preparation code.
  if (heap->is_evacuation_in_progress()) {
    // Concurrently evacuate
    entry_evacuate();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) {
      return false;
    }

    entry_concurrent_update_refs_prepare(heap);

    // Perform update-refs phase.
    if (ShenandoahVerify || ShenandoahPacing) {
      vmop_entry_init_update_refs();
    }

    entry_update_refs();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_update_refs)) {
      return false;
    }

    // Concurrent update thread roots
    entry_update_thread_roots();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_update_refs)) {
      return false;
    }

    vmop_entry_final_update_refs();

    // Update references freed up collection set, kick the cleanup to reclaim the space.
    entry_cleanup_complete();
  } else {
    if (!entry_final_roots()) {
      assert(_degen_point != _degenerated_unset, "Need to know where to start degenerated cycle");
      return false;
    }

    if (VerifyAfterGC) {
      vmop_entry_verify_final_roots();
    }
    _abbreviated = true;
  }

  // We defer generation resizing actions until after cset regions have been recycled.  We do this even following an
  // abbreviated cycle.
  if (heap->mode()->is_generational()) {
    ShenandoahGenerationalHeap::heap()->complete_concurrent_cycle();
  }

  // Instead of always resetting immediately before the start of a new GC, we can often reset at the end of the
  // previous GC. This allows us to start the next GC cycle more quickly after a trigger condition is detected,
  // reducing the likelihood that GC will degenerate.
  entry_reset_after_collect();

  return true;
}

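// Completes an abbreviated cycle, in which enough immediate garbage was reclaimed
// at final mark to skip evacuation. Finishes any pending in-place promotions and
// the final root work; returns false if promote-in-place was cancelled.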
bool ShenandoahConcurrentGC::complete_abbreviated_cycle() {
  shenandoah_assert_generational();

  ShenandoahGenerationalHeap* const heap = ShenandoahGenerationalHeap::heap();

  // We chose not to evacuate because we found sufficient immediate garbage.
  // However, there may still be regions to promote in place, so do that now.
  if (heap->old_generation()->has_in_place_promotions()) {
    entry_promote_in_place();

    // If the promote-in-place operation was cancelled, we can have the degenerated
    // cycle complete the operation. It will see that no evacuations are in progress,
    // and that there are regions wanting promotion. The risk with not handling the
    // cancellation would be failing to restore top for these regions and leaving
    // them unable to serve allocations for the old generation. This will leave the weak
    // roots flag set (the degenerated cycle will unset it).
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) {
      return false;
    }
  }

  // At this point, the cycle is effectively complete. If the cycle has been cancelled here,
  // the control thread will detect it on its next iteration and run a degenerated young cycle.
  if (!_generation->is_old()) {
    heap->update_region_ages(_generation->complete_marking_context());
  }

  if (!heap->is_concurrent_old_mark_in_progress()) {
    heap->concurrent_final_roots();
  } else {
    // Since the cycle was cut short because there was enough immediate garbage, this will be
    // the last phase before concurrent marking of old resumes. We must be sure
    // that old mark threads don't see any pointers to garbage in the SATB queues. Even
    // though nothing was evacuated, overwriting unreachable weak roots with null may still
    // put pointers to regions that become trash in the SATB queues. The following will
    // piggyback flushing the thread-local SATB queues on the same handshake that propagates
    // the gc state change.
    ShenandoahSATBMarkQueueSet& satb_queues = ShenandoahBarrierSet::satb_mark_queue_set();
    ShenandoahFlushSATBHandshakeClosure complete_thread_local_satb_buffers(satb_queues);
    heap->concurrent_final_roots(&complete_thread_local_satb_buffers);
    heap->old_generation()->concurrent_transfer_pointers_from_satb();
  }
  return true;
}

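// Each vmop_entry_* method below schedules a VM operation; the operation calls back
// into the corresponding entry_* method at a safepoint.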
void ShenandoahConcurrentGC::vmop_entry_init_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahInitMark op(this);
  VMThread::execute(&op); // jump to entry_init_mark() under safepoint
}

void ShenandoahConcurrentGC::vmop_entry_final_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalMarkStartEvac op(this);
  VMThread::execute(&op); // jump to entry_final_mark() under safepoint
}

void ShenandoahConcurrentGC::vmop_entry_init_update_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_update_refs_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahInitUpdateRefs op(this);
  VMThread::execute(&op);
}

void ShenandoahConcurrentGC::vmop_entry_final_update_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalUpdateRefs op(this);
  VMThread::execute(&op);
}

void ShenandoahConcurrentGC::vmop_entry_verify_final_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_roots_gross);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalRoots op(this);
  VMThread::execute(&op);
}

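// The entry_* methods below set up timing, tracing, event reporting and worker
// scopes around the corresponding op_* methods, which do the actual work.
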
void ShenandoahConcurrentGC::entry_init_mark() {
  const char* msg = init_mark_event_message();
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
                              "init marking");

  op_init_mark();
}

void ShenandoahConcurrentGC::entry_final_mark() {
  const char* msg = final_mark_event_message();
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
                              "final marking");

  op_final_mark();
}

void ShenandoahConcurrentGC::entry_init_update_refs() {
  static const char* msg = "Pause Init Update Refs";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs);
  EventMark em("%s", msg);

  // No workers used in this phase, no setup required
  op_init_update_refs();
}

void ShenandoahConcurrentGC::entry_final_update_refs() {
  static const char* msg = "Pause Final Update Refs";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
                              "final reference update");

  op_final_update_refs();
}

void ShenandoahConcurrentGC::entry_verify_final_roots() {
  const char* msg = verify_final_roots_event_message();
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots);
  EventMark em("%s", msg);

  op_verify_final_roots();
}

void ShenandoahConcurrentGC::entry_reset() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->try_inject_alloc_failure();

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  {
    const char* msg = conc_reset_event_message();
    ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
    EventMark em("%s", msg);

    ShenandoahWorkerScope scope(heap->workers(),
                                ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
                                msg);
    op_reset();
  }

  if (heap->mode()->is_generational()) {
    heap->old_generation()->card_scan()->mark_read_table_as_clean();
  }
}

void ShenandoahConcurrentGC::entry_scan_remembered_set() {
  if (_generation->is_young()) {
    ShenandoahHeap* const heap = ShenandoahHeap::heap();
    TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
    const char* msg = "Concurrent remembered set scanning";
    ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::init_scan_rset);
    EventMark em("%s", msg);

    ShenandoahWorkerScope scope(heap->workers(),
                                ShenandoahWorkerPolicy::calc_workers_for_rs_scanning(),
                                msg);

    heap->try_inject_alloc_failure();
    _generation->scan_remembered_set(true /* is_concurrent */);
  }
}

void ShenandoahConcurrentGC::entry_mark_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = "Concurrent marking roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent marking roots");

  heap->try_inject_alloc_failure();
  op_mark_roots();
}

void ShenandoahConcurrentGC::entry_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = conc_mark_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent marking");

  heap->try_inject_alloc_failure();
  op_mark();
}

void ShenandoahConcurrentGC::entry_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  static const char* msg = "Concurrent thread roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_thread_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              msg);

  heap->try_inject_alloc_failure();
  op_thread_roots();
}

void ShenandoahConcurrentGC::entry_weak_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  const char* msg = conc_weak_refs_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_refs_processing(),
                              "concurrent weak references");

  heap->try_inject_alloc_failure();
  op_weak_refs();
}

void ShenandoahConcurrentGC::entry_weak_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = conc_weak_roots_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent weak root");

  heap->try_inject_alloc_failure();
  op_weak_roots();
}

void ShenandoahConcurrentGC::entry_class_unloading() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent class unloading";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_class_unload);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent class unloading");

  heap->try_inject_alloc_failure();
  op_class_unloading();
}

void ShenandoahConcurrentGC::entry_strong_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent strong roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_strong_roots);
  EventMark em("%s", msg);

  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_strong_roots);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent strong root");

  heap->try_inject_alloc_failure();
  op_strong_roots();
}

void ShenandoahConcurrentGC::entry_cleanup_early() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = conc_cleanup_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_early, true /* log_heap_usage */);
  EventMark em("%s", msg);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  op_cleanup_early();
}

void ShenandoahConcurrentGC::entry_evacuate() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  static const char* msg = "Concurrent evacuation";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_evac);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
                              "concurrent evacuation");

  heap->try_inject_alloc_failure();
  op_evacuate();
}

void ShenandoahConcurrentGC::entry_promote_in_place() const {
  shenandoah_assert_generational();

  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::promote_in_place);
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::promote_in_place);
  EventMark em("%s", "Promote in place");

  ShenandoahGenerationalHeap::heap()->promote_regions_in_place(true);
}

void ShenandoahConcurrentGC::entry_update_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  static const char* msg = "Concurrent update thread roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_thread_roots);
  EventMark em("%s", msg);

  // No workers used in this phase, no setup required
  heap->try_inject_alloc_failure();
  op_update_thread_roots();
}

void ShenandoahConcurrentGC::entry_update_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent update references";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
                              "concurrent reference update");

  heap->try_inject_alloc_failure();
  op_update_refs();
}

void ShenandoahConcurrentGC::entry_cleanup_complete() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = conc_cleanup_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
  EventMark em("%s", msg);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  op_cleanup_complete();
}

void ShenandoahConcurrentGC::entry_reset_after_collect() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = conc_reset_after_collect_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset_after_collect);
  EventMark em("%s", msg);

  op_reset_after_collect();
}

void ShenandoahConcurrentGC::op_reset() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_reset();
  }
  // If this is an old GC bootstrap cycle, always clear the bitmap for the global
  // generation to ensure the old generation's bitmap is clear for the old GC cycle
  // that follows.
  if (_do_old_gc_bootstrap) {
    assert(!heap->is_prepare_for_old_mark_in_progress(), "Cannot reset old without making it parsable");
    heap->global_generation()->prepare_gc();
  } else {
    _generation->prepare_gc();
  }
}

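// Region closure run at init mark: captures top-at-mark-start (TAMS) for active
// regions whose TAMS is stale; inactive regions must already have a correct TAMS.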
class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
public:
  ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
    if (r->is_active()) {
      // Check if region needs updating its TAMS. We have updated it already during concurrent
      // reset, so it is very likely we don't need to do another write here.  Since most regions
      // are not "active", this path is relatively rare.
      if (_ctx->top_at_mark_start(r) != r->top()) {
        _ctx->capture_top_at_mark_start(r);
      }
    } else {
      assert(_ctx->top_at_mark_start(r) == r->top(),
             "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
    }
  }

  bool is_thread_safe() { return true; }
};

void ShenandoahConcurrentGC::start_mark() {
  _mark.start_mark();
}

void ShenandoahConcurrentGC::op_init_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");

  assert(_generation->is_bitmap_clear(), "need clear marking bitmap");
  assert(!_generation->is_mark_complete(), "should not be complete");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");

  if (heap->mode()->is_generational()) {
    if (_generation->is_global()) {
      heap->old_generation()->cancel_gc();
    } else if (heap->is_concurrent_old_mark_in_progress()) {
      // Purge the SATB buffers, transferring any valid, old pointers to the
      // old generation mark queue. Any pointers in a young region will be
      // abandoned.
      ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_transfer_satb);
      heap->old_generation()->transfer_pointers_from_satb();
    }
    {
      // After we swap card table below, the write-table is all clean, and the read table holds
      // cards dirty prior to the start of GC. Young and bootstrap collection will update
      // the write card table as a side effect of remembered set scanning. Global collection will
      // update the card table as a side effect of global marking of old objects.
      ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_swap_rset);
      _generation->swap_card_tables();
    }
  }

  if (ShenandoahVerify) {
    ShenandoahTimingsTracker v(ShenandoahPhaseTimings::init_mark_verify);
    heap->verifier()->verify_before_concmark();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  _generation->set_concurrent_mark_in_progress(true);

  start_mark();

  if (_do_old_gc_bootstrap) {
    shenandoah_assert_generational();
    // Update region state for both young and old regions
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
    ShenandoahInitMarkUpdateRegionStateClosure cl;
    heap->parallel_heap_region_iterate(&cl);
    heap->old_generation()->ref_processor()->reset_thread_locals();
  } else {
    // Update region state for only young regions
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
    ShenandoahInitMarkUpdateRegionStateClosure cl;
    _generation->parallel_heap_region_iterate(&cl);
  }

  // Weak reference processing
  ShenandoahReferenceProcessor* rp = _generation->ref_processor();
  rp->reset_thread_locals();
  rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());

  // Make above changes visible to worker threads
  OrderAccess::fence();

  // Arm nmethods for concurrent mark
  ShenandoahCodeRoots::arm_nmethods_for_mark();

  ShenandoahStackWatermark::change_epoch_id();
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_mark();
  }

  {
    ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_propagate_gc_state);
    heap->propagate_gc_state_to_all_threads();
  }
}

void ShenandoahConcurrentGC::op_mark_roots() {
  _mark.mark_concurrent_roots();
}

void ShenandoahConcurrentGC::op_mark() {
  _mark.concurrent_mark();
}

void ShenandoahConcurrentGC::op_final_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");

  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_no_forwarded();
  }

  if (!heap->cancelled_gc()) {
    _mark.finish_mark();
    assert(!heap->cancelled_gc(), "STW mark cannot OOM");

    // Notify JVMTI that the tagmap table will need cleaning.
    JvmtiTagMap::set_needs_cleaning();

    // The collection set is chosen by prepare_regions_and_collection_set(). Additionally, certain parameters have been
    // established to govern the evacuation efforts that are about to begin.  Refer to comments on reserve members in
    // ShenandoahGeneration and ShenandoahOldGeneration for more detail.
    _generation->prepare_regions_and_collection_set(true /*concurrent*/);

    // Has to be done after cset selection
    heap->prepare_concurrent_roots();

    if (!heap->collection_set()->is_empty()) {
      LogTarget(Debug, gc, cset) lt;
      if (lt.is_enabled()) {
        ResourceMark rm;
        LogStream ls(lt);
        heap->collection_set()->print_on(&ls);
      }

      if (ShenandoahVerify) {
        ShenandoahTimingsTracker v(ShenandoahPhaseTimings::final_mark_verify);
        heap->verifier()->verify_before_evacuation();
      }

      heap->set_evacuation_in_progress(true);
      // From here on, we need to update references.
      heap->set_has_forwarded_objects(true);

      // Arm nmethods/stack for concurrent processing
      ShenandoahCodeRoots::arm_nmethods_for_evac();
      ShenandoahStackWatermark::change_epoch_id();

      if (ShenandoahPacing) {
        heap->pacer()->setup_for_evac();
      }
    } else {
      if (ShenandoahVerify) {
        ShenandoahTimingsTracker v(ShenandoahPhaseTimings::final_mark_verify);
        if (has_in_place_promotions(heap)) {
          heap->verifier()->verify_after_concmark_with_promotions();
        } else {
          heap->verifier()->verify_after_concmark();
        }
      }
    }
  }

  {
    ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_propagate_gc_state);
    heap->propagate_gc_state_to_all_threads();
  }
}

bool ShenandoahConcurrentGC::has_in_place_promotions(ShenandoahHeap* heap) {
  return heap->mode()->is_generational() && heap->old_generation()->has_in_place_promotions();
}

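// The GENERATIONAL template parameter specializes the thread-root tasks below at
// compile time; in generational mode, each visited thread additionally has its
// PLAB promotions re-enabled.
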
template<bool GENERATIONAL>
class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
private:
  OopClosure* const _oops;
public:
  explicit ShenandoahConcurrentEvacThreadClosure(OopClosure* oops) : _oops(oops) {}

  void do_thread(Thread* thread) override {
    JavaThread* const jt = JavaThread::cast(thread);
    StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc);
    if (GENERATIONAL) {
      ShenandoahThreadLocalData::enable_plab_promotions(thread);
    }
  }
};

template<bool GENERATIONAL>
class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask {
private:
  ShenandoahJavaThreadsIterator _java_threads;

public:
  explicit ShenandoahConcurrentEvacUpdateThreadTask(uint n_workers) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Thread Roots"),
    _java_threads(ShenandoahPhaseTimings::conc_thread_roots, n_workers) {
  }

  void work(uint worker_id) override {
    if (GENERATIONAL) {
      Thread* worker_thread = Thread::current();
      ShenandoahThreadLocalData::enable_plab_promotions(worker_thread);
    }

    // The ShenandoahEvacOOMScope has to be set up by ShenandoahContextEvacuateUpdateRootsClosure.
    // Otherwise, this may deadlock with the watermark lock.
    ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
    ShenandoahConcurrentEvacThreadClosure<GENERATIONAL> thr_cl(&oops_cl);
    _java_threads.threads_do(&thr_cl, worker_id);
  }
};

void ShenandoahConcurrentGC::op_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_evacuation_in_progress(), "Checked by caller");
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
  if (heap->mode()->is_generational()) {
    ShenandoahConcurrentEvacUpdateThreadTask<true> task(heap->workers()->active_workers());
    heap->workers()->run_task(&task);
  } else {
    ShenandoahConcurrentEvacUpdateThreadTask<false> task(heap->workers()->active_workers());
    heap->workers()->run_task(&task);
  }
}

void ShenandoahConcurrentGC::op_weak_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
  // Concurrent weak refs processing
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
  if (heap->gc_cause() == GCCause::_wb_breakpoint) {
    ShenandoahBreakpoint::at_after_reference_processing_started();
  }
  _generation->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
}

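// Cleans up OopStorage-backed roots: clears roots that point to dead (unmarked)
// objects, and evacuates or updates roots that still point into the collection set.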
class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _mark_context;
  bool  _evac_in_progress;
  Thread* const _thread;

public:
  ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
  void do_oop(oop* p);
  void do_oop(narrowOop* p);
};

ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
  _heap(ShenandoahHeap::heap()),
  _mark_context(ShenandoahHeap::heap()->marking_context()),
  _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
  _thread(Thread::current()) {
}

void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
  const oop obj = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(obj)) {
    if (!_mark_context->is_marked(obj)) {
      shenandoah_assert_generations_reconciled();
      if (_heap->is_in_active_generation(obj)) {
        // Note: The obj is dead here. Do not touch it, just clear.
        ShenandoahHeap::atomic_clear_oop(p, obj);
      }
    } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
      oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      if (resolved == obj) {
        resolved = _heap->evacuate_object(obj, _thread);
      }
      shenandoah_assert_not_in_cset_except(p, resolved, _heap->cancelled_gc());
      ShenandoahHeap::atomic_update_oop(resolved, p, obj);
    }
  }
}

void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
  ShouldNotReachHere();
}

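// The next two closures are applied purely for their side effects: is_alive()
// lets the native barrier null or evacuate the CLD's holder, and is_unloading()
// computes and caches the nmethod's unloading state (see their use in
// ShenandoahConcurrentWeakRootsEvacUpdateTask::work() below).
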
class ShenandoahIsCLDAliveClosure : public CLDClosure {
public:
  void do_cld(ClassLoaderData* cld) {
    cld->is_alive();
  }
};

class ShenandoahIsNMethodAliveClosure: public NMethodClosure {
public:
  void do_nmethod(nmethod* n) {
    n->is_unloading();
  }
};

// This task not only evacuates/updates marked weak roots, but also nulls out
// dead weak roots.
class ShenandoahConcurrentWeakRootsEvacUpdateTask : public WorkerTask {
private:
  ShenandoahVMWeakRoots<true /*concurrent*/> _vm_roots;

  // Roots related to concurrent class unloading
  ShenandoahClassLoaderDataRoots<true /* concurrent */>
                                             _cld_roots;
  ShenandoahConcurrentNMethodIterator        _nmethod_itr;
  ShenandoahPhaseTimings::Phase              _phase;

public:
  ShenandoahConcurrentWeakRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Weak Roots"),
    _vm_roots(phase),
    _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers(), false /*heap iteration*/),
    _nmethod_itr(ShenandoahCodeRoots::table()),
    _phase(phase) {}

  ~ShenandoahConcurrentWeakRootsEvacUpdateTask() {
    // Notify runtime data structures of potentially dead oops
    _vm_roots.report_num_dead();
  }

  void work(uint worker_id) {
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    ShenandoahSuspendibleThreadSetJoiner sts_join;
    {
      ShenandoahEvacOOMScope oom;
      // jni_roots and weak_roots are OopStorage-backed roots; concurrent iteration
      // may race against OopStorage::release() calls.
      ShenandoahEvacUpdateCleanupOopStorageRootsClosure cl;
      _vm_roots.oops_do(&cl, worker_id);
    }

    // If we are going to perform concurrent class unloading later on, we need to
    // clean up the weak oops in CLDs and determine each nmethod's unloading state,
    // so that we can clean up immediate garbage sooner.
    if (ShenandoahHeap::heap()->unload_classes()) {
      // Applies the ShenandoahIsCLDAliveClosure to CLDs; the native barrier will
      // either null the CLD's holder or evacuate it.
      {
        ShenandoahIsCLDAliveClosure is_cld_alive;
        _cld_roots.cld_do(&is_cld_alive, worker_id);
      }

      // Applies ShenandoahIsNMethodAliveClosure to registered nmethods. The closure
      // calls nmethod->is_unloading(). The is_unloading state is cached; therefore,
      // during the concurrent class unloading phase, we will not touch the metadata
      // of unloading nmethods.
      {
        ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
        ShenandoahIsNMethodAliveClosure is_nmethod_alive;
        _nmethod_itr.nmethods_do(&is_nmethod_alive);
      }
    }
  }
};

void ShenandoahConcurrentGC::op_weak_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
  {
    // Concurrent weak root processing
    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_work);
    ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots_work);
    ShenandoahConcurrentWeakRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_weak_roots_work);
    heap->workers()->run_task(&task);
  }

  {
    // It is possible for mutators executing the load reference barrier to have
    // loaded an oop through a weak handle that has since been nulled out by
    // weak root processing. Handshaking here forces them to complete the
    // barrier before the GC cycle continues and does something that would
    // change the evaluation of the barrier (for example, resetting the TAMS
    // on trashed regions could make an oop appear to be marked _after_ the
    // region has been recycled).
    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_rendezvous);
    heap->rendezvous_threads("Shenandoah Concurrent Weak Roots");
  }
}

void ShenandoahConcurrentGC::op_class_unloading() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert (heap->is_concurrent_weak_root_in_progress() &&
          heap->unload_classes(),
          "Checked by caller");
  heap->do_class_unloading();
}

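// Evacuates/updates the oops embedded in an nmethod, then disarms its entry barrier.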
class ShenandoahEvacUpdateCodeCacheClosure : public NMethodClosure {
private:
  BarrierSetNMethod* const                  _bs;
  ShenandoahEvacuateUpdateMetadataClosure   _cl;

public:
  ShenandoahEvacUpdateCodeCacheClosure() :
    _bs(BarrierSet::barrier_set()->barrier_set_nmethod()),
    _cl() {
  }

  void do_nmethod(nmethod* n) {
    ShenandoahNMethod* data = ShenandoahNMethod::gc_data(n);
    ShenandoahReentrantLocker locker(data->lock());
    // Set up the EvacOOM scope below the reentrant lock to avoid deadlock with
    // the nmethod entry barrier.
    ShenandoahEvacOOMScope oom;
    data->oops_do(&_cl, true/*fix relocation*/);
    _bs->disarm(n);
  }
};

class ShenandoahConcurrentRootsEvacUpdateTask : public WorkerTask {
private:
  ShenandoahPhaseTimings::Phase                 _phase;
  ShenandoahVMRoots<true /*concurrent*/>        _vm_roots;
  ShenandoahClassLoaderDataRoots<true /*concurrent*/>
                                                _cld_roots;
  ShenandoahConcurrentNMethodIterator           _nmethod_itr;

public:
  ShenandoahConcurrentRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Strong Roots"),
    _phase(phase),
    _vm_roots(phase),
    _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers(), false /*heap iteration*/),
    _nmethod_itr(ShenandoahCodeRoots::table()) {}

  void work(uint worker_id) {
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    {
      ShenandoahEvacOOMScope oom;
      {
        // vm_roots and weak_roots are OopStorage-backed roots; concurrent iteration
        // may race against OopStorage::release() calls.
        ShenandoahContextEvacuateUpdateRootsClosure cl;
        _vm_roots.oops_do<ShenandoahContextEvacuateUpdateRootsClosure>(&cl, worker_id);
      }

      {
        ShenandoahEvacuateUpdateMetadataClosure cl;
        CLDToOopClosure clds(&cl, ClassLoaderData::_claim_strong);
        _cld_roots.cld_do(&clds, worker_id);
      }
    }

    // Cannot set up a ShenandoahEvacOOMScope here, due to potential deadlock with
    // the nmethod entry barrier.
    if (!ShenandoahHeap::heap()->unload_classes()) {
      ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
      ShenandoahEvacUpdateCodeCacheClosure cl;
      _nmethod_itr.nmethods_do(&cl);
    }
  }
};

void ShenandoahConcurrentGC::op_strong_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
  ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
  heap->workers()->run_task(&task);
  heap->set_concurrent_strong_root_in_progress(false);
}

void ShenandoahConcurrentGC::op_cleanup_early() {
  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_cleanup(),
                              "cleanup early.");
  ShenandoahHeap::heap()->recycle_trash();
}

void ShenandoahConcurrentGC::op_evacuate() {
  ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
}

void ShenandoahConcurrentGC::op_init_update_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (ShenandoahVerify) {
    ShenandoahTimingsTracker v(ShenandoahPhaseTimings::init_update_refs_verify);
    heap->verifier()->verify_before_update_refs();
  }
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_update_refs();
  }
}

void ShenandoahConcurrentGC::op_update_refs() {
  ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
}

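// Handshake closure that updates the oops in each Java thread's roots to their
// to-space copies.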
class ShenandoahUpdateThreadClosure : public HandshakeClosure {
private:
  ShenandoahUpdateRefsClosure _cl;
public:
  ShenandoahUpdateThreadClosure();
  void do_thread(Thread* thread);
};

ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
  HandshakeClosure("Shenandoah Update Thread Roots") {
}

void ShenandoahUpdateThreadClosure::do_thread(Thread* thread) {
  if (thread->is_Java_thread()) {
    JavaThread* jt = JavaThread::cast(thread);
    ResourceMark rm;
    jt->oops_do(&_cl, nullptr);
  }
}

void ShenandoahConcurrentGC::op_update_thread_roots() {
  ShenandoahUpdateThreadClosure cl;
  Handshake::execute(&cl);
}

void ShenandoahConcurrentGC::op_final_update_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
  assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");

  heap->finish_concurrent_roots();

  // Clear cancelled GC, if set. On the cancellation path, the block before would
  // have handled everything.
  if (heap->cancelled_gc()) {
    heap->clear_cancelled_gc(true /* clear oom handler */);
  }

  // Has to be done before cset is cleared
  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_in_to_space();
  }

  // If we are running in generational mode and this is an aging cycle, this will also age active
  // regions that haven't been used for allocation.
  heap->update_heap_region_states(true /*concurrent*/);

  heap->set_update_refs_in_progress(false);
  heap->set_has_forwarded_objects(false);

  if (heap->mode()->is_generational() && heap->is_concurrent_old_mark_in_progress()) {
    // When the SATB barrier is left on to support concurrent old gen mark, it may pick up writes to
    // objects in the collection set. After those objects are evacuated, the pointers in the
    // SATB are no longer safe. Once we have finished update references, we are guaranteed that
    // no more writes to the collection set are possible.
    //
    // This will transfer any old pointers in _active_ regions from the SATB to the old gen
    // mark queues. All other pointers will be discarded. This would also discard any pointers
    // in old regions that were included in a mixed evacuation. We aren't using the SATB filter
    // methods here because we cannot control when they execute. If the SATB filter runs _after_
    // a region has been recycled, we will not be able to detect the bad pointer.
    //
    // We are not concerned about skipping this step in abbreviated cycles because regions
    // with no live objects cannot have been written to and so cannot have entries in the SATB
    // buffers.
    heap->old_generation()->transfer_pointers_from_satb();

    // The aging cycle is only relevant during the evacuation phase for individual objects,
    // and during final mark for entire regions. Both of these operations occur before
    // final update refs.
    ShenandoahGenerationalHeap::heap()->set_aging_cycle(false);
  }

  if (ShenandoahVerify) {
    ShenandoahTimingsTracker v(ShenandoahPhaseTimings::final_update_refs_verify);
    heap->verifier()->verify_after_update_refs();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  heap->rebuild_free_set(true /*concurrent*/);

  {
    ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_propagate_gc_state);
    heap->propagate_gc_state_to_all_threads();
  }
}

bool ShenandoahConcurrentGC::entry_final_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  const char* msg = conc_final_roots_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_final_roots);
  EventMark em("%s", msg);
  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
                              msg);

  if (!heap->mode()->is_generational()) {
    heap->concurrent_final_roots();
  } else {
    if (!complete_abbreviated_cycle()) {
      return false;
    }
  }
  return true;
}

void ShenandoahConcurrentGC::op_verify_final_roots() {
  if (VerifyAfterGC) {
    Universe::verify();
  }
}

void ShenandoahConcurrentGC::op_cleanup_complete() {
  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_cleanup(),
                              "cleanup complete.");
  ShenandoahHeap::heap()->recycle_trash();
}

void ShenandoahConcurrentGC::op_reset_after_collect() {
  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
                              "reset after collection.");

  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (heap->mode()->is_generational()) {
    // If we are in the midst of an old gc bootstrap or an old marking, we want to leave the mark bit map of
    // the young generation intact. In particular, reference processing in the old generation may potentially
    // need the reachability of a young generation referent of a Reference object in the old generation.
    if (!_do_old_gc_bootstrap && !heap->is_concurrent_old_mark_in_progress()) {
      heap->young_generation()->reset_mark_bitmap<false>();
    }
  } else {
    _generation->reset_mark_bitmap<false>();
  }
}

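// If the GC has been cancelled, record the point from which a degenerated cycle
// should resume, and report the cancellation.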
bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
  if (ShenandoahHeap::heap()->cancelled_gc()) {
    _degen_point = point;
    return true;
  }
  return false;
}

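// Event message helpers: each composes a phase name for the current generation
// type, annotated with " (unload classes)" when class unloading is enabled for
// this cycle.
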
const char* ShenandoahConcurrentGC::init_mark_event_message() const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
  if (heap->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Init Mark", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Init Mark", "");
  }
}

const char* ShenandoahConcurrentGC::final_mark_event_message() const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
         "Should not have forwarded objects during final mark, unless old gen concurrent mark is running");

  if (heap->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Mark", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Mark", "");
  }
}

const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
         "Should not have forwarded objects during concurrent mark, unless old gen concurrent mark is running");
  if (heap->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent marking", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent marking", "");
  }
}

const char* ShenandoahConcurrentGC::conc_reset_event_message() const {
  if (ShenandoahHeap::heap()->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent reset", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent reset", "");
  }
}

const char* ShenandoahConcurrentGC::conc_reset_after_collect_event_message() const {
  if (ShenandoahHeap::heap()->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent reset after collect", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent reset after collect", "");
  }
}

const char* ShenandoahConcurrentGC::verify_final_roots_event_message() const {
  if (ShenandoahHeap::heap()->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Verify Final Roots", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Verify Final Roots", "");
  }
}

const char* ShenandoahConcurrentGC::conc_final_roots_event_message() const {
  if (ShenandoahHeap::heap()->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent Final Roots", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent Final Roots", "");
  }
}

const char* ShenandoahConcurrentGC::conc_weak_refs_event_message() const {
  if (ShenandoahHeap::heap()->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent weak references", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent weak references", "");
  }
}

const char* ShenandoahConcurrentGC::conc_weak_roots_event_message() const {
  if (ShenandoahHeap::heap()->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent weak roots", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent weak roots", "");
  }
}

const char* ShenandoahConcurrentGC::conc_cleanup_event_message() const {
  if (ShenandoahHeap::heap()->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent cleanup", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent cleanup", "");
  }
}

const char* ShenandoahConcurrentGC::conc_init_update_refs_event_message() const {
  if (ShenandoahHeap::heap()->unload_classes()) {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent Init Update Refs", " (unload classes)");
  } else {
    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent Init Update Refs", "");
  }
}