/*
 * Copyright (c) 2021, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shenandoah/shenandoahBreakpoint.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahLock.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahStackWatermark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "memory/allocation.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/events.hpp"

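// A concurrent cycle, driven by collect() below, proceeds through: concurrent
// reset, init mark (STW), concurrent marking, final mark (STW), concurrent
// evacuation, init update-refs (STW), concurrent update-refs, final
// update-refs (STW), and concurrent cleanup. If the cycle is cancelled at one
// of the checkpoints, collect() returns false and the recorded degeneration
// point tells the degenerated (STW) collection where to pick up the work.
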
// Breakpoint support: these scopes notify the WhiteBox test API at
// well-defined points of the cycle (see the GCCause::_wb_breakpoint check
// below), so tests can stop and resume a concurrent GC deterministically.
class ShenandoahBreakpointGCScope : public StackObj {
public:
  ShenandoahBreakpointGCScope() {
    ShenandoahBreakpoint::at_before_gc();
  }

  ~ShenandoahBreakpointGCScope() {
    ShenandoahBreakpoint::at_after_gc();
  }
};

class ShenandoahBreakpointMarkScope : public StackObj {
public:
  ShenandoahBreakpointMarkScope() {
    ShenandoahBreakpoint::at_after_marking_started();
  }

  ~ShenandoahBreakpointMarkScope() {
    ShenandoahBreakpoint::at_before_marking_completed();
  }
};

ShenandoahConcurrentGC::ShenandoahConcurrentGC(ShenandoahGeneration* generation, bool do_old_gc_bootstrap) :
  _mark(generation),
  _degen_point(ShenandoahDegenPoint::_degenerated_unset),
  _mixed_evac(false),
  _do_old_gc_bootstrap(do_old_gc_bootstrap),
  _generation(generation) {
}

ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
  return _degen_point;
}

bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (cause == GCCause::_wb_breakpoint) {
    ShenandoahBreakpoint::start_gc();
  }
  ShenandoahBreakpointGCScope breakpoint_gc_scope;

  // Reset for upcoming marking
  entry_reset();

  // Start initial mark under STW
  vmop_entry_init_mark();

  {
    ShenandoahBreakpointMarkScope breakpoint_mark_scope;

    // Reset task queue stats here, rather than in mark_concurrent_roots,
    // because the remembered set scan will push oops onto the queues, and
    // resetting after that happens would lose those counts.
    TASKQUEUE_STATS_ONLY(_mark.task_queues()->reset_taskqueue_stats());

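    // In generational mode, the card-table remembered set records old-to-young
    // pointers; scanning it supplies the extra roots a young collection needs
    // without marking through the old generation.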
    // Concurrent remembered set scanning
    if (_generation->generation_mode() == YOUNG) {
      ShenandoahConcurrentPhase gc_phase("Concurrent remembered set scanning", ShenandoahPhaseTimings::init_scan_rset);
      _generation->scan_remembered_set();
    }

    // Concurrent mark roots
    entry_mark_roots();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_roots)) return false;

    // Continue concurrent mark
    entry_mark();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) return false;
  }

  // Complete marking under STW, and start evacuation
  vmop_entry_final_mark();

  // Concurrent stack processing
  if (heap->is_evacuation_in_progress()) {
    entry_thread_roots();
  }

  // Process weak roots that might still point to regions that would be broken by cleanup
  if (heap->is_concurrent_weak_root_in_progress()) {
    entry_weak_refs();
    entry_weak_roots();
  }

  // Final mark might have reclaimed some immediate garbage; kick off cleanup
  // to reclaim the space. This is the last action if there is nothing to
  // evacuate. Note that we will not age young-gen objects if we skip evacuation.
  entry_cleanup_early();

  {
    ShenandoahHeapLocker locker(heap->lock());
    heap->free_set()->log_status();
  }

  // Perform concurrent class unloading
  if (heap->unload_classes() &&
      heap->is_concurrent_weak_root_in_progress()) {
    entry_class_unloading();
  }

  // Process strong roots.
  // This may be skipped if there is nothing to update/evacuate.
  // If so, strong_root_in_progress would be unset.
  if (heap->is_concurrent_strong_root_in_progress()) {
    entry_strong_roots();
  }

  if (!heap->cancelled_gc() && heap->mode()->is_generational() && _generation->generation_mode() == GLOBAL) {
    entry_global_coalesce_and_fill();
  }

  // Continue the cycle with evacuation and optional update-refs.
  // This may be skipped if there is nothing to evacuate.
  // If so, evac_in_progress would be unset by collection set preparation code.
  if (heap->is_evacuation_in_progress()) {
    // Concurrently evacuate
    entry_evacuate();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) return false;

    // Perform update-refs phase.
    vmop_entry_init_updaterefs();
    entry_updaterefs();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;

    // Concurrent update thread roots
    entry_update_thread_roots();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;

    vmop_entry_final_updaterefs();

    // Update-refs has freed up the collection set; kick off cleanup to reclaim the space.
    entry_cleanup_complete();
  } else {
    vmop_entry_final_roots();
  }

  return true;
}

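// Each vmop_entry_* method below requests a VM operation that brings all Java
// threads to a safepoint and then calls back into the matching entry_* method
// on the VM thread. The *_gross timings taken here cover the entire safepoint,
// including time-to-safepoint, while the pause timings inside the entry_*
// methods cover only the GC work itself.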
void ShenandoahConcurrentGC::vmop_entry_init_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahInitMark op(this, _do_old_gc_bootstrap);
  VMThread::execute(&op); // jump to entry_init_mark() under safepoint
}

void ShenandoahConcurrentGC::vmop_entry_final_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalMarkStartEvac op(this);
  VMThread::execute(&op); // jump to entry_final_mark() under safepoint
}

void ShenandoahConcurrentGC::vmop_entry_init_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_update_refs_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahInitUpdateRefs op(this);
  VMThread::execute(&op);
}

void ShenandoahConcurrentGC::vmop_entry_final_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalUpdateRefs op(this);
  VMThread::execute(&op);
}

void ShenandoahConcurrentGC::vmop_entry_final_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_roots_gross);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalRoots op(this);
  VMThread::execute(&op);
}

void ShenandoahConcurrentGC::entry_init_mark() {
  char msg[1024];
  init_mark_event_message(msg, sizeof(msg));
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
                              "init marking");

  if (ShenandoahHeap::heap()->mode()->is_generational()
    && (_generation->generation_mode() == YOUNG || (_generation->generation_mode() == GLOBAL && ShenandoahVerify))) {
    // The current implementation of swap_remembered_set() copies the write-card-table
    // to the read-card-table. The remembered sets are also swapped for GLOBAL collections
    // so that the verifier works with the correct copy of the card table when verifying.
    _generation->swap_remembered_set();
  }

  op_init_mark();
}

void ShenandoahConcurrentGC::entry_final_mark() {
  char msg[1024];
  final_mark_event_message(msg, sizeof(msg));
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
                              "final marking");

  op_final_mark();
}

void ShenandoahConcurrentGC::entry_init_updaterefs() {
  static const char* msg = "Pause Init Update Refs";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs);
  EventMark em("%s", msg);

  // No workers used in this phase, no setup required
  op_init_updaterefs();
}

void ShenandoahConcurrentGC::entry_final_updaterefs() {
  static const char* msg = "Pause Final Update Refs";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
                              "final reference update");

  op_final_updaterefs();
}

void ShenandoahConcurrentGC::entry_final_roots() {
  static const char* msg = "Pause Final Roots";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots);
  EventMark em("%s", msg);

  op_final_roots();
}

void ShenandoahConcurrentGC::entry_reset() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent reset";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
                              "concurrent reset");

  heap->try_inject_alloc_failure();
  op_reset();
}

void ShenandoahConcurrentGC::entry_mark_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = "Concurrent marking roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent marking roots");

  heap->try_inject_alloc_failure();
  op_mark_roots();
}

void ShenandoahConcurrentGC::entry_mark() {
  char msg[1024];
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  conc_mark_event_message(msg, sizeof(msg));
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent marking");

  heap->try_inject_alloc_failure();
  op_mark();
}

void ShenandoahConcurrentGC::entry_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  static const char* msg = "Concurrent thread roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_thread_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              msg);

  heap->try_inject_alloc_failure();
  op_thread_roots();
}

void ShenandoahConcurrentGC::entry_weak_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  static const char* msg = "Concurrent weak references";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_refs_processing(),
                              "concurrent weak references");

  heap->try_inject_alloc_failure();
  op_weak_refs();
}

void ShenandoahConcurrentGC::entry_weak_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent weak roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent weak root");

  heap->try_inject_alloc_failure();
  op_weak_roots();
}

void ShenandoahConcurrentGC::entry_class_unloading() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent class unloading";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_class_unload);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent class unloading");

  heap->try_inject_alloc_failure();
  op_class_unloading();
}

void ShenandoahConcurrentGC::entry_strong_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent strong roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_strong_roots);
  EventMark em("%s", msg);

  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_strong_roots);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent strong root");

  heap->try_inject_alloc_failure();
  op_strong_roots();
}

void ShenandoahConcurrentGC::entry_cleanup_early() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent cleanup";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_early, true /* log_heap_usage */);
  EventMark em("%s", msg);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  op_cleanup_early();
}

void ShenandoahConcurrentGC::entry_evacuate() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  static const char* msg = "Concurrent evacuation";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_evac);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
                              "concurrent evacuation");

  heap->try_inject_alloc_failure();
  op_evacuate();
}

void ShenandoahConcurrentGC::entry_update_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  static const char* msg = "Concurrent update thread roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_thread_roots);
  EventMark em("%s", msg);

  // No workers used in this phase, no setup required
  heap->try_inject_alloc_failure();
  op_update_thread_roots();
}

void ShenandoahConcurrentGC::entry_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent update references";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
                              "concurrent reference update");

  heap->try_inject_alloc_failure();
  op_updaterefs();
}

void ShenandoahConcurrentGC::entry_cleanup_complete() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent cleanup";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
  EventMark em("%s", msg);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  op_cleanup_complete();
}

void ShenandoahConcurrentGC::entry_global_coalesce_and_fill() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  const char* msg = "Coalescing and filling old regions in global collect";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::coalesce_and_fill);

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  EventMark em("%s", msg);
  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent coalesce and fill");

  op_global_coalesce_and_fill();
}

void ShenandoahConcurrentGC::op_reset() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_reset();
  }
  _generation->prepare_gc(_do_old_gc_bootstrap);
}

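// TAMS (top-at-mark-start) is captured per region when marking begins: objects
// allocated at or above TAMS during the cycle are implicitly live, while
// objects below TAMS are live only if the mark bitmap says so. As a sketch of
// the invariant (not executed code):
//   implicitly_live(obj) == (cast_from_oop<HeapWord*>(obj) >= ctx->top_at_mark_start(r))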
class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
public:
  ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
    if (r->is_active()) {
      // Check if the region needs its TAMS updated. We have updated it already
      // during concurrent reset, so it is very likely we don't need to do
      // another write here. Since most regions are not "active", this path is
      // relatively rare.
      if (_ctx->top_at_mark_start(r) != r->top()) {
        _ctx->capture_top_at_mark_start(r);
      }
    } else {
      assert(_ctx->top_at_mark_start(r) == r->top(),
             "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
    }
  }

  bool is_thread_safe() { return true; }
};

void ShenandoahConcurrentGC::op_init_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");

  assert(_generation->is_bitmap_clear(), "need clear marking bitmap");
  assert(!_generation->is_mark_complete(), "should not be complete");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");

  if (ShenandoahVerify) {
    heap->verifier()->verify_before_concmark();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  _generation->set_concurrent_mark_in_progress(true);

  if (_do_old_gc_bootstrap) {
    // Update region state for both young and old regions
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
    ShenandoahInitMarkUpdateRegionStateClosure cl;
    heap->parallel_heap_region_iterate(&cl);
    heap->old_generation()->parallel_heap_region_iterate(&cl);
  } else {
    // Update region state for the current generation's regions only
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
    ShenandoahInitMarkUpdateRegionStateClosure cl;
    _generation->parallel_heap_region_iterate(&cl);
  }

  // Weak reference processing
  ShenandoahReferenceProcessor* rp = _generation->ref_processor();
  rp->reset_thread_locals();
  rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());

  // Make the above changes visible to worker threads
  OrderAccess::fence();

  // Arm nmethods for concurrent marking. When an nmethod is about to be
  // executed, we need to make sure that all its metadata is marked. The
  // alternative is to remark thread roots at the final mark pause, but that
  // can be a potential latency killer.
  if (heap->unload_classes()) {
    ShenandoahCodeRoots::arm_nmethods();
  }

  ShenandoahStackWatermark::change_epoch_id();
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_mark();
  }
}

void ShenandoahConcurrentGC::op_mark_roots() {
  _mark.mark_concurrent_roots();
}

void ShenandoahConcurrentGC::op_mark() {
  _mark.concurrent_mark();
}

void ShenandoahConcurrentGC::op_final_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");

  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_no_forwarded();
  }

  if (!heap->cancelled_gc()) {
    _mark.finish_mark();
    assert(!heap->cancelled_gc(), "STW mark cannot OOM");

    // Notify JVMTI that the tagmap table will need cleaning.
    JvmtiTagMap::set_needs_cleaning();

    bool mixed_evac = _generation->prepare_regions_and_collection_set(true /*concurrent*/);
    heap->set_mixed_evac(mixed_evac);

    // Has to be done after cset selection
    heap->prepare_concurrent_roots();

    if (!heap->collection_set()->is_empty()) {
      if (ShenandoahVerify) {
        heap->verifier()->verify_before_evacuation();
      }

      heap->set_evacuation_in_progress(true);
      // From here on, we need to update references.
      heap->set_has_forwarded_objects(true);

      // Verify before arming for concurrent processing.
      // Otherwise, verification can trigger stack processing.
      if (ShenandoahVerify) {
        heap->verifier()->verify_during_evacuation();
      }

      // Arm nmethods/stack for concurrent processing
      ShenandoahCodeRoots::arm_nmethods();
      ShenandoahStackWatermark::change_epoch_id();

      // Notify JVMTI that oops are changed.
      JvmtiTagMap::set_needs_rehashing();

      if (ShenandoahPacing) {
        heap->pacer()->setup_for_evac();
      }
    } else {
      if (ShenandoahVerify) {
        heap->verifier()->verify_after_concmark();
      }

      if (VerifyAfterGC) {
        Universe::verify();
      }
    }
  }
}

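// Concurrent stack processing: with stack watermarks, each Java thread fixes
// up its own frames lazily as it returns into them. The closures and task
// below finish that processing on behalf of threads that have not done so
// themselves, so the collector does not have to wait on mutator progress.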
class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
private:
  OopClosure* const _oops;

public:
  ShenandoahConcurrentEvacThreadClosure(OopClosure* oops);
  void do_thread(Thread* thread);
};

ShenandoahConcurrentEvacThreadClosure::ShenandoahConcurrentEvacThreadClosure(OopClosure* oops) :
  _oops(oops) {
}

void ShenandoahConcurrentEvacThreadClosure::do_thread(Thread* thread) {
  JavaThread* const jt = JavaThread::cast(thread);
  StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc);
}

class ShenandoahConcurrentEvacUpdateThreadTask : public AbstractGangTask {
private:
  ShenandoahJavaThreadsIterator _java_threads;

public:
  ShenandoahConcurrentEvacUpdateThreadTask(uint n_workers) :
    AbstractGangTask("Shenandoah Evacuate/Update Concurrent Thread Roots"),
    _java_threads(ShenandoahPhaseTimings::conc_thread_roots, n_workers) {
  }

  void work(uint worker_id) {
    // The ShenandoahEvacOOMScope has to be set up by ShenandoahContextEvacuateUpdateRootsClosure
    // itself. Otherwise, we may deadlock on the watermark lock.
    ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
    ShenandoahConcurrentEvacThreadClosure thr_cl(&oops_cl);
    _java_threads.threads_do(&thr_cl, worker_id);
  }
};

void ShenandoahConcurrentGC::op_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_evacuation_in_progress(), "Checked by caller");
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
  ShenandoahConcurrentEvacUpdateThreadTask task(heap->workers()->active_workers());
  heap->workers()->run_task(&task);
}

void ShenandoahConcurrentGC::op_weak_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
  // Concurrent weak refs processing
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
  ShenandoahBreakpoint::at_after_reference_processing_started();
  _generation->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
}

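// The closure below handles three cases for each weak root slot: an unmarked
// referent in the active generation is dead, so the slot is cleared; a marked
// referent that sits in the collection set is evacuated (or its existing
// forwardee is reused) and the slot is updated; otherwise the slot is left as is.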
class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _mark_context;
  bool  _evac_in_progress;
  Thread* const _thread;

public:
  ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
  void do_oop(oop* p);
  void do_oop(narrowOop* p);
};

ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
  _heap(ShenandoahHeap::heap()),
  _mark_context(ShenandoahHeap::heap()->marking_context()),
  _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
  _thread(Thread::current()) {
}

void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
  const oop obj = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(obj)) {
    if (!_mark_context->is_marked(obj)) {
      if (_heap->is_in_active_generation(obj)) {
        // TODO: This worries me. Here we are asserting that an unmarked from-space object is 'correct'.
        // Normally, I would call this a bogus assert, but there seems to be a legitimate use-case for
        // accessing from-space objects during class unloading. However, the from-space object may have
        // been "filled". We've made no effort to prevent old generation classes being unloaded by young
        // gen (and vice-versa).
        shenandoah_assert_correct(p, obj);
        ShenandoahHeap::atomic_clear_oop(p, obj);
      }
    } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
      oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      if (resolved == obj) {
        resolved = _heap->evacuate_object(obj, _thread);
      }
      ShenandoahHeap::atomic_update_oop(resolved, p, obj);
      assert(_heap->cancelled_gc() ||
             _mark_context->is_marked(resolved) && !_heap->in_collection_set(resolved),
             "Sanity");
    }
  }
}

void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
  ShouldNotReachHere();
}

class ShenandoahIsCLDAliveClosure : public CLDClosure {
public:
  void do_cld(ClassLoaderData* cld) {
    cld->is_alive();
  }
};

class ShenandoahIsNMethodAliveClosure: public NMethodClosure {
public:
  void do_nmethod(nmethod* n) {
    n->is_unloading();
  }
};

// This task not only evacuates/updates marked weak roots, but also NULLs
// out dead weak roots.
class ShenandoahConcurrentWeakRootsEvacUpdateTask : public AbstractGangTask {
private:
  ShenandoahVMWeakRoots<true /*concurrent*/> _vm_roots;

  // Roots related to concurrent class unloading
  ShenandoahClassLoaderDataRoots<true /* concurrent */, true /* single thread*/>
                                             _cld_roots;
  ShenandoahConcurrentNMethodIterator        _nmethod_itr;
  ShenandoahPhaseTimings::Phase              _phase;

public:
  ShenandoahConcurrentWeakRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
    AbstractGangTask("Shenandoah Evacuate/Update Concurrent Weak Roots"),
    _vm_roots(phase),
    _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers()),
    _nmethod_itr(ShenandoahCodeRoots::table()),
    _phase(phase) {
    if (ShenandoahHeap::heap()->unload_classes()) {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      _nmethod_itr.nmethods_do_begin();
    }
  }

  ~ShenandoahConcurrentWeakRootsEvacUpdateTask() {
    if (ShenandoahHeap::heap()->unload_classes()) {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      _nmethod_itr.nmethods_do_end();
    }
    // Notify runtime data structures of potentially dead oops
    _vm_roots.report_num_dead();
  }

  void work(uint worker_id) {
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    {
      ShenandoahEvacOOMScope oom;
      // jni_roots and weak_roots are OopStorage backed roots; concurrent iteration
      // may race against OopStorage::release() calls.
      ShenandoahEvacUpdateCleanupOopStorageRootsClosure cl;
      _vm_roots.oops_do(&cl, worker_id);
    }

    // If we are going to perform concurrent class unloading later on, we need to
    // clean up the weak oops in the CLDs and determine each nmethod's unloading
    // state, so that we can reclaim immediate garbage sooner.
    if (ShenandoahHeap::heap()->unload_classes()) {
      // Applies the ShenandoahIsCLDAliveClosure to CLDs; the native barrier will
      // either NULL the CLD's holder or evacuate it.
      {
        ShenandoahIsCLDAliveClosure is_cld_alive;
        _cld_roots.cld_do(&is_cld_alive, worker_id);
      }

      // Applies the ShenandoahIsNMethodAliveClosure to registered nmethods.
      // The closure calls nmethod->is_unloading(). The is_unloading state is
      // cached; therefore, during the concurrent class unloading phase, we will
      // not touch the metadata of unloading nmethods.
      {
        ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
        ShenandoahIsNMethodAliveClosure is_nmethod_alive;
        _nmethod_itr.nmethods_do(&is_nmethod_alive);
      }
    }
  }
};

void ShenandoahConcurrentGC::op_weak_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
  // Concurrent weak root processing
  {
    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_work);
    ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots_work);
    ShenandoahConcurrentWeakRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_weak_roots_work);
    heap->workers()->run_task(&task);
  }

  // Perform handshake to flush out dead oops
  {
    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_rendezvous);
    heap->rendezvous_threads();
  }
}

void ShenandoahConcurrentGC::op_class_unloading() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress() &&
         heap->unload_classes(),
         "Checked by caller");
  heap->do_class_unloading();
}

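// Armed nmethods trap into the entry barrier on their next execution, where
// the barrier fixes the embedded oops before letting the code run. The closure
// below performs that fix-up eagerly for registered nmethods and then disarms
// them, so hot code avoids taking the barrier hit.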
class ShenandoahEvacUpdateCodeCacheClosure : public NMethodClosure {
private:
  BarrierSetNMethod* const                  _bs;
  ShenandoahEvacuateUpdateMetadataClosure<> _cl;

public:
  ShenandoahEvacUpdateCodeCacheClosure() :
    _bs(BarrierSet::barrier_set()->barrier_set_nmethod()),
    _cl() {
  }

  void do_nmethod(nmethod* n) {
    ShenandoahNMethod* data = ShenandoahNMethod::gc_data(n);
    ShenandoahReentrantLocker locker(data->lock());
    // Set up the EvacOOM scope below the reentrant lock to avoid a deadlock
    // with the nmethod entry barrier.
    ShenandoahEvacOOMScope oom;
    data->oops_do(&_cl, true/*fix relocation*/);
    _bs->disarm(n);
  }
};

class ShenandoahConcurrentRootsEvacUpdateTask : public AbstractGangTask {
private:
  ShenandoahPhaseTimings::Phase                 _phase;
  ShenandoahVMRoots<true /*concurrent*/>        _vm_roots;
  ShenandoahClassLoaderDataRoots<true /*concurrent*/, false /*single threaded*/> _cld_roots;
  ShenandoahConcurrentNMethodIterator           _nmethod_itr;

public:
  ShenandoahConcurrentRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
    AbstractGangTask("Shenandoah Evacuate/Update Concurrent Strong Roots"),
    _phase(phase),
    _vm_roots(phase),
    _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers()),
    _nmethod_itr(ShenandoahCodeRoots::table()) {
    if (!ShenandoahHeap::heap()->unload_classes()) {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      _nmethod_itr.nmethods_do_begin();
    }
  }

  ~ShenandoahConcurrentRootsEvacUpdateTask() {
    if (!ShenandoahHeap::heap()->unload_classes()) {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      _nmethod_itr.nmethods_do_end();
    }
  }

  void work(uint worker_id) {
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    {
      ShenandoahEvacOOMScope oom;
      {
        // vm_roots and weak_roots are OopStorage backed roots; concurrent iteration
        // may race against OopStorage::release() calls.
        ShenandoahContextEvacuateUpdateRootsClosure cl;
        _vm_roots.oops_do<ShenandoahContextEvacuateUpdateRootsClosure>(&cl, worker_id);
      }

      {
        ShenandoahEvacuateUpdateMetadataClosure<> cl;
        CLDToOopClosure clds(&cl, ClassLoaderData::_claim_strong);
        _cld_roots.cld_do(&clds, worker_id);
      }
    }

    // Cannot set up a ShenandoahEvacOOMScope here, due to a potential deadlock
    // with the nmethod entry barrier.
    if (!ShenandoahHeap::heap()->unload_classes()) {
      ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
      ShenandoahEvacUpdateCodeCacheClosure cl;
      _nmethod_itr.nmethods_do(&cl);
    }
  }
};

void ShenandoahConcurrentGC::op_strong_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
  ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
  heap->workers()->run_task(&task);
  heap->set_concurrent_strong_root_in_progress(false);
}

void ShenandoahConcurrentGC::op_cleanup_early() {
  ShenandoahHeap::heap()->free_set()->recycle_trash();
}

void ShenandoahConcurrentGC::op_evacuate() {
  ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
}

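// Evacuation is complete at this point, but references all over the heap and
// in the roots may still point to from-space copies. The update-refs phases
// below rewrite every such reference to the to-space copy; only then can the
// collection set regions be recycled.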
void ShenandoahConcurrentGC::op_init_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->set_evacuation_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);
  heap->prepare_update_heap_references(true /*concurrent*/);
  heap->set_update_refs_in_progress(true);
  if (ShenandoahVerify) {
    heap->verifier()->verify_before_updaterefs();
  }
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_updaterefs();
  }
}

void ShenandoahConcurrentGC::op_updaterefs() {
  ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
}

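// Thread roots are updated via a handshake rather than a full safepoint: each
// Java thread is stopped and processed individually, so no global pause is
// required for this step.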
class ShenandoahUpdateThreadClosure : public HandshakeClosure {
private:
  ShenandoahUpdateRefsClosure _cl;
public:
  ShenandoahUpdateThreadClosure();
  void do_thread(Thread* thread);
};

ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
  HandshakeClosure("Shenandoah Update Thread Roots") {
}

void ShenandoahUpdateThreadClosure::do_thread(Thread* thread) {
  if (thread->is_Java_thread()) {
    JavaThread* jt = JavaThread::cast(thread);
    ResourceMark rm;
    jt->oops_do(&_cl, NULL);
  }
}

void ShenandoahConcurrentGC::op_update_thread_roots() {
  ShenandoahUpdateThreadClosure cl;
  Handshake::execute(&cl);
}

void ShenandoahConcurrentGC::op_final_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
  assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");

  heap->finish_concurrent_roots();

  // Clear cancelled GC, if set. On the cancellation path, the block before
  // would have handled everything.
  if (heap->cancelled_gc()) {
    heap->clear_cancelled_gc(true /* clear oom handler */);
  }

  // Has to be done before the cset is cleared
  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_in_to_space();
  }

  heap->update_heap_region_states(true /*concurrent*/);

  if (heap->is_concurrent_old_mark_in_progress()) {
    // Purge the SATB buffers, transferring any valid, old pointers to the
    // old generation mark queue. From here on, no mutator will have access
    // to anything that will be trashed and recycled.
    heap->purge_old_satb_buffers(false /* abandon */);
  }

  heap->set_update_refs_in_progress(false);
  heap->set_has_forwarded_objects(false);

  // The aging cycle is only relevant during the evacuation cycle for individual
  // objects, and during final mark for entire regions. Both of these relevant
  // operations occur before final update-refs.
  heap->set_aging_cycle(false);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_updaterefs();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  heap->rebuild_free_set(true /*concurrent*/);
}

void ShenandoahConcurrentGC::op_final_roots() {
  ShenandoahHeap::heap()->set_concurrent_weak_root_in_progress(false);
}

void ShenandoahConcurrentGC::op_cleanup_complete() {
  ShenandoahHeap::heap()->free_set()->recycle_trash();
}

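// Global collections coalesce and fill old regions: runs of dead objects are
// overwritten with filler objects, keeping old regions parseable for later
// remembered set scans (see ShenandoahHeap::coalesce_and_fill_old_regions()).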
void ShenandoahConcurrentGC::op_global_coalesce_and_fill() {
  ShenandoahHeap::heap()->coalesce_and_fill_old_regions();
}

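// Record where the cycle was cancelled so that a degenerated (STW) collection
// can resume the work from that point instead of starting over.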
bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
  if (ShenandoahHeap::heap()->cancelled_gc()) {
    _degen_point = point;
    return true;
  }
  return false;
}

void ShenandoahConcurrentGC::init_mark_event_message(char* buf, size_t len) const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
  if (heap->unload_classes()) {
    jio_snprintf(buf, len, "Pause Init Mark (%s) (unload classes)", _generation->name());
  } else {
    jio_snprintf(buf, len, "Pause Init Mark (%s)", _generation->name());
  }
}

void ShenandoahConcurrentGC::final_mark_event_message(char* buf, size_t len) const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
         "Should not have forwarded objects during final mark (unless old gen concurrent mark is running)");
  if (heap->unload_classes()) {
    jio_snprintf(buf, len, "Pause Final Mark (%s) (unload classes)", _generation->name());
  } else {
    jio_snprintf(buf, len, "Pause Final Mark (%s)", _generation->name());
  }
}

void ShenandoahConcurrentGC::conc_mark_event_message(char* buf, size_t len) const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
         "Should not have forwarded objects during concurrent mark (unless old gen concurrent mark is running)");
  if (heap->unload_classes()) {
    jio_snprintf(buf, len, "Concurrent marking (%s) (unload classes)", _generation->name());
  } else {
    jio_snprintf(buf, len, "Concurrent marking (%s)", _generation->name());
  }
}