src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp

   1 /*
   2  * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 
  28 #include "gc/shared/barrierSetNMethod.hpp"
  29 #include "gc/shared/collectorCounters.hpp"
  30 #include "gc/shared/continuationGCSupport.inline.hpp"
  31 #include "gc/shenandoah/shenandoahBreakpoint.hpp"
  32 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  33 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
  34 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  35 #include "gc/shenandoah/shenandoahLock.hpp"
  36 #include "gc/shenandoah/shenandoahMark.inline.hpp"
  37 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  38 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  39 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  40 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  41 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  42 #include "gc/shenandoah/shenandoahStackWatermark.hpp"
  43 #include "gc/shenandoah/shenandoahUtils.hpp"
  44 #include "gc/shenandoah/shenandoahVerifier.hpp"
  45 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  46 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  47 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  48 #include "memory/allocation.hpp"
  49 #include "prims/jvmtiTagMap.hpp"
  50 #include "runtime/vmThread.hpp"
  51 #include "utilities/events.hpp"
  52 
  53 // Breakpoint support
  54 class ShenandoahBreakpointGCScope : public StackObj {
  55 private:
  56   const GCCause::Cause _cause;
  57 public:
  58   ShenandoahBreakpointGCScope(GCCause::Cause cause) : _cause(cause) {

  69   }
  70 };
  71 
  72 class ShenandoahBreakpointMarkScope : public StackObj {
  73 private:
  74   const GCCause::Cause _cause;
  75 public:
  76   ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
  77     if (_cause == GCCause::_wb_breakpoint) {
  78       ShenandoahBreakpoint::at_after_marking_started();
  79     }
  80   }
  81 
  82   ~ShenandoahBreakpointMarkScope() {
  83     if (_cause == GCCause::_wb_breakpoint) {
  84       ShenandoahBreakpoint::at_before_marking_completed();
  85     }
  86   }
  87 };
  88 
  89 ShenandoahConcurrentGC::ShenandoahConcurrentGC() :
  90   _mark(),
  91   _degen_point(ShenandoahDegenPoint::_degenerated_unset) {
  92 }
  93 
  94 ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
  95   return _degen_point;
  96 }
  97 
  98 void ShenandoahConcurrentGC::cancel() {
  99   ShenandoahConcurrentMark::cancel();
 100 }
 101 
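// Drives one complete concurrent cycle. Returns true when the cycle finishes
// normally; returns false when the GC is cancelled, after recording the phase
// reached in _degen_point so a degenerated (STW) cycle can resume from there.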
 102 bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
 103   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 104   ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);
 105 
 106   // Reset for upcoming marking
 107   entry_reset();
 108 
 109   // Start initial mark under STW
 110   vmop_entry_init_mark();
 111 
 112   {
 113     ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);
 114     // Concurrent mark roots
 115     entry_mark_roots();
 116     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_outside_cycle)) return false;
 117 
 118     // Continue concurrent mark
 119     entry_mark();
 120     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) return false;
 121   }
 122 
 123   // Complete marking under STW, and start evacuation
 124   vmop_entry_final_mark();
 125 
 126   // Concurrent stack processing
 127   if (heap->is_evacuation_in_progress()) {
 128     entry_thread_roots();
 129   }
 130 
 131   // Process weak roots that might still point to regions that would be broken by cleanup
 132   if (heap->is_concurrent_weak_root_in_progress()) {
 133     entry_weak_refs();
 134     entry_weak_roots();
 135   }
 136 
  137   // Final mark might have reclaimed some immediate garbage; kick cleanup to reclaim
 138   // the space. This would be the last action if there is nothing to evacuate.
 139   entry_cleanup_early();
 140 
 141   {
 142     ShenandoahHeapLocker locker(heap->lock());
 143     heap->free_set()->log_status();
 144   }
 145 
 146   // Perform concurrent class unloading
 147   if (heap->unload_classes() &&
 148       heap->is_concurrent_weak_root_in_progress()) {
 149     entry_class_unloading();
 150   }
 151 
 152   // Processing strong roots
 153   // This may be skipped if there is nothing to update/evacuate.
 154   // If so, strong_root_in_progress would be unset.
 155   if (heap->is_concurrent_strong_root_in_progress()) {
 156     entry_strong_roots();
 157   }
 158 
 159   // Continue the cycle with evacuation and optional update-refs.
 160   // This may be skipped if there is nothing to evacuate.
 161   // If so, evac_in_progress would be unset by collection set preparation code.
 162   if (heap->is_evacuation_in_progress()) {
 163     // Concurrently evacuate
 164     entry_evacuate();
 165     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) return false;
 166 
 167     // Perform update-refs phase.
 168     vmop_entry_init_updaterefs();
 169     entry_updaterefs();
 170     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;
 171 
 172     // Concurrent update thread roots
 173     entry_update_thread_roots();
 174     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;
 175 
 176     vmop_entry_final_updaterefs();
 177 
  178     // Update references freed up the collection set; kick the cleanup to reclaim the space.
 179     entry_cleanup_complete();
 180   } else {
 181     vmop_entry_final_roots();
 182   }
 183 
 184   return true;
 185 }
 186 
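// Each vmop_entry_* method below packages the corresponding pause as a VM
// operation and hands it to the VM thread; the operation then calls back into
// the matching entry_* method (and from there op_*) under a safepoint.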
 187 void ShenandoahConcurrentGC::vmop_entry_init_mark() {
 188   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 189   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 190   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);
 191 
 192   heap->try_inject_alloc_failure();
 193   VM_ShenandoahInitMark op(this);
 194   VMThread::execute(&op); // jump to entry_init_mark() under safepoint
 195 }
 196 
 197 void ShenandoahConcurrentGC::vmop_entry_final_mark() {
 198   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 199   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 200   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);
 201 
 202   heap->try_inject_alloc_failure();
 203   VM_ShenandoahFinalMarkStartEvac op(this);
 204   VMThread::execute(&op); // jump to entry_final_mark under safepoint
 205 }
 206 
 207 void ShenandoahConcurrentGC::vmop_entry_init_updaterefs() {
 208   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 209   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 210   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_update_refs_gross);
 211 
 212   heap->try_inject_alloc_failure();
 213   VM_ShenandoahInitUpdateRefs op(this);
 214   VMThread::execute(&op);
 215 }
 216 
 217 void ShenandoahConcurrentGC::vmop_entry_final_updaterefs() {
 218   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 219   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 220   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_gross);
 221 
 222   heap->try_inject_alloc_failure();
 223   VM_ShenandoahFinalUpdateRefs op(this);
 224   VMThread::execute(&op);
 225 }
 226 
 227 void ShenandoahConcurrentGC::vmop_entry_final_roots() {
 228   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 229   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 230   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_roots_gross);
 231 
 232   // This phase does not use workers, no need for setup
 233   heap->try_inject_alloc_failure();
 234   VM_ShenandoahFinalRoots op(this);
 235   VMThread::execute(&op);
 236 }
 237 
 238 void ShenandoahConcurrentGC::entry_init_mark() {
 239   const char* msg = init_mark_event_message();
 240   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_mark);
 241   EventMark em("%s", msg);
 242 
 243   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
 244                               ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
 245                               "init marking");
 246 
 247   op_init_mark();
 248 }
 249 
 250 void ShenandoahConcurrentGC::entry_final_mark() {
 251   const char* msg = final_mark_event_message();
 252   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark);
 253   EventMark em("%s", msg);
 254 
 255   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
 256                               ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
 257                               "final marking");
 258 
 259   op_final_mark();
 260 }
 261 
 262 void ShenandoahConcurrentGC::entry_init_updaterefs() {
 263   static const char* msg = "Pause Init Update Refs";
 264   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs);
 265   EventMark em("%s", msg);
 266 
 267   // No workers used in this phase, no setup required
 268   op_init_updaterefs();
 269 }
 270 
 271 void ShenandoahConcurrentGC::entry_final_updaterefs() {
 272   static const char* msg = "Pause Final Update Refs";
 273   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
 274   EventMark em("%s", msg);
 275 
 276   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
 277                               ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
 278                               "final reference update");
 279 
 280   op_final_updaterefs();
 281 }
 282 
 283 void ShenandoahConcurrentGC::entry_final_roots() {
 284   static const char* msg = "Pause Final Roots";
 285   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots);
 286   EventMark em("%s", msg);
 287 
 288   op_final_roots();
 289 }
 290 
 291 void ShenandoahConcurrentGC::entry_reset() {
 292   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 293   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 294   static const char* msg = "Concurrent reset";
 295   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
 296   EventMark em("%s", msg);
 297 
 298   ShenandoahWorkerScope scope(heap->workers(),
 299                               ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
 300                               "concurrent reset");
 301 
 302   heap->try_inject_alloc_failure();
 303   op_reset();
 304 }
 305 
 306 void ShenandoahConcurrentGC::entry_mark_roots() {
 307   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 308   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 309   const char* msg = "Concurrent marking roots";
 310   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
 311   EventMark em("%s", msg);
 312 
 313   ShenandoahWorkerScope scope(heap->workers(),
 314                               ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
 315                               "concurrent marking roots");
 316 
 317   heap->try_inject_alloc_failure();
 318   op_mark_roots();
 319 }
 320 
 321 void ShenandoahConcurrentGC::entry_mark() {
 322   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 323   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

 332   heap->try_inject_alloc_failure();
 333   op_mark();
 334 }
 335 
 336 void ShenandoahConcurrentGC::entry_thread_roots() {
 337   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 338   static const char* msg = "Concurrent thread roots";
 339   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_thread_roots);
 340   EventMark em("%s", msg);
 341 
 342   ShenandoahWorkerScope scope(heap->workers(),
 343                               ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
 344                               msg);
 345 
 346   heap->try_inject_alloc_failure();
 347   op_thread_roots();
 348 }
 349 
 350 void ShenandoahConcurrentGC::entry_weak_refs() {
 351   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 352   static const char* msg = "Concurrent weak references";
 353   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_refs);
 354   EventMark em("%s", msg);
 355 
 356   ShenandoahWorkerScope scope(heap->workers(),
 357                               ShenandoahWorkerPolicy::calc_workers_for_conc_refs_processing(),
 358                               "concurrent weak references");
 359 
 360   heap->try_inject_alloc_failure();
 361   op_weak_refs();
 362 }
 363 
 364 void ShenandoahConcurrentGC::entry_weak_roots() {
 365   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 366   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 367   static const char* msg = "Concurrent weak roots";
 368   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_roots);
 369   EventMark em("%s", msg);
 370 
 371   ShenandoahWorkerScope scope(heap->workers(),
 372                               ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
 373                               "concurrent weak root");
 374 
 375   heap->try_inject_alloc_failure();
 376   op_weak_roots();
 377 }
 378 
 379 void ShenandoahConcurrentGC::entry_class_unloading() {
 380   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 381   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 382   static const char* msg = "Concurrent class unloading";
 383   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_class_unload);
 384   EventMark em("%s", msg);
 385 
 386   ShenandoahWorkerScope scope(heap->workers(),
 387                               ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),

 394 void ShenandoahConcurrentGC::entry_strong_roots() {
 395   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 396   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 397   static const char* msg = "Concurrent strong roots";
 398   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_strong_roots);
 399   EventMark em("%s", msg);
 400 
 401   ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_strong_roots);
 402 
 403   ShenandoahWorkerScope scope(heap->workers(),
 404                               ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
 405                               "concurrent strong root");
 406 
 407   heap->try_inject_alloc_failure();
 408   op_strong_roots();
 409 }
 410 
 411 void ShenandoahConcurrentGC::entry_cleanup_early() {
 412   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 413   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 414   static const char* msg = "Concurrent cleanup";
 415   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_early, true /* log_heap_usage */);
 416   EventMark em("%s", msg);
 417 
 418   // This phase does not use workers, no need for setup
 419   heap->try_inject_alloc_failure();
 420   op_cleanup_early();
 421 }
 422 
 423 void ShenandoahConcurrentGC::entry_evacuate() {
 424   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 425   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 426 
 427   static const char* msg = "Concurrent evacuation";
 428   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_evac);
 429   EventMark em("%s", msg);
 430 
 431   ShenandoahWorkerScope scope(heap->workers(),
 432                               ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
 433                               "concurrent evacuation");
 434 
 435   heap->try_inject_alloc_failure();
 436   op_evacuate();
 437 }
 438 
 439 void ShenandoahConcurrentGC::entry_update_thread_roots() {
 440   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 441   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 442 
 443   static const char* msg = "Concurrent update thread roots";
 444   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_thread_roots);
 445   EventMark em("%s", msg);
 446 
 447   // No workers used in this phase, no setup required
 448   heap->try_inject_alloc_failure();
 449   op_update_thread_roots();
 450 }
 451 
 452 void ShenandoahConcurrentGC::entry_updaterefs() {
 453   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 454   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 455   static const char* msg = "Concurrent update references";
 456   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs);
 457   EventMark em("%s", msg);
 458 
 459   ShenandoahWorkerScope scope(heap->workers(),
 460                               ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
 461                               "concurrent reference update");
 462 
 463   heap->try_inject_alloc_failure();
 464   op_updaterefs();
 465 }
 466 
 467 void ShenandoahConcurrentGC::entry_cleanup_complete() {
 468   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 469   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 470   static const char* msg = "Concurrent cleanup";
 471   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
 472   EventMark em("%s", msg);
 473 
 474   // This phase does not use workers, no need for setup
 475   heap->try_inject_alloc_failure();
 476   op_cleanup_complete();
 477 }
 478 
 479 void ShenandoahConcurrentGC::op_reset() {
 480   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 481   if (ShenandoahPacing) {
 482     heap->pacer()->setup_for_reset();
 483   }
 484 
 485   heap->prepare_gc();
 486 }
 487 
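// Captures top-at-mark-start (TAMS) for every active region, so that objects
// allocated above TAMS during the cycle are implicitly treated as live.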
 488 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
 489 private:
 490   ShenandoahMarkingContext* const _ctx;
 491 public:
 492   ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
 493 
 494   void heap_region_do(ShenandoahHeapRegion* r) {
 495     assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
 496     if (r->is_active()) {
  497       // Check if the region's TAMS needs updating. We have updated it already during
  498       // concurrent reset, so it is very likely we don't need to do another write here.
 499       if (_ctx->top_at_mark_start(r) != r->top()) {
 500         _ctx->capture_top_at_mark_start(r);
 501       }
 502     } else {
 503       assert(_ctx->top_at_mark_start(r) == r->top(),
 504              "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
 505     }
 506   }
 507 
 508   bool is_thread_safe() { return true; }
 509 };
 510 
 511 void ShenandoahConcurrentGC::start_mark() {
 512   _mark.start_mark();
 513 }
 514 
 515 void ShenandoahConcurrentGC::op_init_mark() {
 516   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 517   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
 518   assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
 519 
 520   assert(heap->marking_context()->is_bitmap_clear(), "need clear marking bitmap");
 521   assert(!heap->marking_context()->is_complete(), "should not be complete");
 522   assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
 523 
 524   if (ShenandoahVerify) {
 525     heap->verifier()->verify_before_concmark();
 526   }
 527 
 528   if (VerifyBeforeGC) {
 529     Universe::verify();
 530   }
 531 
 532   heap->set_concurrent_mark_in_progress(true);
 533 
 534   start_mark();
 535 
 536   {
 537     ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
 538     ShenandoahInitMarkUpdateRegionStateClosure cl;
 539     heap->parallel_heap_region_iterate(&cl);
 540   }
 541 
 542   // Weak reference processing
 543   ShenandoahReferenceProcessor* rp = heap->ref_processor();
 544   rp->reset_thread_locals();
 545   rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());
 546 
 547   // Make above changes visible to worker threads
 548   OrderAccess::fence();
 549 
 550   // Arm nmethods for concurrent mark
 551   ShenandoahCodeRoots::arm_nmethods_for_mark();
 552 
 553   ShenandoahStackWatermark::change_epoch_id();
 554   if (ShenandoahPacing) {
 555     heap->pacer()->setup_for_mark();
 556   }
 557 }
 558 
 559 void ShenandoahConcurrentGC::op_mark_roots() {
 560   _mark.mark_concurrent_roots();
 561 }
 562 
 563 void ShenandoahConcurrentGC::op_mark() {
 564   _mark.concurrent_mark();
 565 }
 566 
 567 void ShenandoahConcurrentGC::op_final_mark() {
 568   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 569   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
 570   assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
 571 
 572   if (ShenandoahVerify) {
 573     heap->verifier()->verify_roots_no_forwarded();
 574   }
 575 
 576   if (!heap->cancelled_gc()) {
 577     _mark.finish_mark();
 578     assert(!heap->cancelled_gc(), "STW mark cannot OOM");
 579 
 580     // Notify JVMTI that the tagmap table will need cleaning.
 581     JvmtiTagMap::set_needs_cleaning();
 582 
 583     heap->prepare_regions_and_collection_set(true /*concurrent*/);
 584 
 585     // Has to be done after cset selection
 586     heap->prepare_concurrent_roots();
 587 
 588     if (!heap->collection_set()->is_empty()) {
 589       if (ShenandoahVerify) {
 590         heap->verifier()->verify_before_evacuation();
 591       }
 592 
 593       heap->set_evacuation_in_progress(true);
 594       // From here on, we need to update references.
 595       heap->set_has_forwarded_objects(true);
 596 
 597       // Verify before arming for concurrent processing.
 598       // Otherwise, verification can trigger stack processing.
 599       if (ShenandoahVerify) {
 600         heap->verifier()->verify_during_evacuation();
 601       }
 602 
 603       // Arm nmethods/stack for concurrent processing
 604       ShenandoahCodeRoots::arm_nmethods_for_evac();
 605       ShenandoahStackWatermark::change_epoch_id();
 606 
 607       if (ShenandoahPacing) {
 608         heap->pacer()->setup_for_evac();
 609       }
 610     } else {
 611       if (ShenandoahVerify) {
 612         heap->verifier()->verify_after_concmark();
 613       }
 614 
 615       if (VerifyAfterGC) {
 616         Universe::verify();
 617       }
 618     }
 619   }
 620 }
 621 
 622 class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
 623 private:
 624   OopClosure* const _oops;
 625 
 626 public:
 627   ShenandoahConcurrentEvacThreadClosure(OopClosure* oops);
 628   void do_thread(Thread* thread);
 629 };
 630 
 631 ShenandoahConcurrentEvacThreadClosure::ShenandoahConcurrentEvacThreadClosure(OopClosure* oops) :
 632   _oops(oops) {
 633 }
 634 
 635 void ShenandoahConcurrentEvacThreadClosure::do_thread(Thread* thread) {
 636   JavaThread* const jt = JavaThread::cast(thread);
 637   StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc);
 638 }
 639 
 640 class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask {
 641 private:
 642   ShenandoahJavaThreadsIterator _java_threads;
 643 
 644 public:
 645   ShenandoahConcurrentEvacUpdateThreadTask(uint n_workers) :
 646     WorkerTask("Shenandoah Evacuate/Update Concurrent Thread Roots"),
 647     _java_threads(ShenandoahPhaseTimings::conc_thread_roots, n_workers) {
 648   }
 649 
 650   void work(uint worker_id) {
 651     // ShenandoahEvacOOMScope has to be setup by ShenandoahContextEvacuateUpdateRootsClosure.
  652     // Otherwise, it may deadlock with the watermark lock.
 653     ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
 654     ShenandoahConcurrentEvacThreadClosure thr_cl(&oops_cl);
 655     _java_threads.threads_do(&thr_cl, worker_id);
 656   }
 657 };
 658 
 659 void ShenandoahConcurrentGC::op_thread_roots() {
 660   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 661   assert(heap->is_evacuation_in_progress(), "Checked by caller");
 662   ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
 663   ShenandoahConcurrentEvacUpdateThreadTask task(heap->workers()->active_workers());
 664   heap->workers()->run_task(&task);
 665 }
 666 
 667 void ShenandoahConcurrentGC::op_weak_refs() {
 668   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 669   assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
 670   // Concurrent weak refs processing
 671   ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
 672   if (heap->gc_cause() == GCCause::_wb_breakpoint) {
 673     ShenandoahBreakpoint::at_after_reference_processing_started();
 674   }
 675   heap->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
 676 }
 677 
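// Cleans up OopStorage-backed weak roots: referents that did not survive
// marking are cleared to null; live referents in the collection set are
// evacuated and the root slot is updated to point at the forwardee.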
 678 class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
 679 private:
 680   ShenandoahHeap* const _heap;
 681   ShenandoahMarkingContext* const _mark_context;
 682   bool  _evac_in_progress;
 683   Thread* const _thread;
 684 
 685 public:
 686   ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
 687   void do_oop(oop* p);
 688   void do_oop(narrowOop* p);
 689 };
 690 
 691 ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
 692   _heap(ShenandoahHeap::heap()),
 693   _mark_context(ShenandoahHeap::heap()->marking_context()),
 694   _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
 695   _thread(Thread::current()) {
 696 }
 697 
 698 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
 699   const oop obj = RawAccess<>::oop_load(p);
 700   if (!CompressedOops::is_null(obj)) {
 701     if (!_mark_context->is_marked(obj)) {
 702       shenandoah_assert_correct(p, obj);
 703       ShenandoahHeap::atomic_clear_oop(p, obj);
 704     } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
 705       oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 706       if (resolved == obj) {
 707         resolved = _heap->evacuate_object(obj, _thread);
 708       }
 709       ShenandoahHeap::atomic_update_oop(resolved, p, obj);
 710       assert(_heap->cancelled_gc() ||
 711              _mark_context->is_marked(resolved) && !_heap->in_collection_set(resolved),
 712              "Sanity");
 713     }
 714   }
 715 }
 716 
 717 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
 718   ShouldNotReachHere();
 719 }
 720 
 721 class ShenandoahIsCLDAliveClosure : public CLDClosure {
 722 public:
 723   void do_cld(ClassLoaderData* cld) {
 724     cld->is_alive();
 725   }
 726 };
 727 
 728 class ShenandoahIsNMethodAliveClosure: public NMethodClosure {
 729 public:
 730   void do_nmethod(nmethod* n) {
 731     n->is_unloading();
 732   }

 752     _nmethod_itr(ShenandoahCodeRoots::table()),
 753     _phase(phase) {}
 754 
 755   ~ShenandoahConcurrentWeakRootsEvacUpdateTask() {
 756     // Notify runtime data structures of potentially dead oops
 757     _vm_roots.report_num_dead();
 758   }
 759 
 760   void work(uint worker_id) {
 761     ShenandoahConcurrentWorkerSession worker_session(worker_id);
 762     ShenandoahSuspendibleThreadSetJoiner sts_join;
 763     {
 764       ShenandoahEvacOOMScope oom;
 765       // jni_roots and weak_roots are OopStorage backed roots, concurrent iteration
 766       // may race against OopStorage::release() calls.
 767       ShenandoahEvacUpdateCleanupOopStorageRootsClosure cl;
 768       _vm_roots.oops_do(&cl, worker_id);
 769     }
 770 
  771     // If we are going to perform concurrent class unloading later on, we need to
  772     // clean up the weak oops in the CLDs and determine each nmethod's unloading state,
  773     // so that we can clean up immediate garbage sooner.
 774     if (ShenandoahHeap::heap()->unload_classes()) {
  775       // Applies the ShenandoahIsCLDAliveClosure to CLDs; the native barrier will either
  776       // null the CLD's holder or evacuate it.
 777       {
 778         ShenandoahIsCLDAliveClosure is_cld_alive;
 779         _cld_roots.cld_do(&is_cld_alive, worker_id);
 780       }
 781 
 782       // Applies ShenandoahIsNMethodAliveClosure to registered nmethods.
 783       // The closure calls nmethod->is_unloading(). The is_unloading
  784       // state is cached; therefore, during the concurrent class unloading phase,
  785       // we will not touch the metadata of unloading nmethods.
 786       {
 787         ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
 788         ShenandoahIsNMethodAliveClosure is_nmethod_alive;
 789         _nmethod_itr.nmethods_do(&is_nmethod_alive);
 790       }
 791     }
 792   }
 793 };
 794 
 795 void ShenandoahConcurrentGC::op_weak_roots() {
 796   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 797   assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
 798   // Concurrent weak root processing
 799   {
 800     ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_work);
 801     ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots_work);
 802     ShenandoahConcurrentWeakRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_weak_roots_work);
 803     heap->workers()->run_task(&task);
 804   }
 805 
 806   // Perform handshake to flush out dead oops
 807   {
 808     ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_rendezvous);
 809     heap->rendezvous_threads();
 810   }
 811 }
 812 
 813 void ShenandoahConcurrentGC::op_class_unloading() {
 814   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 815   assert (heap->is_concurrent_weak_root_in_progress() &&
 816           heap->unload_classes(),
 817           "Checked by caller");
 818   heap->do_class_unloading();
 819 }
 820 
 821 class ShenandoahEvacUpdateCodeCacheClosure : public NMethodClosure {
 822 private:
 823   BarrierSetNMethod* const                  _bs;
 824   ShenandoahEvacuateUpdateMetadataClosure   _cl;
 825 
 826 public:
 827   ShenandoahEvacUpdateCodeCacheClosure() :

 875     }
 876 
 877     // Cannot setup ShenandoahEvacOOMScope here, due to potential deadlock with nmethod_entry_barrier.
 878     if (!ShenandoahHeap::heap()->unload_classes()) {
 879       ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
 880       ShenandoahEvacUpdateCodeCacheClosure cl;
 881       _nmethod_itr.nmethods_do(&cl);
 882     }
 883   }
 884 };
 885 
 886 void ShenandoahConcurrentGC::op_strong_roots() {
 887   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 888   assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
 889   ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
 890   heap->workers()->run_task(&task);
 891   heap->set_concurrent_strong_root_in_progress(false);
 892 }
 893 
 894 void ShenandoahConcurrentGC::op_cleanup_early() {
 895   ShenandoahHeap::heap()->free_set()->recycle_trash();
 896 }
 897 
 898 void ShenandoahConcurrentGC::op_evacuate() {
 899   ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
 900 }
 901 
 902 void ShenandoahConcurrentGC::op_init_updaterefs() {
 903   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 904   heap->set_evacuation_in_progress(false);
 905   heap->set_concurrent_weak_root_in_progress(false);
 906   heap->prepare_update_heap_references(true /*concurrent*/);
 907   heap->set_update_refs_in_progress(true);
 908 
 909   if (ShenandoahPacing) {
 910     heap->pacer()->setup_for_updaterefs();
 911   }
 912 }
 913 
 914 void ShenandoahConcurrentGC::op_updaterefs() {
 915   ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
 916 }
 917 
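// Handshake closure that walks a Java thread's oops and updates any references
// to forwarded objects, so thread roots point into to-space again.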
 918 class ShenandoahUpdateThreadClosure : public HandshakeClosure {
 919 private:
 920   ShenandoahUpdateRefsClosure _cl;
 921 public:
 922   ShenandoahUpdateThreadClosure();
 923   void do_thread(Thread* thread);
 924 };
 925 
 926 ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
 927   HandshakeClosure("Shenandoah Update Thread Roots") {
 928 }
 929 
 930 void ShenandoahUpdateThreadClosure::do_thread(Thread* thread) {
 931   if (thread->is_Java_thread()) {
 932     JavaThread* jt = JavaThread::cast(thread);
 933     ResourceMark rm;
 934     jt->oops_do(&_cl, nullptr);
 935   }
 936 }
 937 
 938 void ShenandoahConcurrentGC::op_update_thread_roots() {
 939   ShenandoahUpdateThreadClosure cl;
 940   Handshake::execute(&cl);
 941 }
 942 
 943 void ShenandoahConcurrentGC::op_final_updaterefs() {
 944   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 945   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
 946   assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");
 947 
 948   heap->finish_concurrent_roots();
 949 
  950   // Clear cancelled GC, if set. On the cancellation path, the block before would
  951   // handle everything.
 952   if (heap->cancelled_gc()) {
 953     heap->clear_cancelled_gc();
 954   }
 955 
  956   // Has to be done before the cset is cleared
 957   if (ShenandoahVerify) {
 958     heap->verifier()->verify_roots_in_to_space();
 959   }
 960 
 961   heap->update_heap_region_states(true /*concurrent*/);
 962 
 963   heap->set_update_refs_in_progress(false);
 964   heap->set_has_forwarded_objects(false);
 965 
 966   if (ShenandoahVerify) {
 967     heap->verifier()->verify_after_updaterefs();
 968   }
 969 
 970   if (VerifyAfterGC) {
 971     Universe::verify();
 972   }
 973 
 974   heap->rebuild_free_set(true /*concurrent*/);
 975 }
 976 
 977 void ShenandoahConcurrentGC::op_final_roots() {
 978   ShenandoahHeap::heap()->set_concurrent_weak_root_in_progress(false);
 979 }
 980 
 981 void ShenandoahConcurrentGC::op_cleanup_complete() {
 982   ShenandoahHeap::heap()->free_set()->recycle_trash();
 983 }
 984 
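// If the GC has been cancelled, remember how far the cycle got so the
// degenerated cycle knows where to pick up, and tell the caller to abort.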
 985 bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
 986   if (ShenandoahHeap::heap()->cancelled_gc()) {
 987     _degen_point = point;
 988     return true;
 989   }
 990   return false;
 991 }
 992 
 993 const char* ShenandoahConcurrentGC::init_mark_event_message() const {
 994   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 995   assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
 996   if (heap->unload_classes()) {
 997     return "Pause Init Mark (unload classes)";
 998   } else {
 999     return "Pause Init Mark";
1000   }
1001 }
1002 
1003 const char* ShenandoahConcurrentGC::final_mark_event_message() const {
1004   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1005   assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1006   if (heap->unload_classes()) {
1007     return "Pause Final Mark (unload classes)";
1008   } else {
1009     return "Pause Final Mark";
1010   }
1011 }
1012 
1013 const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
1014   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1015   assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1016   if (heap->unload_classes()) {
1017     return "Concurrent marking (unload classes)";
1018   } else {
1019     return "Concurrent marking";
1020   }
1021 }

   1 /*
   2  * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
   4  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
   5  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   6  *
   7  * This code is free software; you can redistribute it and/or modify it
   8  * under the terms of the GNU General Public License version 2 only, as
   9  * published by the Free Software Foundation.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 #include "precompiled.hpp"
  28 
  29 #include "gc/shared/barrierSetNMethod.hpp"
  30 #include "gc/shared/collectorCounters.hpp"
  31 #include "gc/shared/continuationGCSupport.inline.hpp"
  32 #include "gc/shenandoah/shenandoahBreakpoint.hpp"
  33 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  34 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  35 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
  36 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  37 #include "gc/shenandoah/shenandoahGeneration.hpp"
  38 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
  39 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  40 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  41 #include "gc/shenandoah/shenandoahLock.hpp"
  42 #include "gc/shenandoah/shenandoahMark.inline.hpp"
  43 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  44 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  45 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  46 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  47 #include "gc/shenandoah/shenandoahStackWatermark.hpp"
  48 #include "gc/shenandoah/shenandoahUtils.hpp"
  49 #include "gc/shenandoah/shenandoahVerifier.hpp"
  50 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  51 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  52 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  53 #include "memory/allocation.hpp"
  54 #include "prims/jvmtiTagMap.hpp"
  55 #include "runtime/vmThread.hpp"
  56 #include "utilities/events.hpp"
  57 
  58 // Breakpoint support
  59 class ShenandoahBreakpointGCScope : public StackObj {
  60 private:
  61   const GCCause::Cause _cause;
  62 public:
  63   ShenandoahBreakpointGCScope(GCCause::Cause cause) : _cause(cause) {

  74   }
  75 };
  76 
  77 class ShenandoahBreakpointMarkScope : public StackObj {
  78 private:
  79   const GCCause::Cause _cause;
  80 public:
  81   ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
  82     if (_cause == GCCause::_wb_breakpoint) {
  83       ShenandoahBreakpoint::at_after_marking_started();
  84     }
  85   }
  86 
  87   ~ShenandoahBreakpointMarkScope() {
  88     if (_cause == GCCause::_wb_breakpoint) {
  89       ShenandoahBreakpoint::at_before_marking_completed();
  90     }
  91   }
  92 };
  93 
  94 ShenandoahConcurrentGC::ShenandoahConcurrentGC(ShenandoahGeneration* generation, bool do_old_gc_bootstrap) :
  95   _mark(generation),
  96   _generation(generation),
  97   _degen_point(ShenandoahDegenPoint::_degenerated_unset),
  98   _abbreviated(false),
  99   _do_old_gc_bootstrap(do_old_gc_bootstrap) {
 100 }
 101 
 102 ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
 103   return _degen_point;
 104 }
 105 
 106 void ShenandoahConcurrentGC::entry_concurrent_update_refs_prepare(ShenandoahHeap* const heap) {
 107   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 108   const char* msg = conc_init_update_refs_event_message();
 109   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs_prepare);
 110   EventMark em("%s", msg);
 111 
 112   // Evacuation is complete, retire gc labs and change gc state
 113   heap->concurrent_prepare_for_update_refs();
 114 }
 115 
 116 bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
 117   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 118 
 119   ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);
 120 
 121   // Reset for upcoming marking
 122   entry_reset();
 123 
 124   // Start initial mark under STW
 125   vmop_entry_init_mark();
 126 
 127   {
 128     ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);
 129 
 130     // Reset task queue stats here, rather than in mark_concurrent_roots,
 131     // because remembered set scan will `push` oops into the queues and
 132     // resetting after this happens will lose those counts.
 133     TASKQUEUE_STATS_ONLY(_mark.task_queues()->reset_taskqueue_stats());
 134 
 135     // Concurrent remembered set scanning
 136     entry_scan_remembered_set();
 137 
 138     // Concurrent mark roots
 139     entry_mark_roots();
 140     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_roots)) {
 141       return false;
 142     }
 143 
 144     // Continue concurrent mark
 145     entry_mark();
 146     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) {
 147       return false;
 148     }
 149   }
 150 
 151   // Complete marking under STW, and start evacuation
 152   vmop_entry_final_mark();
 153 
 154   // If the GC was cancelled before final mark, nothing happens on the safepoint. We are still
 155   // in the marking phase and must resume the degenerated cycle from there. If the GC was cancelled
 156   // after final mark, then we've entered the evacuation phase and must resume the degenerated cycle
 157   // from that phase.
 158   if (_generation->is_concurrent_mark_in_progress()) {
 159     bool cancelled = check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark);
 160     assert(cancelled, "GC must have been cancelled between concurrent and final mark");
 161     return false;
 162   }
 163 
 164   assert(heap->is_concurrent_weak_root_in_progress(), "Must be doing weak roots now");
 165 
 166   // Concurrent stack processing
 167   if (heap->is_evacuation_in_progress()) {
 168     entry_thread_roots();
 169   }
 170 
 171   // Process weak roots that might still point to regions that would be broken by cleanup.
 172   // We cannot recycle regions because weak roots need to know what is marked in trashed regions.
 173   entry_weak_refs();
 174   entry_weak_roots();
 175 
 176   // Perform concurrent class unloading before any regions get recycled. Class unloading may
 177   // need to inspect unmarked objects in trashed regions.
 178   if (heap->unload_classes()) {
 179     entry_class_unloading();
 180   }
 181 
  182   // Final mark might have reclaimed some immediate garbage; kick cleanup to reclaim
 183   // the space. This would be the last action if there is nothing to evacuate.  Note that
 184   // we will not age young-gen objects in the case that we skip evacuation.
 185   entry_cleanup_early();
 186 
 187   heap->free_set()->log_status_under_lock();
 188 
 189   // Processing strong roots
 190   // This may be skipped if there is nothing to update/evacuate.
 191   // If so, strong_root_in_progress would be unset.
 192   if (heap->is_concurrent_strong_root_in_progress()) {
 193     entry_strong_roots();
 194   }
 195 
 196   // Continue the cycle with evacuation and optional update-refs.
 197   // This may be skipped if there is nothing to evacuate.
 198   // If so, evac_in_progress would be unset by collection set preparation code.
 199   if (heap->is_evacuation_in_progress()) {
 200     // Concurrently evacuate
 201     entry_evacuate();
 202     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) {
 203       return false;
 204     }
 205 
 206     entry_concurrent_update_refs_prepare(heap);
 207 
 208     // Perform update-refs phase.
 209     if (ShenandoahVerify || ShenandoahPacing) {
 210       vmop_entry_init_update_refs();
 211     }
 212 
 213     entry_update_refs();
 214     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_update_refs)) {
 215       return false;
 216     }
 217 
 218     // Concurrent update thread roots
 219     entry_update_thread_roots();
 220     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_update_refs)) {
 221       return false;
 222     }
 223 
 224     vmop_entry_final_update_refs();
 225 
  226     // Update references freed up the collection set; kick the cleanup to reclaim the space.
 227     entry_cleanup_complete();
 228   } else {
 229     if (!entry_final_roots()) {
 230       assert(_degen_point != _degenerated_unset, "Need to know where to start degenerated cycle");
 231       return false;
 232     }
 233 
 234     if (VerifyAfterGC) {
 235       vmop_entry_verify_final_roots();
 236     }
 237     _abbreviated = true;
 238   }
 239 
 240   // We defer generation resizing actions until after cset regions have been recycled.  We do this even following an
 241   // abbreviated cycle.
 242   if (heap->mode()->is_generational()) {
 243     ShenandoahGenerationalHeap::heap()->complete_concurrent_cycle();
 244   }
 245 
 246   // Instead of always resetting immediately before the start of a new GC, we can often reset at the end of the
 247   // previous GC. This allows us to start the next GC cycle more quickly after a trigger condition is detected,
 248   // reducing the likelihood that GC will degenerate.
 249   entry_reset_after_collect();
 250 
 251   return true;
 252 }
 253 
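// Generational mode only: wraps up an abbreviated cycle (one that skipped
// evacuation) by promoting eligible regions in place and publishing the
// final-roots state change, flushing SATB buffers if old marking is running.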
 254 bool ShenandoahConcurrentGC::complete_abbreviated_cycle() {
 255   shenandoah_assert_generational();
 256 
 257   ShenandoahGenerationalHeap* const heap = ShenandoahGenerationalHeap::heap();
 258 
 259   // We chose not to evacuate because we found sufficient immediate garbage.
 260   // However, there may still be regions to promote in place, so do that now.
 261   if (heap->old_generation()->has_in_place_promotions()) {
 262     entry_promote_in_place();
 263 
 264     // If the promote-in-place operation was cancelled, we can have the degenerated
 265     // cycle complete the operation. It will see that no evacuations are in progress,
 266     // and that there are regions wanting promotion. The risk with not handling the
 267     // cancellation would be failing to restore top for these regions and leaving
  268     // them unable to serve allocations for the old generation. This will leave the weak
 269     // roots flag set (the degenerated cycle will unset it).
 270     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) {
 271       return false;
 272     }
 273   }
 274 
 275   // At this point, the cycle is effectively complete. If the cycle has been cancelled here,
 276   // the control thread will detect it on its next iteration and run a degenerated young cycle.
 277   if (!_generation->is_old()) {
 278     heap->update_region_ages(_generation->complete_marking_context());
 279   }
 280 
 281   if (!heap->is_concurrent_old_mark_in_progress()) {
 282     heap->concurrent_final_roots();
 283   } else {
 284     // Since the cycle was shortened for having enough immediate garbage, this will be
 285     // the last phase before concurrent marking of old resumes. We must be sure
 286     // that old mark threads don't see any pointers to garbage in the SATB queues. Even
 287     // though nothing was evacuated, overwriting unreachable weak roots with null may still
 288     // put pointers to regions that become trash in the SATB queues. The following will
 289     // piggyback flushing the thread local SATB queues on the same handshake that propagates
 290     // the gc state change.
 291     ShenandoahSATBMarkQueueSet& satb_queues = ShenandoahBarrierSet::satb_mark_queue_set();
 292     ShenandoahFlushSATBHandshakeClosure complete_thread_local_satb_buffers(satb_queues);
 293     heap->concurrent_final_roots(&complete_thread_local_satb_buffers);
 294     heap->old_generation()->concurrent_transfer_pointers_from_satb();
 295   }
 296   return true;
 297 }
 298 
 299 
 300 void ShenandoahConcurrentGC::vmop_entry_init_mark() {
 301   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 302   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 303   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);
 304 
 305   heap->try_inject_alloc_failure();
 306   VM_ShenandoahInitMark op(this);
 307   VMThread::execute(&op); // jump to entry_init_mark() under safepoint
 308 }
 309 
 310 void ShenandoahConcurrentGC::vmop_entry_final_mark() {
 311   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 312   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 313   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);
 314 
 315   heap->try_inject_alloc_failure();
 316   VM_ShenandoahFinalMarkStartEvac op(this);
 317   VMThread::execute(&op); // jump to entry_final_mark under safepoint
 318 }
 319 
 320 void ShenandoahConcurrentGC::vmop_entry_init_update_refs() {
 321   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 322   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 323   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_update_refs_gross);
 324 
 325   heap->try_inject_alloc_failure();
 326   VM_ShenandoahInitUpdateRefs op(this);
 327   VMThread::execute(&op);
 328 }
 329 
 330 void ShenandoahConcurrentGC::vmop_entry_final_update_refs() {
 331   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 332   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 333   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_gross);
 334 
 335   heap->try_inject_alloc_failure();
 336   VM_ShenandoahFinalUpdateRefs op(this);
 337   VMThread::execute(&op);
 338 }
 339 
 340 void ShenandoahConcurrentGC::vmop_entry_verify_final_roots() {
 341   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 342   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 343   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_roots_gross);
 344 
 345   // This phase does not use workers, no need for setup
 346   heap->try_inject_alloc_failure();
 347   VM_ShenandoahFinalRoots op(this);
 348   VMThread::execute(&op);
 349 }
 350 
 351 void ShenandoahConcurrentGC::entry_init_mark() {
 352   const char* msg = init_mark_event_message();
 353   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_mark);
 354   EventMark em("%s", msg);
 355 
 356   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
 357                               ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
 358                               "init marking");
 359 
 360   op_init_mark();
 361 }
 362 
 363 void ShenandoahConcurrentGC::entry_final_mark() {
 364   const char* msg = final_mark_event_message();
 365   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark);
 366   EventMark em("%s", msg);
 367 
 368   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
 369                               ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
 370                               "final marking");
 371 
 372   op_final_mark();
 373 }
 374 
 375 void ShenandoahConcurrentGC::entry_init_update_refs() {
 376   static const char* msg = "Pause Init Update Refs";
 377   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs);
 378   EventMark em("%s", msg);
 379 
 380   // No workers used in this phase, no setup required
 381   op_init_update_refs();
 382 }
 383 
 384 void ShenandoahConcurrentGC::entry_final_update_refs() {
 385   static const char* msg = "Pause Final Update Refs";
 386   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
 387   EventMark em("%s", msg);
 388 
 389   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
 390                               ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
 391                               "final reference update");
 392 
 393   op_final_update_refs();
 394 }
 395 
 396 void ShenandoahConcurrentGC::entry_verify_final_roots() {
 397   const char* msg = verify_final_roots_event_message();
 398   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots);
 399   EventMark em("%s", msg);
 400 
 401   op_verify_final_roots();
 402 }
 403 
 404 void ShenandoahConcurrentGC::entry_reset() {
 405   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 406   heap->try_inject_alloc_failure();
 407 
 408   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 409   {
 410     const char* msg = conc_reset_event_message();
 411     ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
 412     EventMark em("%s", msg);
 413 
 414     ShenandoahWorkerScope scope(heap->workers(),
 415                                 ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
 416                                 msg);
 417     op_reset();
 418   }
 419 }
 420 
 421 void ShenandoahConcurrentGC::entry_scan_remembered_set() {
 422   if (_generation->is_young()) {
 423     ShenandoahHeap* const heap = ShenandoahHeap::heap();
 424     TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 425     const char* msg = "Concurrent remembered set scanning";
 426     ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::init_scan_rset);
 427     EventMark em("%s", msg);
 428 
 429     ShenandoahWorkerScope scope(heap->workers(),
 430                                 ShenandoahWorkerPolicy::calc_workers_for_rs_scanning(),
 431                                 msg);
 432 
 433     heap->try_inject_alloc_failure();
 434     _generation->scan_remembered_set(true /* is_concurrent */);
 435   }
 436 }
 437 
 438 void ShenandoahConcurrentGC::entry_mark_roots() {
 439   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 440   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 441   const char* msg = "Concurrent marking roots";
 442   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
 443   EventMark em("%s", msg);
 444 
 445   ShenandoahWorkerScope scope(heap->workers(),
 446                               ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
 447                               "concurrent marking roots");
 448 
 449   heap->try_inject_alloc_failure();
 450   op_mark_roots();
 451 }
 452 
 453 void ShenandoahConcurrentGC::entry_mark() {
 454   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 455   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 456   const char* msg = conc_mark_event_message();
 457   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark);
 458   EventMark em("%s", msg);
 459 
 460   ShenandoahWorkerScope scope(heap->workers(),
 461                               ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
 462                               "concurrent marking");
 463 
 464   heap->try_inject_alloc_failure();
 465   op_mark();
 466 }
 467 
 468 void ShenandoahConcurrentGC::entry_thread_roots() {
 469   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 470   static const char* msg = "Concurrent thread roots";
 471   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_thread_roots);
 472   EventMark em("%s", msg);
 473 
 474   ShenandoahWorkerScope scope(heap->workers(),
 475                               ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
 476                               msg);
 477 
 478   heap->try_inject_alloc_failure();
 479   op_thread_roots();
 480 }
 481 
 482 void ShenandoahConcurrentGC::entry_weak_refs() {
 483   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 484   const char* msg = conc_weak_refs_event_message();
 485   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_refs);
 486   EventMark em("%s", msg);
 487 
 488   ShenandoahWorkerScope scope(heap->workers(),
 489                               ShenandoahWorkerPolicy::calc_workers_for_conc_refs_processing(),
 490                               "concurrent weak references");
 491 
 492   heap->try_inject_alloc_failure();
 493   op_weak_refs();
 494 }
 495 
 496 void ShenandoahConcurrentGC::entry_weak_roots() {
 497   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 498   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 499   const char* msg = conc_weak_roots_event_message();
 500   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_roots);
 501   EventMark em("%s", msg);
 502 
 503   ShenandoahWorkerScope scope(heap->workers(),
 504                               ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
 505                               "concurrent weak root");
 506 
 507   heap->try_inject_alloc_failure();
 508   op_weak_roots();
 509 }
 510 
 511 void ShenandoahConcurrentGC::entry_class_unloading() {
 512   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 513   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 514   static const char* msg = "Concurrent class unloading";
 515   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_class_unload);
 516   EventMark em("%s", msg);
 517 
 518   ShenandoahWorkerScope scope(heap->workers(),
 519                               ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
 520                               "concurrent class unloading");
 521 
 522   heap->try_inject_alloc_failure();
 523   op_class_unloading();
 524 }
 525 
 526 void ShenandoahConcurrentGC::entry_strong_roots() {
 527   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 528   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 529   static const char* msg = "Concurrent strong roots";
 530   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_strong_roots);
 531   EventMark em("%s", msg);
 532 
 533   ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_strong_roots);
 534 
 535   ShenandoahWorkerScope scope(heap->workers(),
 536                               ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
 537                               "concurrent strong root");
 538 
 539   heap->try_inject_alloc_failure();
 540   op_strong_roots();
 541 }
 542 
 543 void ShenandoahConcurrentGC::entry_cleanup_early() {
 544   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 545   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 546   const char* msg = conc_cleanup_event_message();
 547   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_early, true /* log_heap_usage */);
 548   EventMark em("%s", msg);
 549 
 550   // This phase does not use workers, no need for setup
 551   heap->try_inject_alloc_failure();
 552   op_cleanup_early();
 553 }
 554 
 555 void ShenandoahConcurrentGC::entry_evacuate() {
 556   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 557   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 558 
 559   static const char* msg = "Concurrent evacuation";
 560   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_evac);
 561   EventMark em("%s", msg);
 562 
 563   ShenandoahWorkerScope scope(heap->workers(),
 564                               ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
 565                               "concurrent evacuation");
 566 
 567   heap->try_inject_alloc_failure();
 568   op_evacuate();
 569 }
 570 
 571 void ShenandoahConcurrentGC::entry_promote_in_place() const {
 572   shenandoah_assert_generational();
 573 
 574   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::promote_in_place);
 575   ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::promote_in_place);
 576   EventMark em("%s", "Promote in place");
 577 
 578   ShenandoahGenerationalHeap::heap()->promote_regions_in_place(true);
 579 }
 580 
 581 void ShenandoahConcurrentGC::entry_update_thread_roots() {
 582   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 583   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 584 
 585   static const char* msg = "Concurrent update thread roots";
 586   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_thread_roots);
 587   EventMark em("%s", msg);
 588 
 589   // No workers used in this phase, no setup required
 590   heap->try_inject_alloc_failure();
 591   op_update_thread_roots();
 592 }
 593 
 594 void ShenandoahConcurrentGC::entry_update_refs() {
 595   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 596   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 597   static const char* msg = "Concurrent update references";
 598   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs);
 599   EventMark em("%s", msg);
 600 
 601   ShenandoahWorkerScope scope(heap->workers(),
 602                               ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
 603                               "concurrent reference update");
 604 
 605   heap->try_inject_alloc_failure();
 606   op_update_refs();
 607 }
 608 
 609 void ShenandoahConcurrentGC::entry_cleanup_complete() {
 610   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 611   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 612   const char* msg = conc_cleanup_event_message();
 613   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
 614   EventMark em("%s", msg);
 615 
 616   // This phase does not use workers, no need for setup
 617   heap->try_inject_alloc_failure();
 618   op_cleanup_complete();
 619 }
 620 
 621 void ShenandoahConcurrentGC::entry_reset_after_collect() {
 622   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 623   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 624   const char* msg = conc_reset_after_collect_event_message();
 625   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset_after_collect);
 626   EventMark em("%s", msg);
 627 
 628   op_reset_after_collect();
 629 }
 630 
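     // Set up pacing for the reset phase, then prepare the generation for marking:
     // on an old GC bootstrap cycle the whole global generation is prepared, otherwise
     // only the collecting generation.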
 631 void ShenandoahConcurrentGC::op_reset() {
 632   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 633   if (ShenandoahPacing) {
 634     heap->pacer()->setup_for_reset();
 635   }
 636   // If this is an old GC bootstrap cycle, always clear the bitmap for the global
 637   // generation so the old generation bitmap is clear for the old GC cycle that follows.
 638   if (_do_old_gc_bootstrap) {
 639     assert(!heap->is_prepare_for_old_mark_in_progress(), "Cannot reset old without making it parsable");
 640     heap->global_generation()->prepare_gc();
 641   } else {
 642     _generation->prepare_gc();
 643   }
 644 }
 645 
 646 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
 647 private:
 648   ShenandoahMarkingContext* const _ctx;
 649 public:
 650   ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
 651 
 652   void heap_region_do(ShenandoahHeapRegion* r) {
 653     assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
 654     if (r->is_active()) {
 655       // Check if the region's TAMS needs updating. We have already updated it during
 656       // concurrent reset, so it is very likely we don't need another write here.  Since
 657       // most regions are not "active", this path is relatively rare.
 658       if (_ctx->top_at_mark_start(r) != r->top()) {
 659         _ctx->capture_top_at_mark_start(r);
 660       }
 661     } else {
 662       assert(_ctx->top_at_mark_start(r) == r->top(),
 663              "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
 664     }
 665   }
 666 
 667   bool is_thread_safe() { return true; }
 668 };
 669 
 670 void ShenandoahConcurrentGC::start_mark() {
 671   _mark.start_mark();
 672 }
 673 
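     // Init mark runs at a safepoint. In generational mode it first swaps the card
     // tables (young collections) or transfers SATB-buffered old pointers (while old
     // marking is in progress); it then verifies if requested, sets mark-in-progress,
     // captures TAMS for active regions, arms nmethods and stack watermarks, and
     // publishes the new GC state to all threads.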
 674 void ShenandoahConcurrentGC::op_init_mark() {
 675   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 676   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
 677   assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
 678 
 679   assert(_generation->is_bitmap_clear(), "need clear marking bitmap");
 680   assert(!_generation->is_mark_complete(), "should not be complete");
 681   assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
 682 
 683 
 684   if (heap->mode()->is_generational()) {
 685     if (_generation->is_young()) {
 686       // The current implementation of swap_remembered_set() copies the write-card-table to the read-card-table.
 687       ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_swap_rset);
 688       _generation->swap_remembered_set();
 689     }
 690 
 691     if (_generation->is_global()) {
 692       heap->old_generation()->cancel_gc();
 693     } else if (heap->is_concurrent_old_mark_in_progress()) {
 694       // Purge the SATB buffers, transferring any valid, old pointers to the
 695       // old generation mark queue. Any pointers in a young region will be
 696       // abandoned.
 697       ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_transfer_satb);
 698       heap->old_generation()->transfer_pointers_from_satb();
 699     }
 700   }
 701 
 702   if (ShenandoahVerify) {
 703     ShenandoahTimingsTracker v(ShenandoahPhaseTimings::init_mark_verify);
 704     heap->verifier()->verify_before_concmark();
 705   }
 706 
 707   if (VerifyBeforeGC) {
 708     Universe::verify();
 709   }
 710 
 711   _generation->set_concurrent_mark_in_progress(true);
 712 
 713   start_mark();
 714 
 715   if (_do_old_gc_bootstrap) {
 716     shenandoah_assert_generational();
 717     // Update region state for both young and old regions
 718     ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
 719     ShenandoahInitMarkUpdateRegionStateClosure cl;
 720     heap->parallel_heap_region_iterate(&cl);
 721     heap->old_generation()->ref_processor()->reset_thread_locals();
 722   } else {
 723     // Update region state for only young regions
 724     ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
 725     ShenandoahInitMarkUpdateRegionStateClosure cl;
 726     _generation->parallel_heap_region_iterate(&cl);
 727   }
 728 
 729   // Weak reference processing
 730   ShenandoahReferenceProcessor* rp = _generation->ref_processor();
 731   rp->reset_thread_locals();
 732   rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());
 733 
 734   // Make above changes visible to worker threads
 735   OrderAccess::fence();
 736 
 737   // Arm nmethods for concurrent mark
 738   ShenandoahCodeRoots::arm_nmethods_for_mark();
 739 
 740   ShenandoahStackWatermark::change_epoch_id();
 741   if (ShenandoahPacing) {
 742     heap->pacer()->setup_for_mark();
 743   }
 744 
 745   {
 746     ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_propagate_gc_state);
 747     heap->propagate_gc_state_to_all_threads();
 748   }
 749 }
 750 
 751 void ShenandoahConcurrentGC::op_mark_roots() {
 752   _mark.mark_concurrent_roots();
 753 }
 754 
 755 void ShenandoahConcurrentGC::op_mark() {
 756   _mark.concurrent_mark();
 757 }
 758 
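     // Final mark runs at a safepoint: finish concurrent marking unless the GC was
     // cancelled, choose the collection set, and either transition into evacuation
     // (arming nmethods and stack watermarks) or, if the collection set is empty,
     // verify and let the cycle finish without evacuation.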
 759 void ShenandoahConcurrentGC::op_final_mark() {
 760   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 761   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
 762   assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
 763 
 764   if (ShenandoahVerify) {
 765     heap->verifier()->verify_roots_no_forwarded();
 766   }
 767 
 768   if (!heap->cancelled_gc()) {
 769     _mark.finish_mark();
 770     assert(!heap->cancelled_gc(), "STW mark cannot OOM");
 771 
 772     // Notify JVMTI that the tagmap table will need cleaning.
 773     JvmtiTagMap::set_needs_cleaning();
 774 
 775     // The collection set is chosen by prepare_regions_and_collection_set(). This call also establishes
 776     // the parameters that govern the evacuation effort about to begin.  Refer to the comments on the
 777     // reserve members in ShenandoahGeneration and ShenandoahOldGeneration for more detail.
 778     _generation->prepare_regions_and_collection_set(true /*concurrent*/);
 779 
 780     // Has to be done after cset selection
 781     heap->prepare_concurrent_roots();
 782 
 783     if (!heap->collection_set()->is_empty()) {
 784       LogTarget(Debug, gc, cset) lt;
 785       if (lt.is_enabled()) {
 786         ResourceMark rm;
 787         LogStream ls(lt);
 788         heap->collection_set()->print_on(&ls);
 789       }
 790 
 791       if (ShenandoahVerify) {
 792         ShenandoahTimingsTracker v(ShenandoahPhaseTimings::final_mark_verify);
 793         heap->verifier()->verify_before_evacuation();
 794       }
 795 
 796       heap->set_evacuation_in_progress(true);
 797       // From here on, we need to update references.
 798       heap->set_has_forwarded_objects(true);
 799 
 800       // Arm nmethods/stack for concurrent processing
 801       ShenandoahCodeRoots::arm_nmethods_for_evac();
 802       ShenandoahStackWatermark::change_epoch_id();
 803 
 804       if (ShenandoahPacing) {
 805         heap->pacer()->setup_for_evac();
 806       }
 807     } else {
 808       if (ShenandoahVerify) {
 809         ShenandoahTimingsTracker v(ShenandoahPhaseTimings::final_mark_verify);
 810         if (has_in_place_promotions(heap)) {
 811           heap->verifier()->verify_after_concmark_with_promotions();
 812         } else {
 813           heap->verifier()->verify_after_concmark();
 814         }
 815       }
 816     }
 817   }
 818 
 819   {
 820     ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_propagate_gc_state);
 821     heap->propagate_gc_state_to_all_threads();
 822   }
 823 }
 824 
 825 bool ShenandoahConcurrentGC::has_in_place_promotions(ShenandoahHeap* heap) {
 826   return heap->mode()->is_generational() && heap->old_generation()->has_in_place_promotions();
 827 }
 828 
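     // Visits each Java thread during concurrent evacuation of thread roots: finishes
     // stack watermark processing for the thread and, in generational mode, re-enables
     // PLAB promotions for it.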
 829 template<bool GENERATIONAL>
 830 class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
 831 private:
 832   OopClosure* const _oops;
 833 public:
 834   explicit ShenandoahConcurrentEvacThreadClosure(OopClosure* oops) : _oops(oops) {}
 835 
 836   void do_thread(Thread* thread) override {
 837     JavaThread* const jt = JavaThread::cast(thread);
 838     StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc);
 839     if (GENERATIONAL) {
 840       ShenandoahThreadLocalData::enable_plab_promotions(thread);
 841     }
 842   }
 843 };
 844 
 845 template<bool GENERATIONAL>
 846 class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask {
 847 private:
 848   ShenandoahJavaThreadsIterator _java_threads;
 849 
 850 public:
 851   explicit ShenandoahConcurrentEvacUpdateThreadTask(uint n_workers) :
 852     WorkerTask("Shenandoah Evacuate/Update Concurrent Thread Roots"),
 853     _java_threads(ShenandoahPhaseTimings::conc_thread_roots, n_workers) {
 854   }
 855 
 856   void work(uint worker_id) override {
 857     if (GENERATIONAL) {
 858       Thread* worker_thread = Thread::current();
 859       ShenandoahThreadLocalData::enable_plab_promotions(worker_thread);
 860     }
 861 
 862     // ShenandoahEvacOOMScope has to be set up by ShenandoahContextEvacuateUpdateRootsClosure.
 863     // Otherwise, it may deadlock with the watermark lock.
 864     ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
 865     ShenandoahConcurrentEvacThreadClosure<GENERATIONAL> thr_cl(&oops_cl);
 866     _java_threads.threads_do(&thr_cl, worker_id);
 867   }
 868 };
 869 
 870 void ShenandoahConcurrentGC::op_thread_roots() {
 871   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 872   assert(heap->is_evacuation_in_progress(), "Checked by caller");
 873   ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
 874   if (heap->mode()->is_generational()) {
 875     ShenandoahConcurrentEvacUpdateThreadTask<true> task(heap->workers()->active_workers());
 876     heap->workers()->run_task(&task);
 877   } else {
 878     ShenandoahConcurrentEvacUpdateThreadTask<false> task(heap->workers()->active_workers());
 879     heap->workers()->run_task(&task);
 880   }
 881 }
 882 
 883 void ShenandoahConcurrentGC::op_weak_refs() {
 884   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 885   assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
 886   // Concurrent weak refs processing
 887   ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
 888   if (heap->gc_cause() == GCCause::_wb_breakpoint) {
 889     ShenandoahBreakpoint::at_after_reference_processing_started();
 890   }
 891   _generation->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
 892 }
 893 
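     // Cleans up OopStorage-backed roots: oops that were not marked are cleared in
     // place (if they belong to the active generation), while marked oops still in
     // the collection set are evacuated and the root slot updated to the forwardee.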
 894 class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
 895 private:
 896   ShenandoahHeap* const _heap;
 897   ShenandoahMarkingContext* const _mark_context;
 898   bool  _evac_in_progress;
 899   Thread* const _thread;
 900 
 901 public:
 902   ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
 903   void do_oop(oop* p);
 904   void do_oop(narrowOop* p);
 905 };
 906 
 907 ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
 908   _heap(ShenandoahHeap::heap()),
 909   _mark_context(ShenandoahHeap::heap()->marking_context()),
 910   _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
 911   _thread(Thread::current()) {
 912 }
 913 
 914 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
 915   const oop obj = RawAccess<>::oop_load(p);
 916   if (!CompressedOops::is_null(obj)) {
 917     if (!_mark_context->is_marked(obj)) {
 918       shenandoah_assert_generations_reconciled();
 919       if (_heap->is_in_active_generation(obj)) {
 920         // Note: The obj is dead here. Do not touch it, just clear.
 921         ShenandoahHeap::atomic_clear_oop(p, obj);
 922       }
 923     } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
 924       oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 925       if (resolved == obj) {
 926         resolved = _heap->evacuate_object(obj, _thread);
 927       }
 928       shenandoah_assert_not_in_cset_except(p, resolved, _heap->cancelled_gc());
 929       ShenandoahHeap::atomic_update_oop(resolved, p, obj);
 930     }
 931   }
 932 }
 933 
 934 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
 935   ShouldNotReachHere();
 936 }
 937 
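     // The two closures below are run for their side effects only: is_alive() and
     // is_unloading() compute and cache liveness/unloading state, so the subsequent
     // concurrent unloading pass does not have to touch dead metadata.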
 938 class ShenandoahIsCLDAliveClosure : public CLDClosure {
 939 public:
 940   void do_cld(ClassLoaderData* cld) {
 941     cld->is_alive();
 942   }
 943 };
 944 
 945 class ShenandoahIsNMethodAliveClosure: public NMethodClosure {
 946 public:
 947   void do_nmethod(nmethod* n) {
 948     n->is_unloading();
 949   }

 969     _nmethod_itr(ShenandoahCodeRoots::table()),
 970     _phase(phase) {}
 971 
 972   ~ShenandoahConcurrentWeakRootsEvacUpdateTask() {
 973     // Notify runtime data structures of potentially dead oops
 974     _vm_roots.report_num_dead();
 975   }
 976 
 977   void work(uint worker_id) {
 978     ShenandoahConcurrentWorkerSession worker_session(worker_id);
 979     ShenandoahSuspendibleThreadSetJoiner sts_join;
 980     {
 981       ShenandoahEvacOOMScope oom;
 982       // jni_roots and weak_roots are OopStorage-backed roots; concurrent iteration
 983       // may race against OopStorage::release() calls.
 984       ShenandoahEvacUpdateCleanupOopStorageRootsClosure cl;
 985       _vm_roots.oops_do(&cl, worker_id);
 986     }
 987 
 988     // If we are going to perform concurrent class unloading later on, we need to
 989     // clean up the weak oops in the CLDs and determine each nmethod's unloading state, so that we
 990     // can clean up immediate garbage sooner.
 991     if (ShenandoahHeap::heap()->unload_classes()) {
 992       // Applies the ShenandoahIsCLDAliveClosure to CLDs; the native barrier will either null the
 993       // CLD's holder or evacuate it.
 994       {
 995         ShenandoahIsCLDAliveClosure is_cld_alive;
 996         _cld_roots.cld_do(&is_cld_alive, worker_id);
 997       }
 998 
 999       // Applies ShenandoahIsNMethodAliveClosure to registered nmethods.
1000       // The closure calls nmethod->is_unloading(). The is_unloading
1001       // state is cached; therefore, during the concurrent class unloading phase,
1002       // we will not touch the metadata of unloading nmethods.
1003       {
1004         ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
1005         ShenandoahIsNMethodAliveClosure is_nmethod_alive;
1006         _nmethod_itr.nmethods_do(&is_nmethod_alive);
1007       }
1008     }
1009   }
1010 };
1011 
1012 void ShenandoahConcurrentGC::op_weak_roots() {
1013   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1014   assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
1015   {
1016     // Concurrent weak root processing
1017     ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_work);
1018     ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots_work);
1019     ShenandoahConcurrentWeakRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_weak_roots_work);
1020     heap->workers()->run_task(&task);
1021   }
1022 
1023   {
1024     // It is possible for mutators executing the load reference barrier to have
1025     // loaded an oop through a weak handle that has since been nulled out by
1026     // weak root processing. Handshaking here forces them to complete the
1027     // barrier before the GC cycle continues and does something that would
1028     // change the evaluation of the barrier (for example, resetting the TAMS
1029     // on trashed regions could make an oop appear to be marked _after_ the
1030     // region has been recycled).
1031     ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_rendezvous);
1032     heap->rendezvous_threads();
1033   }
1034 }
1035 
1036 void ShenandoahConcurrentGC::op_class_unloading() {
1037   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1038   assert(heap->is_concurrent_weak_root_in_progress() &&
1039          heap->unload_classes(),
1040          "Checked by caller");
1041   heap->do_class_unloading();
1042 }
1043 
1044 class ShenandoahEvacUpdateCodeCacheClosure : public NMethodClosure {
1045 private:
1046   BarrierSetNMethod* const                  _bs;
1047   ShenandoahEvacuateUpdateMetadataClosure   _cl;
1048 
1049 public:
1050   ShenandoahEvacUpdateCodeCacheClosure() :

1098     }
1099 
1100     // Cannot setup ShenandoahEvacOOMScope here, due to potential deadlock with nmethod_entry_barrier.
1101     if (!ShenandoahHeap::heap()->unload_classes()) {
1102       ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
1103       ShenandoahEvacUpdateCodeCacheClosure cl;
1104       _nmethod_itr.nmethods_do(&cl);
1105     }
1106   }
1107 };
1108 
1109 void ShenandoahConcurrentGC::op_strong_roots() {
1110   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1111   assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
1112   ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
1113   heap->workers()->run_task(&task);
1114   heap->set_concurrent_strong_root_in_progress(false);
1115 }
1116 
1117 void ShenandoahConcurrentGC::op_cleanup_early() {
1118   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
1119                               ShenandoahWorkerPolicy::calc_workers_for_conc_cleanup(),
1120                               "cleanup early");
1121   ShenandoahHeap::heap()->recycle_trash();
1122 }
1123 
1124 void ShenandoahConcurrentGC::op_evacuate() {
1125   ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
1126 }
1127 
1128 void ShenandoahConcurrentGC::op_init_update_refs() {
1129   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1130   if (ShenandoahVerify) {
1131     ShenandoahTimingsTracker v(ShenandoahPhaseTimings::init_update_refs_verify);
1132     heap->verifier()->verify_before_update_refs();
1133   }
1134   if (ShenandoahPacing) {
1135     heap->pacer()->setup_for_update_refs();
1136   }
1137 }
1138 
1139 void ShenandoahConcurrentGC::op_update_refs() {
1140   ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
1141 }
1142 
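     // Handshake closure that applies ShenandoahUpdateRefsClosure to each Java
     // thread's oops, updating its stack and thread-local roots to point to to-space.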
1143 class ShenandoahUpdateThreadClosure : public HandshakeClosure {
1144 private:
1145   ShenandoahUpdateRefsClosure _cl;
1146 public:
1147   ShenandoahUpdateThreadClosure();
1148   void do_thread(Thread* thread);
1149 };
1150 
1151 ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
1152   HandshakeClosure("Shenandoah Update Thread Roots") {
1153 }
1154 
1155 void ShenandoahUpdateThreadClosure::do_thread(Thread* thread) {
1156   if (thread->is_Java_thread()) {
1157     JavaThread* jt = JavaThread::cast(thread);
1158     ResourceMark rm;
1159     jt->oops_do(&_cl, nullptr);
1160   }
1161 }
1162 
1163 void ShenandoahConcurrentGC::op_update_thread_roots() {
1164   ShenandoahUpdateThreadClosure cl;
1165   Handshake::execute(&cl);
1166 }
1167 
1168 void ShenandoahConcurrentGC::op_final_update_refs() {
1169   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1170   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
1171   assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");
1172 
1173   heap->finish_concurrent_roots();
1174 
1175   // Clear cancelled GC, if set. On the cancellation path, the preceding block would
1176   // have handled everything.
1177   if (heap->cancelled_gc()) {
1178     heap->clear_cancelled_gc(true /* clear oom handler */);
1179   }
1180 
1181   // Has to be done before the cset is cleared
1182   if (ShenandoahVerify) {
1183     heap->verifier()->verify_roots_in_to_space();
1184   }
1185 
1186   // If we are running in generational mode and this is an aging cycle, this will also age active
1187   // regions that haven't been used for allocation.
1188   heap->update_heap_region_states(true /*concurrent*/);
1189 
1190   heap->set_update_refs_in_progress(false);
1191   heap->set_has_forwarded_objects(false);
1192 
1193   if (heap->mode()->is_generational() && heap->is_concurrent_old_mark_in_progress()) {
1194     // When the SATB barrier is left on to support concurrent old gen mark, it may pick up writes to
1195     // objects in the collection set. After those objects are evacuated, the pointers in the
1196     // SATB are no longer safe. Once we have finished update references, we are guaranteed that
1197     // no more writes to the collection set are possible.
1198     //
1199     // This will transfer any old pointers in _active_ regions from the SATB to the old gen
1200     // mark queues. All other pointers will be discarded. This would also discard any pointers
1201     // in old regions that were included in a mixed evacuation. We aren't using the SATB filter
1202     // methods here because we cannot control when they execute. If the SATB filter runs _after_
1203     // a region has been recycled, we will not be able to detect the bad pointer.
1204     //
1205     // We are not concerned about skipping this step in abbreviated cycles because regions
1206     // with no live objects cannot have been written to and so cannot have entries in the SATB
1207     // buffers.
1208     heap->old_generation()->transfer_pointers_from_satb();
1209 
1210     // The aging cycle is only relevant during evacuation for individual objects and during final mark
1211     // for entire regions.  Both of these operations occur before final update refs.
1212     ShenandoahGenerationalHeap::heap()->set_aging_cycle(false);
1213   }
1214 
1215   if (ShenandoahVerify) {
1216     ShenandoahTimingsTracker v(ShenandoahPhaseTimings::final_update_refs_verify);
1217     heap->verifier()->verify_after_update_refs();
1218   }
1219 
1220   if (VerifyAfterGC) {
1221     Universe::verify();
1222   }
1223 
1224   heap->rebuild_free_set(true /*concurrent*/);
1225 
1226   {
1227     ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_propagate_gc_state);
1228     heap->propagate_gc_state_to_all_threads();
1229   }
1230 }
1231 
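     // Returns true when final-roots work completed. In generational mode, a false
     // return means complete_abbreviated_cycle() could not finish and the caller must
     // not treat the abbreviated cycle as done.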
1232 bool ShenandoahConcurrentGC::entry_final_roots() {
1233   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1234   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
1235 
1236 
1237   const char* msg = conc_final_roots_event_message();
1238   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_final_roots);
1239   EventMark em("%s", msg);
1240   ShenandoahWorkerScope scope(heap->workers(),
1241                               ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
1242                               msg);
1243 
1244   if (!heap->mode()->is_generational()) {
1245     heap->concurrent_final_roots();
1246   } else {
1247     if (!complete_abbreviated_cycle()) {
1248       return false;
1249     }
1250   }
1251   return true;
1252 }
1253 
1254 void ShenandoahConcurrentGC::op_verify_final_roots() {
1255   if (VerifyAfterGC) {
1256     Universe::verify();
1257   }
1258 }
1259 
1260 void ShenandoahConcurrentGC::op_cleanup_complete() {
1261   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
1262                               ShenandoahWorkerPolicy::calc_workers_for_conc_cleanup(),
1263                               "cleanup complete");
1264   ShenandoahHeap::heap()->recycle_trash();
1265 }
1266 
1267 void ShenandoahConcurrentGC::op_reset_after_collect() {
1268   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
1269                               ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
1270                               "reset after collection");
1271 
1272   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1273   if (heap->mode()->is_generational()) {
1274     // If we are in the midst of an old GC bootstrap or old marking, we want to leave the mark bitmap of
1275     // the young generation intact. In particular, reference processing in the old generation may need to
1276     // determine the reachability of a young generation referent of a Reference object in the old generation.
1277     if (!_do_old_gc_bootstrap && !heap->is_concurrent_old_mark_in_progress()) {
1278       heap->young_generation()->reset_mark_bitmap<false>();
1279     }
1280   } else {
1281     _generation->reset_mark_bitmap<false>();
1282   }
1283 }
1284 
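     // If the GC has been cancelled, remember the point from which a degenerated GC
     // can pick up the work and tell the caller to abort the concurrent cycle.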
1285 bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
1286   if (ShenandoahHeap::heap()->cancelled_gc()) {
1287     _degen_point = point;
1288     return true;
1289   }
1290   return false;
1291 }
1292 
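     // The *_event_message() helpers below build the phase labels used for logging and
     // JFR events, appending " (unload classes)" when class unloading is enabled for
     // this cycle.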
1293 const char* ShenandoahConcurrentGC::init_mark_event_message() const {
1294   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1295   assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1296   if (heap->unload_classes()) {
1297     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Init Mark", " (unload classes)");
1298   } else {
1299     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Init Mark", "");
1300   }
1301 }
1302 
1303 const char* ShenandoahConcurrentGC::final_mark_event_message() const {
1304   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1305   assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
1306          "Should not have forwarded objects during final mark, unless old gen concurrent mark is running");
1307 
1308   if (heap->unload_classes()) {
1309     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Mark", " (unload classes)");
1310   } else {
1311     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Mark", "");
1312   }
1313 }
1314 
1315 const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
1316   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1317   assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
1318          "Should not have forwarded objects during concurrent mark, unless old gen concurrent mark is running");
1319   if (heap->unload_classes()) {
1320     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent marking", " (unload classes)");
1321   } else {
1322     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent marking", "");
1323   }
1324 }
1325 
1326 const char* ShenandoahConcurrentGC::conc_reset_event_message() const {
1327   if (ShenandoahHeap::heap()->unload_classes()) {
1328     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent reset", " (unload classes)");
1329   } else {
1330     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent reset", "");
1331   }
1332 }
1333 
1334 const char* ShenandoahConcurrentGC::conc_reset_after_collect_event_message() const {
1335   if (ShenandoahHeap::heap()->unload_classes()) {
1336     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent reset after collect", " (unload classes)");
1337   } else {
1338     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent reset after collect", "");
1339   }
1340 }
1341 
1342 const char* ShenandoahConcurrentGC::verify_final_roots_event_message() const {
1343   if (ShenandoahHeap::heap()->unload_classes()) {
1344     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Verify Final Roots", " (unload classes)");
1345   } else {
1346     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Verify Final Roots", "");
1347   }
1348 }
1349 
1350 const char* ShenandoahConcurrentGC::conc_final_roots_event_message() const {
1351   if (ShenandoahHeap::heap()->unload_classes()) {
1352     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent Final Roots", " (unload classes)");
1353   } else {
1354     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent Final Roots", "");
1355   }
1356 }
1357 
1358 const char* ShenandoahConcurrentGC::conc_weak_refs_event_message() const {
1359   if (ShenandoahHeap::heap()->unload_classes()) {
1360     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent weak references", " (unload classes)");
1361   } else {
1362     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent weak references", "");
1363   }
1364 }
1365 
1366 const char* ShenandoahConcurrentGC::conc_weak_roots_event_message() const {
1367   if (ShenandoahHeap::heap()->unload_classes()) {
1368     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent weak roots", " (unload classes)");
1369   } else {
1370     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent weak roots", "");
1371   }
1372 }
1373 
1374 const char* ShenandoahConcurrentGC::conc_cleanup_event_message() const {
1375   if (ShenandoahHeap::heap()->unload_classes()) {
1376     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent cleanup", " (unload classes)");
1377   } else {
1378     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent cleanup", "");
1379   }
1380 }
1381 
1382 const char* ShenandoahConcurrentGC::conc_init_update_refs_event_message() const {
1383   if (ShenandoahHeap::heap()->unload_classes()) {
1384     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent Init Update Refs", " (unload classes)");
1385   } else {
1386     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent Init Update Refs", "");
1387   }
1388 }