1 /*
   2  * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
   3  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 
  28 #include "gc/shared/barrierSetNMethod.hpp"
  29 #include "gc/shared/collectorCounters.hpp"
  30 #include "gc/shared/continuationGCSupport.inline.hpp"
  31 #include "gc/shenandoah/shenandoahBreakpoint.hpp"
  32 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  33 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
  34 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  35 #include "gc/shenandoah/shenandoahGeneration.hpp"
  36 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  37 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  38 #include "gc/shenandoah/shenandoahLock.hpp"
  39 #include "gc/shenandoah/shenandoahMark.inline.hpp"
  40 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  41 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  42 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  43 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  44 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  45 #include "gc/shenandoah/shenandoahStackWatermark.hpp"
  46 #include "gc/shenandoah/shenandoahUtils.hpp"
  47 #include "gc/shenandoah/shenandoahVerifier.hpp"
  48 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  49 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  50 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  51 #include "memory/allocation.hpp"
  52 #include "prims/jvmtiTagMap.hpp"
  53 #include "runtime/vmThread.hpp"
  54 #include "utilities/events.hpp"
  55 
  56 // Breakpoint support
  57 class ShenandoahBreakpointGCScope : public StackObj {
  58 private:
  59   const GCCause::Cause _cause;
  60 public:
  61   ShenandoahBreakpointGCScope(GCCause::Cause cause) : _cause(cause) {
  62     if (cause == GCCause::_wb_breakpoint) {
  63       ShenandoahBreakpoint::start_gc();
  64       ShenandoahBreakpoint::at_before_gc();
  65     }
  66   }
  67 
  68   ~ShenandoahBreakpointGCScope() {
  69     if (_cause == GCCause::_wb_breakpoint) {
  70       ShenandoahBreakpoint::at_after_gc();
  71     }
  72   }
  73 };
  74 
  75 class ShenandoahBreakpointMarkScope : public StackObj {
  76 private:
  77   const GCCause::Cause _cause;
  78 public:
  79   ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
  80     if (_cause == GCCause::_wb_breakpoint) {
  81       ShenandoahBreakpoint::at_after_marking_started();
  82     }
  83   }
  84 
  85   ~ShenandoahBreakpointMarkScope() {
  86     if (_cause == GCCause::_wb_breakpoint) {
  87       ShenandoahBreakpoint::at_before_marking_completed();
  88     }
  89   }
  90 };
  91 
  92 ShenandoahConcurrentGC::ShenandoahConcurrentGC(ShenandoahGeneration* generation, bool do_old_gc_bootstrap) :
  93   _mark(generation),
  94   _degen_point(ShenandoahDegenPoint::_degenerated_unset),
  95   _abbreviated(false),
  96   _do_old_gc_bootstrap(do_old_gc_bootstrap),
  97   _generation(generation) {
  98 }
  99 
 100 ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
 101   return _degen_point;
 102 }
 103 
 104 bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
 105   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 106   heap->start_conc_gc();
 107 
 108   ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);
 109 
 110   // Reset for upcoming marking
 111   entry_reset();
 112 
 113   // Start initial mark under STW
 114   vmop_entry_init_mark();
 115 
 116   {
 117     ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);
 118 
 119     // Reset task queue stats here, rather than in mark_concurrent_roots,
 120     // because remembered set scan will `push` oops into the queues and
 121     // resetting after this happens will lose those counts.
 122     TASKQUEUE_STATS_ONLY(_mark.task_queues()->reset_taskqueue_stats());
 123 
 124     // Concurrent remembered set scanning
 125     entry_scan_remembered_set();
 126     // TODO: When RS scanning yields, we will need a check_cancellation_and_abort() degeneration point here.
 127 
 128     // Concurrent mark roots
 129     entry_mark_roots();
 130     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_roots)) return false;
 131 
 132     // Continue concurrent mark
 133     entry_mark();
 134     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) return false;
 135   }
 136 
 137   // Complete marking under STW, and start evacuation
 138   vmop_entry_final_mark();
 139 
 140   // If GC was cancelled before final mark, then the safepoint operation will do nothing
 141   // and the concurrent mark will still be in progress. In this case it is safe to resume
 142   // the degenerated cycle from the marking phase. On the other hand, if the GC is cancelled
 143   // after final mark (but before this check), then the final mark safepoint operation
 144   // will have finished the mark (setting concurrent mark in progress to false). Final mark
  // will also have set up state (in concurrent stack processing) that will not be safe to
 146   // resume from the marking phase in the degenerated cycle. That is, if the cancellation
 147   // occurred after final mark, we must resume the degenerated cycle after the marking phase.
 148   if (_generation->is_concurrent_mark_in_progress() && check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) {
 149     assert(!heap->is_concurrent_weak_root_in_progress(), "Weak roots should not be in progress when concurrent mark is in progress");
 150     return false;
 151   }
 152 
 153   // Concurrent stack processing
 154   if (heap->is_evacuation_in_progress()) {
 155     entry_thread_roots();
 156   }
 157 
 158   // Process weak roots that might still point to regions that would be broken by cleanup
 159   if (heap->is_concurrent_weak_root_in_progress()) {
 160     entry_weak_refs();
 161     entry_weak_roots();
 162   }
 163 
 164   // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
 165   // the space. This would be the last action if there is nothing to evacuate.  Note that
 166   // we will not age young-gen objects in the case that we skip evacuation.
 167   entry_cleanup_early();
 168 
 169   {
 170     ShenandoahHeapLocker locker(heap->lock());
 171     heap->free_set()->log_status();
 172   }
 173 
 174   // Perform concurrent class unloading
 175   if (heap->unload_classes() &&
 176       heap->is_concurrent_weak_root_in_progress()) {
 177     entry_class_unloading();
 178   }
 179 
 180   // Processing strong roots
 181   // This may be skipped if there is nothing to update/evacuate.
 182   // If so, strong_root_in_progress would be unset.
 183   if (heap->is_concurrent_strong_root_in_progress()) {
 184     entry_strong_roots();
 185   }
 186 
 187   // Global marking has completed. We need to fill in any unmarked objects in the old generation
 188   // so that subsequent remembered set scans will not walk pointers into reclaimed memory.
 189   if (!heap->cancelled_gc() && heap->mode()->is_generational() && _generation->is_global()) {
 190     entry_global_coalesce_and_fill();
 191   }
 192 
 193   // Continue the cycle with evacuation and optional update-refs.
 194   // This may be skipped if there is nothing to evacuate.
 195   // If so, evac_in_progress would be unset by collection set preparation code.
 196   if (heap->is_evacuation_in_progress()) {
 197     // Concurrently evacuate
 198     entry_evacuate();
 199     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) return false;
 200 
 201     // Perform update-refs phase.
 202     vmop_entry_init_updaterefs();
 203     entry_updaterefs();
 204     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;
 205 
 206     // Concurrent update thread roots
 207     entry_update_thread_roots();
 208     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;
 209 
 210     vmop_entry_final_updaterefs();
 211 
 212     // Update references freed up collection set, kick the cleanup to reclaim the space.
 213     entry_cleanup_complete();
 214   } else {
 215     // We chose not to evacuate because we found sufficient immediate garbage.
 216     vmop_entry_final_roots();
 217     _abbreviated = true;
 218   }
 219 
 220   // We defer generation resizing actions until after cset regions have been recycled.  We do this even following an
 221   // abbreviated cycle.
 222   if (heap->mode()->is_generational()) {
 223     bool success;
 224     size_t region_xfer;
 225     const char* region_destination;
 226     ShenandoahYoungGeneration* young_gen = heap->young_generation();
 227     ShenandoahGeneration* old_gen = heap->old_generation();
 228     {
 229       ShenandoahHeapLocker locker(heap->lock());
 230 
 231       size_t old_region_surplus = heap->get_old_region_surplus();
 232       size_t old_region_deficit = heap->get_old_region_deficit();
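      // Rebalance the generations: a surplus means the old generation holds more regions than
      // it needs, so regions are transferred to young; a deficit means the old generation needs
      // more regions (for example, to accommodate promotions), so regions are taken from young.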
 233       if (old_region_surplus) {
 234         success = heap->generation_sizer()->transfer_to_young(old_region_surplus);
 235         region_destination = "young";
 236         region_xfer = old_region_surplus;
 237       } else if (old_region_deficit) {
 238         success = heap->generation_sizer()->transfer_to_old(old_region_deficit);
 239         region_destination = "old";
 240         region_xfer = old_region_deficit;
 241         if (!success) {
 242           ((ShenandoahOldHeuristics *) old_gen->heuristics())->trigger_cannot_expand();
 243         }
 244       } else {
 245         region_destination = "none";
 246         region_xfer = 0;
 247         success = true;
 248       }
 249       heap->set_old_region_surplus(0);
 250       heap->set_old_region_deficit(0);
 251 
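      // Record how much old-gen usage grew over this cycle (approximately the bytes promoted),
      // then clear the per-cycle evacuation and promotion reserves for the next cycle.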
 252       size_t old_usage_before_evac = heap->capture_old_usage(0);
 253       size_t old_usage_now = old_gen->used();
 254       size_t promoted_bytes = old_usage_now - old_usage_before_evac;
 255       heap->set_previous_promotion(promoted_bytes);
 256       heap->set_young_evac_reserve(0);
 257       heap->set_old_evac_reserve(0);
 258       heap->reset_old_evac_expended();
 259       heap->set_promoted_reserve(0);
 260     }
 261 
 262     // Report outside the heap lock
 263     size_t young_available = young_gen->available();
 264     size_t old_available = old_gen->available();
 265     log_info(gc, ergo)("After cleanup, %s " SIZE_FORMAT " regions to %s to prepare for next gc, old available: "
 266                        SIZE_FORMAT "%s, young_available: " SIZE_FORMAT "%s",
 267                        success? "successfully transferred": "failed to transfer", region_xfer, region_destination,
 268                        byte_size_in_proper_unit(old_available), proper_unit_for_byte_size(old_available),
 269                        byte_size_in_proper_unit(young_available), proper_unit_for_byte_size(young_available));
 270   }
 271   return true;
 272 }
 273 
 274 void ShenandoahConcurrentGC::vmop_entry_init_mark() {
 275   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 276   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 277   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);
 278 
 279   heap->try_inject_alloc_failure();
 280   VM_ShenandoahInitMark op(this);
 281   VMThread::execute(&op); // jump to entry_init_mark() under safepoint
 282 }
 283 
 284 void ShenandoahConcurrentGC::vmop_entry_final_mark() {
 285   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 286   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 287   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);
 288 
 289   heap->try_inject_alloc_failure();
 290   VM_ShenandoahFinalMarkStartEvac op(this);
 291   VMThread::execute(&op); // jump to entry_final_mark under safepoint
 292 }
 293 
 294 void ShenandoahConcurrentGC::vmop_entry_init_updaterefs() {
 295   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 296   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 297   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_update_refs_gross);
 298 
 299   heap->try_inject_alloc_failure();
 300   VM_ShenandoahInitUpdateRefs op(this);
 301   VMThread::execute(&op);
 302 }
 303 
 304 void ShenandoahConcurrentGC::vmop_entry_final_updaterefs() {
 305   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 306   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 307   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_gross);
 308 
 309   heap->try_inject_alloc_failure();
 310   VM_ShenandoahFinalUpdateRefs op(this);
 311   VMThread::execute(&op);
 312 }
 313 
 314 void ShenandoahConcurrentGC::vmop_entry_final_roots() {
 315   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 316   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 317   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_roots_gross);
 318 
 319   // This phase does not use workers, no need for setup
 320   heap->try_inject_alloc_failure();
 321   VM_ShenandoahFinalRoots op(this);
 322   VMThread::execute(&op);
 323 }
 324 
 325 void ShenandoahConcurrentGC::entry_init_mark() {
 326   const char* msg = init_mark_event_message();
 327   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_mark);
 328   EventMark em("%s", msg);
 329 
 330   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
 331                               ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
 332                               "init marking");
 333 
 334   op_init_mark();
 335 }
 336 
 337 void ShenandoahConcurrentGC::entry_final_mark() {
 338   const char* msg = final_mark_event_message();
 339   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark);
 340   EventMark em("%s", msg);
 341 
 342   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
 343                               ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
 344                               "final marking");
 345 
 346   op_final_mark();
 347 }
 348 
 349 void ShenandoahConcurrentGC::entry_init_updaterefs() {
 350   static const char* msg = "Pause Init Update Refs";
 351   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs);
 352   EventMark em("%s", msg);
 353 
 354   // No workers used in this phase, no setup required
 355   op_init_updaterefs();
 356 }
 357 
 358 void ShenandoahConcurrentGC::entry_final_updaterefs() {
 359   static const char* msg = "Pause Final Update Refs";
 360   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
 361   EventMark em("%s", msg);
 362 
 363   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
 364                               ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
 365                               "final reference update");
 366 
 367   op_final_updaterefs();
 368 }
 369 
 370 void ShenandoahConcurrentGC::entry_final_roots() {
 371   static const char* msg = "Pause Final Roots";
 372   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots);
 373   EventMark em("%s", msg);
 374 
 375   op_final_roots();
 376 }
 377 
 378 void ShenandoahConcurrentGC::entry_reset() {
 379   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 380   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 381   static const char* msg = "Concurrent reset";
 382   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
 383   EventMark em("%s", msg);
 384 
 385   ShenandoahWorkerScope scope(heap->workers(),
 386                               ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
 387                               "concurrent reset");
 388 
 389   heap->try_inject_alloc_failure();
 390   op_reset();
 391 }
 392 
 393 void ShenandoahConcurrentGC::entry_scan_remembered_set() {
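  // Remembered set scanning applies only to young collections; a global collection marks
  // old-gen objects directly, so it does not need to scan the remembered set.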
 394   if (_generation->is_young()) {
 395     ShenandoahHeap* const heap = ShenandoahHeap::heap();
 396     TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 397     const char* msg = "Concurrent remembered set scanning";
 398     ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::init_scan_rset);
 399     EventMark em("%s", msg);
 400 
 401     ShenandoahWorkerScope scope(heap->workers(),
 402                                 ShenandoahWorkerPolicy::calc_workers_for_rs_scanning(),
 403                                 msg);
 404 
 405     heap->try_inject_alloc_failure();
 406     _generation->scan_remembered_set(true /* is_concurrent */);
 407   }
 408 }
 409 
 410 void ShenandoahConcurrentGC::entry_mark_roots() {
 411   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 412   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 413   const char* msg = "Concurrent marking roots";
 414   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
 415   EventMark em("%s", msg);
 416 
 417   ShenandoahWorkerScope scope(heap->workers(),
 418                               ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
 419                               "concurrent marking roots");
 420 
 421   heap->try_inject_alloc_failure();
 422   op_mark_roots();
 423 }
 424 
 425 void ShenandoahConcurrentGC::entry_mark() {
 426   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 427   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 428   const char* msg = conc_mark_event_message();
 429   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark);
 430   EventMark em("%s", msg);
 431 
 432   ShenandoahWorkerScope scope(heap->workers(),
 433                               ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
 434                               "concurrent marking");
 435 
 436   heap->try_inject_alloc_failure();
 437   op_mark();
 438 }
 439 
 440 void ShenandoahConcurrentGC::entry_thread_roots() {
 441   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 442   static const char* msg = "Concurrent thread roots";
 443   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_thread_roots);
 444   EventMark em("%s", msg);
 445 
 446   ShenandoahWorkerScope scope(heap->workers(),
 447                               ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
 448                               msg);
 449 
 450   heap->try_inject_alloc_failure();
 451   op_thread_roots();
 452 }
 453 
 454 void ShenandoahConcurrentGC::entry_weak_refs() {
 455   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 456   static const char* msg = "Concurrent weak references";
 457   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_refs);
 458   EventMark em("%s", msg);
 459 
 460   ShenandoahWorkerScope scope(heap->workers(),
 461                               ShenandoahWorkerPolicy::calc_workers_for_conc_refs_processing(),
 462                               "concurrent weak references");
 463 
 464   heap->try_inject_alloc_failure();
 465   op_weak_refs();
 466 }
 467 
 468 void ShenandoahConcurrentGC::entry_weak_roots() {
 469   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 470   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 471   static const char* msg = "Concurrent weak roots";
 472   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_roots);
 473   EventMark em("%s", msg);
 474 
 475   ShenandoahWorkerScope scope(heap->workers(),
 476                               ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
 477                               "concurrent weak root");
 478 
 479   heap->try_inject_alloc_failure();
 480   op_weak_roots();
 481 }
 482 
 483 void ShenandoahConcurrentGC::entry_class_unloading() {
 484   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 485   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 486   static const char* msg = "Concurrent class unloading";
 487   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_class_unload);
 488   EventMark em("%s", msg);
 489 
 490   ShenandoahWorkerScope scope(heap->workers(),
 491                               ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
 492                               "concurrent class unloading");
 493 
 494   heap->try_inject_alloc_failure();
 495   op_class_unloading();
 496 }
 497 
 498 void ShenandoahConcurrentGC::entry_strong_roots() {
 499   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 500   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 501   static const char* msg = "Concurrent strong roots";
 502   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_strong_roots);
 503   EventMark em("%s", msg);
 504 
 505   ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_strong_roots);
 506 
 507   ShenandoahWorkerScope scope(heap->workers(),
 508                               ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
 509                               "concurrent strong root");
 510 
 511   heap->try_inject_alloc_failure();
 512   op_strong_roots();
 513 }
 514 
 515 void ShenandoahConcurrentGC::entry_cleanup_early() {
 516   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 517   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 518   static const char* msg = "Concurrent cleanup";
 519   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_early, true /* log_heap_usage */);
 520   EventMark em("%s", msg);
 521 
 522   // This phase does not use workers, no need for setup
 523   heap->try_inject_alloc_failure();
 524   op_cleanup_early();
 525 }
 526 
 527 void ShenandoahConcurrentGC::entry_evacuate() {
 528   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 529   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 530 
 531   static const char* msg = "Concurrent evacuation";
 532   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_evac);
 533   EventMark em("%s", msg);
 534 
 535   ShenandoahWorkerScope scope(heap->workers(),
 536                               ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
 537                               "concurrent evacuation");
 538 
 539   heap->try_inject_alloc_failure();
 540   op_evacuate();
 541 }
 542 
 543 void ShenandoahConcurrentGC::entry_update_thread_roots() {
 544   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 545   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 546 
 547   static const char* msg = "Concurrent update thread roots";
 548   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_thread_roots);
 549   EventMark em("%s", msg);
 550 
 551   // No workers used in this phase, no setup required
 552   heap->try_inject_alloc_failure();
 553   op_update_thread_roots();
 554 }
 555 
 556 void ShenandoahConcurrentGC::entry_updaterefs() {
 557   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 558   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 559   static const char* msg = "Concurrent update references";
 560   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs);
 561   EventMark em("%s", msg);
 562 
 563   ShenandoahWorkerScope scope(heap->workers(),
 564                               ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
 565                               "concurrent reference update");
 566 
 567   heap->try_inject_alloc_failure();
 568   op_updaterefs();
 569 }
 570 
 571 void ShenandoahConcurrentGC::entry_cleanup_complete() {
 572   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 573   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 574   static const char* msg = "Concurrent cleanup";
 575   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
 576   EventMark em("%s", msg);
 577 
 578   // This phase does not use workers, no need for setup
 579   heap->try_inject_alloc_failure();
 580   op_cleanup_complete();
 581 }
 582 
 583 void ShenandoahConcurrentGC::entry_global_coalesce_and_fill() {
 584   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 585 
 586   const char* msg = "Coalescing and filling old regions in global collect";
 587   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::coalesce_and_fill);
 588 
 589   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 590   EventMark em("%s", msg);
 591   ShenandoahWorkerScope scope(heap->workers(),
 592                               ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
 593                               "concurrent coalesce and fill");
 594 
 595   op_global_coalesce_and_fill();
 596 }
 597 
 598 void ShenandoahConcurrentGC::op_reset() {
 599   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 600   if (ShenandoahPacing) {
 601     heap->pacer()->setup_for_reset();
 602   }
 603   _generation->prepare_gc();
 604 }
 605 
 606 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
 607 private:
 608   ShenandoahMarkingContext* const _ctx;
 609 public:
 610   ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
 611 
 612   void heap_region_do(ShenandoahHeapRegion* r) {
 613     assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
 614     if (r->is_active()) {
 615       // Check if region needs updating its TAMS. We have updated it already during concurrent
 616       // reset, so it is very likely we don't need to do another write here.  Since most regions
 617       // are not "active", this path is relatively rare.
 618       if (_ctx->top_at_mark_start(r) != r->top()) {
 619         _ctx->capture_top_at_mark_start(r);
 620       }
 621     } else {
 622       assert(_ctx->top_at_mark_start(r) == r->top(),
 623              "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
 624     }
 625   }
 626 
 627   bool is_thread_safe() { return true; }
 628 };
 629 
 630 void ShenandoahConcurrentGC::start_mark() {
 631   _mark.start_mark();
 632 }
 633 
 634 void ShenandoahConcurrentGC::op_init_mark() {
 635   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 636   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
 637   assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
 638 
 639   assert(_generation->is_bitmap_clear(), "need clear marking bitmap");
 640   assert(!_generation->is_mark_complete(), "should not be complete");
 641   assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
 642 
 644   if (heap->mode()->is_generational()) {
 645     if (_generation->is_young() || (_generation->is_global() && ShenandoahVerify)) {
 646       // The current implementation of swap_remembered_set() copies the write-card-table
 647       // to the read-card-table. The remembered sets are also swapped for GLOBAL collections
 648       // so that the verifier works with the correct copy of the card table when verifying.
 649       // TODO: This path should not really depend on ShenandoahVerify.
 650       ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_swap_rset);
 651       _generation->swap_remembered_set();
 652     }
 653 
 654     if (_generation->is_global()) {
 655       heap->cancel_old_gc();
 656     } else if (heap->is_concurrent_old_mark_in_progress()) {
 657       // Purge the SATB buffers, transferring any valid, old pointers to the
 658       // old generation mark queue. Any pointers in a young region will be
 659       // abandoned.
 660       ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_transfer_satb);
 661       heap->transfer_old_pointers_from_satb();
 662     }
 663   }
 664 
 665   if (ShenandoahVerify) {
 666     heap->verifier()->verify_before_concmark();
 667   }
 668 
 669   if (VerifyBeforeGC) {
 670     Universe::verify();
 671   }
 672 
 673   _generation->set_concurrent_mark_in_progress(true);
 674 
 675   start_mark();
 676 
 677   if (_do_old_gc_bootstrap) {
 678     // Update region state for both young and old regions
 679     // TODO: We should be able to pull this out of the safepoint for the bootstrap
 680     // cycle. The top of an old region will only move when a GC cycle evacuates
 681     // objects into it. When we start an old cycle, we know that nothing can touch
 682     // the top of old regions.
 683     ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
 684     ShenandoahInitMarkUpdateRegionStateClosure cl;
 685     heap->parallel_heap_region_iterate(&cl);
 686   } else {
 687     // Update region state for only young regions
 688     ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
 689     ShenandoahInitMarkUpdateRegionStateClosure cl;
 690     _generation->parallel_heap_region_iterate(&cl);
 691   }
 692 
 693   // Weak reference processing
 694   ShenandoahReferenceProcessor* rp = _generation->ref_processor();
 695   rp->reset_thread_locals();
 696   rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());
 697 
 698   // Make above changes visible to worker threads
 699   OrderAccess::fence();
 700 
  // Arm nmethods for concurrent marking. When an nmethod is about to be executed,
  // we need to make sure that all its metadata are marked. The alternative is to remark
  // thread roots at the final mark pause, but that could be a potential latency killer.
 704   if (heap->unload_classes()) {
 705     ShenandoahCodeRoots::arm_nmethods();
 706   }
 707 
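  // Advance the stack watermark epoch so that each Java thread's stack will be re-processed
  // (lazily, via the stack watermark mechanism) for the new GC phase.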
 708   ShenandoahStackWatermark::change_epoch_id();
 709   if (ShenandoahPacing) {
 710     heap->pacer()->setup_for_mark();
 711   }
 712 }
 713 
 714 void ShenandoahConcurrentGC::op_mark_roots() {
 715   _mark.mark_concurrent_roots();
 716 }
 717 
 718 void ShenandoahConcurrentGC::op_mark() {
 719   _mark.concurrent_mark();
 720 }
 721 
 722 void ShenandoahConcurrentGC::op_final_mark() {
 723   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 724   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
 725   assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
 726 
 727   if (ShenandoahVerify) {
 728     heap->verifier()->verify_roots_no_forwarded();
 729   }
 730 
 731   if (!heap->cancelled_gc()) {
 732     _mark.finish_mark();
 733     assert(!heap->cancelled_gc(), "STW mark cannot OOM");
 734 
 735     // Notify JVMTI that the tagmap table will need cleaning.
 736     JvmtiTagMap::set_needs_cleaning();
 737 
 738     // The collection set is chosen by prepare_regions_and_collection_set().
 739     //
    // TODO: Under severe memory overload conditions that can be checked here, we may want to limit
    // the inclusion of old-gen candidates within the collection set.  This would allow us to prioritize efforts on
    // evacuating young-gen.  This remediation is most appropriate when old-gen availability is very high (so there
    // are negligible negative impacts from delaying completion of old-gen evacuation) and when young-gen collections
    // are "under duress" (as signalled by very low availability of memory within young-gen, indicating that young-gen
    // collections are not triggering frequently enough).
 746     _generation->prepare_regions_and_collection_set(true /*concurrent*/);
 747 
 748     // Upon return from prepare_regions_and_collection_set(), certain parameters have been established to govern the
 749     // evacuation efforts that are about to begin.  In particular:
 750     //
 751     // heap->get_promoted_reserve() represents the amount of memory within old-gen's available memory that has
 752     //   been set aside to hold objects promoted from young-gen memory.  This represents an estimated percentage
 753     //   of the live young-gen memory within the collection set.  If there is more data ready to be promoted than
 754     //   can fit within this reserve, the promotion of some objects will be deferred until a subsequent evacuation
 755     //   pass.
 756     //
 757     // heap->get_old_evac_reserve() represents the amount of memory within old-gen's available memory that has been
 758     //  set aside to hold objects evacuated from the old-gen collection set.
 759     //
 760     // heap->get_young_evac_reserve() represents the amount of memory within young-gen's available memory that has
 761     //  been set aside to hold objects evacuated from the young-gen collection set.  Conservatively, this value
 762     //  equals the entire amount of live young-gen memory within the collection set, even though some of this memory
 763     //  will likely be promoted.
 764 
 765     // Has to be done after cset selection
 766     heap->prepare_concurrent_roots();
 767 
 768     if (heap->mode()->is_generational()) {
 769       ShenandoahGeneration* young_gen = heap->young_generation();
 770       size_t humongous_regions_promoted = heap->get_promotable_humongous_regions();
 771       size_t regular_regions_promoted_in_place = heap->get_regular_regions_promoted_in_place();
 772       if (!heap->collection_set()->is_empty() || (humongous_regions_promoted + regular_regions_promoted_in_place > 0)) {
 773         // Even if the collection set is empty, we need to do evacuation if there are regions to be promoted in place.
 774         // Concurrent evacuation takes responsibility for registering objects and setting the remembered set cards to dirty.
 775 
 776         LogTarget(Debug, gc, cset) lt;
 777         if (lt.is_enabled()) {
 778           ResourceMark rm;
 779           LogStream ls(lt);
 780           heap->collection_set()->print_on(&ls);
 781         }
 782 
 783         if (ShenandoahVerify) {
 784           heap->verifier()->verify_before_evacuation();
 785         }
 786         // TODO: we do not need to run update-references following evacuation if collection_set->is_empty().
 787 
 788         heap->set_evacuation_in_progress(true);
 789         // From here on, we need to update references.
 790         heap->set_has_forwarded_objects(true);
 791 
 792         // Verify before arming for concurrent processing.
 793         // Otherwise, verification can trigger stack processing.
 794         if (ShenandoahVerify) {
 795           heap->verifier()->verify_during_evacuation();
 796         }
 797 
 798         // Arm nmethods/stack for concurrent processing
 799         ShenandoahCodeRoots::arm_nmethods();
 800         ShenandoahStackWatermark::change_epoch_id();
 801 
 802         if (ShenandoahPacing) {
 803           heap->pacer()->setup_for_evac();
 804         }
 805       } else {
 806         if (ShenandoahVerify) {
 807           heap->verifier()->verify_after_concmark();
 808         }
 809 
 810         if (VerifyAfterGC) {
 811           Universe::verify();
 812         }
 813       }
 814     } else {
 815       // Not is_generational()
 816       if (!heap->collection_set()->is_empty()) {
 817         LogTarget(Info, gc, ergo) lt;
 818         if (lt.is_enabled()) {
 819           ResourceMark rm;
 820           LogStream ls(lt);
 821           heap->collection_set()->print_on(&ls);
 822         }
 823 
 824         if (ShenandoahVerify) {
 825           heap->verifier()->verify_before_evacuation();
 826         }
 827 
 828         heap->set_evacuation_in_progress(true);
 829         // From here on, we need to update references.
 830         heap->set_has_forwarded_objects(true);
 831 
 832         // Verify before arming for concurrent processing.
 833         // Otherwise, verification can trigger stack processing.
 834         if (ShenandoahVerify) {
 835           heap->verifier()->verify_during_evacuation();
 836         }
 837 
 838         // Arm nmethods/stack for concurrent processing
 839         ShenandoahCodeRoots::arm_nmethods();
 840         ShenandoahStackWatermark::change_epoch_id();
 841 
 842         if (ShenandoahPacing) {
 843           heap->pacer()->setup_for_evac();
 844         }
 845       } else {
 846         if (ShenandoahVerify) {
 847           heap->verifier()->verify_after_concmark();
 848         }
 849 
 850         if (VerifyAfterGC) {
 851           Universe::verify();
 852         }
 853       }
 854     }
 855   }
 856 }
 857 
 858 class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
 859 private:
 860   OopClosure* const _oops;
 861 
 862 public:
 863   ShenandoahConcurrentEvacThreadClosure(OopClosure* oops);
 864   void do_thread(Thread* thread);
 865 };
 866 
 867 ShenandoahConcurrentEvacThreadClosure::ShenandoahConcurrentEvacThreadClosure(OopClosure* oops) :
 868   _oops(oops) {
 869 }
 870 
 871 void ShenandoahConcurrentEvacThreadClosure::do_thread(Thread* thread) {
 872   JavaThread* const jt = JavaThread::cast(thread);
 873   StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc);
 874   ShenandoahThreadLocalData::enable_plab_promotions(thread);
 875 }
 876 
 877 class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask {
 878 private:
 879   ShenandoahJavaThreadsIterator _java_threads;
 880 
 881 public:
 882   ShenandoahConcurrentEvacUpdateThreadTask(uint n_workers) :
 883     WorkerTask("Shenandoah Evacuate/Update Concurrent Thread Roots"),
 884     _java_threads(ShenandoahPhaseTimings::conc_thread_roots, n_workers) {
 885   }
 886 
 887   void work(uint worker_id) {
 888     Thread* worker_thread = Thread::current();
 889     ShenandoahThreadLocalData::enable_plab_promotions(worker_thread);
 890 
    // ShenandoahEvacOOMScope has to be set up by ShenandoahContextEvacuateUpdateRootsClosure.
    // Otherwise, this may deadlock with the watermark lock.
 893     ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
 894     ShenandoahConcurrentEvacThreadClosure thr_cl(&oops_cl);
 895     _java_threads.threads_do(&thr_cl, worker_id);
 896   }
 897 };
 898 
 899 void ShenandoahConcurrentGC::op_thread_roots() {
 900   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 901   assert(heap->is_evacuation_in_progress(), "Checked by caller");
 902   ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
 903   ShenandoahConcurrentEvacUpdateThreadTask task(heap->workers()->active_workers());
 904   heap->workers()->run_task(&task);
 905 }
 906 
 907 void ShenandoahConcurrentGC::op_weak_refs() {
 908   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 909   assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
 910   // Concurrent weak refs processing
 911   ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
 912   if (heap->gc_cause() == GCCause::_wb_breakpoint) {
 913     ShenandoahBreakpoint::at_after_reference_processing_started();
 914   }
 915   _generation->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
 916 }
 917 
 918 class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
 919 private:
 920   ShenandoahHeap* const _heap;
 921   ShenandoahMarkingContext* const _mark_context;
 922   bool  _evac_in_progress;
 923   Thread* const _thread;
 924 
 925 public:
 926   ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
 927   void do_oop(oop* p);
 928   void do_oop(narrowOop* p);
 929 };
 930 
 931 ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
 932   _heap(ShenandoahHeap::heap()),
 933   _mark_context(ShenandoahHeap::heap()->marking_context()),
 934   _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
 935   _thread(Thread::current()) {
 936 }
 937 
 938 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
 939   const oop obj = RawAccess<>::oop_load(p);
 940   if (!CompressedOops::is_null(obj)) {
 941     if (!_mark_context->is_marked(obj)) {
 942       if (_heap->is_in_active_generation(obj)) {
 943         // TODO: This worries me. Here we are asserting that an unmarked from-space object is 'correct'.
 944         // Normally, I would call this a bogus assert, but there seems to be a legitimate use-case for
 945         // accessing from-space objects during class unloading. However, the from-space object may have
 946         // been "filled". We've made no effort to prevent old generation classes being unloaded by young
 947         // gen (and vice-versa).
 948         shenandoah_assert_correct(p, obj);
 949         ShenandoahHeap::atomic_clear_oop(p, obj);
 950       }
 951     } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
 952       oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 953       if (resolved == obj) {
 954         resolved = _heap->evacuate_object(obj, _thread);
 955       }
 956       ShenandoahHeap::atomic_update_oop(resolved, p, obj);
 957       assert(_heap->cancelled_gc() ||
 958              _mark_context->is_marked(resolved) && !_heap->in_collection_set(resolved),
 959              "Sanity");
 960     }
 961   }
 962 }
 963 
 964 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
 965   ShouldNotReachHere();
 966 }
 967 
 968 class ShenandoahIsCLDAliveClosure : public CLDClosure {
 969 public:
 970   void do_cld(ClassLoaderData* cld) {
 971     cld->is_alive();
 972   }
 973 };
 974 
 975 class ShenandoahIsNMethodAliveClosure: public NMethodClosure {
 976 public:
 977   void do_nmethod(nmethod* n) {
 978     n->is_unloading();
 979   }
 980 };
 981 
// This task not only evacuates/updates marked weak roots, but also nulls out
// dead weak roots.
 984 class ShenandoahConcurrentWeakRootsEvacUpdateTask : public WorkerTask {
 985 private:
 986   ShenandoahVMWeakRoots<true /*concurrent*/> _vm_roots;
 987 
 988   // Roots related to concurrent class unloading
 989   ShenandoahClassLoaderDataRoots<true /* concurrent */>
 990                                              _cld_roots;
 991   ShenandoahConcurrentNMethodIterator        _nmethod_itr;
 992   ShenandoahPhaseTimings::Phase              _phase;
 993 
 994 public:
 995   ShenandoahConcurrentWeakRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
 996     WorkerTask("Shenandoah Evacuate/Update Concurrent Weak Roots"),
 997     _vm_roots(phase),
 998     _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers(), false /*heap iteration*/),
 999     _nmethod_itr(ShenandoahCodeRoots::table()),
1000     _phase(phase) {
1001     if (ShenandoahHeap::heap()->unload_classes()) {
1002       MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1003       _nmethod_itr.nmethods_do_begin();
1004     }
1005   }
1006 
1007   ~ShenandoahConcurrentWeakRootsEvacUpdateTask() {
1008     if (ShenandoahHeap::heap()->unload_classes()) {
1009       MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1010       _nmethod_itr.nmethods_do_end();
1011     }
1012     // Notify runtime data structures of potentially dead oops
1013     _vm_roots.report_num_dead();
1014   }
1015 
1016   void work(uint worker_id) {
1017     ShenandoahConcurrentWorkerSession worker_session(worker_id);
1018     ShenandoahSuspendibleThreadSetJoiner sts_join;
1019     {
1020       ShenandoahEvacOOMScope oom;
1021       // jni_roots and weak_roots are OopStorage backed roots, concurrent iteration
1022       // may race against OopStorage::release() calls.
1023       ShenandoahEvacUpdateCleanupOopStorageRootsClosure cl;
1024       _vm_roots.oops_do(&cl, worker_id);
1025     }
1026 
    // If we are going to perform concurrent class unloading later on, we need to
    // clean up the weak oops in the CLDs and determine each nmethod's unloading state, so that we
    // can clean up immediate garbage sooner.
1030     if (ShenandoahHeap::heap()->unload_classes()) {
      // Applies ShenandoahIsCLDAliveClosure to CLDs; the native barrier will either null the
      // CLD's holder or evacuate it.
1033       {
1034         ShenandoahIsCLDAliveClosure is_cld_alive;
1035         _cld_roots.cld_do(&is_cld_alive, worker_id);
1036       }
1037 
      // Applies ShenandoahIsNMethodAliveClosure to registered nmethods.
      // The closure calls nmethod->is_unloading(). The is_unloading
      // state is cached; therefore, during the concurrent class unloading phase,
      // we will not touch the metadata of unloading nmethods.
1042       {
1043         ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
1044         ShenandoahIsNMethodAliveClosure is_nmethod_alive;
1045         _nmethod_itr.nmethods_do(&is_nmethod_alive);
1046       }
1047     }
1048   }
1049 };
1050 
1051 void ShenandoahConcurrentGC::op_weak_roots() {
1052   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1053   assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
1054   // Concurrent weak root processing
1055   {
1056     ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_work);
1057     ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots_work);
1058     ShenandoahConcurrentWeakRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_weak_roots_work);
1059     heap->workers()->run_task(&task);
1060   }
1061 
1062   // Perform handshake to flush out dead oops
1063   {
1064     ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_rendezvous);
1065     heap->rendezvous_threads();
1066   }
1067 }
1068 
1069 void ShenandoahConcurrentGC::op_class_unloading() {
1070   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1071   assert (heap->is_concurrent_weak_root_in_progress() &&
1072           heap->unload_classes(),
1073           "Checked by caller");
1074   heap->do_class_unloading();
1075 }
1076 
1077 class ShenandoahEvacUpdateCodeCacheClosure : public NMethodClosure {
1078 private:
1079   BarrierSetNMethod* const                  _bs;
1080   ShenandoahEvacuateUpdateMetadataClosure   _cl;
1081 
1082 public:
1083   ShenandoahEvacUpdateCodeCacheClosure() :
1084     _bs(BarrierSet::barrier_set()->barrier_set_nmethod()),
1085     _cl() {
1086   }
1087 
1088   void do_nmethod(nmethod* n) {
1089     ShenandoahNMethod* data = ShenandoahNMethod::gc_data(n);
1090     ShenandoahReentrantLocker locker(data->lock());
    // Set up the EvacOOM scope below the reentrant lock to avoid deadlock with
    // nmethod_entry_barrier.
1093     ShenandoahEvacOOMScope oom;
1094     data->oops_do(&_cl, true/*fix relocation*/);
1095     _bs->disarm(n);
1096   }
1097 };
1098 
1099 class ShenandoahConcurrentRootsEvacUpdateTask : public WorkerTask {
1100 private:
1101   ShenandoahPhaseTimings::Phase                 _phase;
1102   ShenandoahVMRoots<true /*concurrent*/>        _vm_roots;
1103   ShenandoahClassLoaderDataRoots<true /*concurrent*/>
1104                                                 _cld_roots;
1105   ShenandoahConcurrentNMethodIterator           _nmethod_itr;
1106 
1107 public:
1108   ShenandoahConcurrentRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
1109     WorkerTask("Shenandoah Evacuate/Update Concurrent Strong Roots"),
1110     _phase(phase),
1111     _vm_roots(phase),
1112     _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers(), false /*heap iteration*/),
1113     _nmethod_itr(ShenandoahCodeRoots::table()) {
1114     if (!ShenandoahHeap::heap()->unload_classes()) {
1115       MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1116       _nmethod_itr.nmethods_do_begin();
1117     }
1118   }
1119 
1120   ~ShenandoahConcurrentRootsEvacUpdateTask() {
1121     if (!ShenandoahHeap::heap()->unload_classes()) {
1122       MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1123       _nmethod_itr.nmethods_do_end();
1124     }
1125   }
1126 
1127   void work(uint worker_id) {
1128     ShenandoahConcurrentWorkerSession worker_session(worker_id);
1129     {
1130       ShenandoahEvacOOMScope oom;
1131       {
1132         // vm_roots and weak_roots are OopStorage backed roots, concurrent iteration
1133         // may race against OopStorage::release() calls.
1134         ShenandoahContextEvacuateUpdateRootsClosure cl;
1135         _vm_roots.oops_do<ShenandoahContextEvacuateUpdateRootsClosure>(&cl, worker_id);
1136       }
1137 
1138       {
1139         ShenandoahEvacuateUpdateMetadataClosure cl;
1140         CLDToOopClosure clds(&cl, ClassLoaderData::_claim_strong);
1141         _cld_roots.cld_do(&clds, worker_id);
1142       }
1143     }
1144 
    // Cannot set up ShenandoahEvacOOMScope here, due to a potential deadlock with nmethod_entry_barrier.
1146     if (!ShenandoahHeap::heap()->unload_classes()) {
1147       ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
1148       ShenandoahEvacUpdateCodeCacheClosure cl;
1149       _nmethod_itr.nmethods_do(&cl);
1150     }
1151   }
1152 };
1153 
1154 void ShenandoahConcurrentGC::op_strong_roots() {
1155   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1156   assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
1157   ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
1158   heap->workers()->run_task(&task);
1159   heap->set_concurrent_strong_root_in_progress(false);
1160 }
1161 
1162 void ShenandoahConcurrentGC::op_cleanup_early() {
1163   ShenandoahHeap::heap()->free_set()->recycle_trash();
1164 }
1165 
1166 void ShenandoahConcurrentGC::op_evacuate() {
1167   ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
1168 }
1169 
1170 void ShenandoahConcurrentGC::op_init_updaterefs() {
1171   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1172   heap->set_evacuation_in_progress(false);
1173   heap->set_concurrent_weak_root_in_progress(false);
1174   heap->prepare_update_heap_references(true /*concurrent*/);
1175   heap->set_update_refs_in_progress(true);
1176   if (ShenandoahVerify) {
1177     heap->verifier()->verify_before_updaterefs();
1178   }
1179   if (ShenandoahPacing) {
1180     heap->pacer()->setup_for_updaterefs();
1181   }
1182 }
1183 
1184 void ShenandoahConcurrentGC::op_updaterefs() {
1185   ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
1186 }
1187 
1188 class ShenandoahUpdateThreadClosure : public HandshakeClosure {
1189 private:
1190   ShenandoahUpdateRefsClosure _cl;
1191 public:
1192   ShenandoahUpdateThreadClosure();
1193   void do_thread(Thread* thread);
1194 };
1195 
1196 ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
1197   HandshakeClosure("Shenandoah Update Thread Roots") {
1198 }
1199 
1200 void ShenandoahUpdateThreadClosure::do_thread(Thread* thread) {
1201   if (thread->is_Java_thread()) {
1202     JavaThread* jt = JavaThread::cast(thread);
1203     ResourceMark rm;
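    // Walk the thread's oops (stack frames, handles, etc.) and update any references
    // to objects that have been forwarded.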
1204     jt->oops_do(&_cl, nullptr);
1205   }
1206 }
1207 
1208 void ShenandoahConcurrentGC::op_update_thread_roots() {
1209   ShenandoahUpdateThreadClosure cl;
1210   Handshake::execute(&cl);
1211 }
1212 
1213 void ShenandoahConcurrentGC::op_final_updaterefs() {
1214   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1215   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
1216   assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");
1217 
1218   heap->finish_concurrent_roots();
1219 
1220   // Clear cancelled GC, if set. On cancellation path, the block before would handle
1221   // everything.
1222   if (heap->cancelled_gc()) {
1223     heap->clear_cancelled_gc(true /* clear oom handler */);
1224   }
1225 
1226   // Has to be done before cset is clear
1227   if (ShenandoahVerify) {
1228     heap->verifier()->verify_roots_in_to_space();
1229   }
1230 
1231   if (heap->mode()->is_generational() && heap->is_concurrent_old_mark_in_progress()) {
1232     // When the SATB barrier is left on to support concurrent old gen mark, it may pick up writes to
1233     // objects in the collection set. After those objects are evacuated, the pointers in the
1234     // SATB are no longer safe. Once we have finished update references, we are guaranteed that
1235     // no more writes to the collection set are possible.
1236     //
1237     // This will transfer any old pointers in _active_ regions from the SATB to the old gen
1238     // mark queues. All other pointers will be discarded. This would also discard any pointers
1239     // in old regions that were included in a mixed evacuation. We aren't using the SATB filter
1240     // methods here because we cannot control when they execute. If the SATB filter runs _after_
1241     // a region has been recycled, we will not be able to detect the bad pointer.
1242     //
1243     // We are not concerned about skipping this step in abbreviated cycles because regions
1244     // with no live objects cannot have been written to and so cannot have entries in the SATB
1245     // buffers.
1246     heap->transfer_old_pointers_from_satb();
1247   }
1248 
1249   heap->update_heap_region_states(true /*concurrent*/);
1250 
1251   heap->set_update_refs_in_progress(false);
1252   heap->set_has_forwarded_objects(false);
1253 
  // The aging cycle is only relevant during the evacuation cycle for individual objects, and during final mark for
  // entire regions.  Both of these operations occur before final update refs.
1256   heap->set_aging_cycle(false);
1257 
1258   if (ShenandoahVerify) {
1259     heap->verifier()->verify_after_updaterefs();
1260   }
1261 
1262   if (VerifyAfterGC) {
1263     Universe::verify();
1264   }
1265 
1266   heap->rebuild_free_set(true /*concurrent*/);
1267 }
1268 
1269 void ShenandoahConcurrentGC::op_final_roots() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
1272   if (heap->is_aging_cycle()) {
1273     ShenandoahMarkingContext* ctx = heap->complete_marking_context();
1274 
1275     for (size_t i = 0; i < heap->num_regions(); i++) {
1276       ShenandoahHeapRegion *r = heap->get_region(i);
1277       if (r->is_active() && r->is_young()) {
1278         HeapWord* tams = ctx->top_at_mark_start(r);
1279         HeapWord* top = r->top();
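        // A region that saw new allocations after marking started (top above TAMS) has its age
        // reset; regions with no new allocations age by one, which feeds later promotion decisions.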
1280         if (top > tams) {
1281           r->reset_age();
1282         } else {
1283           r->increment_age();
1284         }
1285       }
1286     }
1287   }
1288 
  heap->set_concurrent_weak_root_in_progress(false);
1290 }
1291 
1292 void ShenandoahConcurrentGC::op_cleanup_complete() {
1293   ShenandoahHeap::heap()->free_set()->recycle_trash();
1294 }
1295 
1296 void ShenandoahConcurrentGC::op_global_coalesce_and_fill() {
1297   ShenandoahHeap::heap()->coalesce_and_fill_old_regions();
1298 }
1299 
1300 bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
1301   if (ShenandoahHeap::heap()->cancelled_gc()) {
1302     _degen_point = point;
1303     return true;
1304   }
1305   return false;
1306 }
1307 
1308 const char* ShenandoahConcurrentGC::init_mark_event_message() const {
1309   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1310   assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1311   if (heap->unload_classes()) {
1312     SHENANDOAH_RETURN_EVENT_MESSAGE(heap, _generation->type(), "Pause Init Mark", " (unload classes)");
1313   } else {
1314     SHENANDOAH_RETURN_EVENT_MESSAGE(heap, _generation->type(), "Pause Init Mark", "");
1315   }
1316 }
1317 
1318 const char* ShenandoahConcurrentGC::final_mark_event_message() const {
1319   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1320   assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
1321          "Should not have forwarded objects during final mark, unless old gen concurrent mark is running");
1322 
1323   if (heap->unload_classes()) {
1324     SHENANDOAH_RETURN_EVENT_MESSAGE(heap, _generation->type(), "Pause Final Mark", " (unload classes)");
1325   } else {
1326     SHENANDOAH_RETURN_EVENT_MESSAGE(heap, _generation->type(), "Pause Final Mark", "");
1327   }
1328 }
1329 
1330 const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
1331   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1332   assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
         "Should not have forwarded objects during concurrent mark, unless old gen concurrent mark is running");
1334   if (heap->unload_classes()) {
1335     SHENANDOAH_RETURN_EVENT_MESSAGE(heap, _generation->type(), "Concurrent marking", " (unload classes)");
1336   } else {
1337     SHENANDOAH_RETURN_EVENT_MESSAGE(heap, _generation->type(), "Concurrent marking", "");
1338   }
1339 }