1 /*
   2  * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
   3  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 
  28 #include "gc/shared/barrierSetNMethod.hpp"
  29 #include "gc/shared/collectorCounters.hpp"
  30 #include "gc/shared/continuationGCSupport.inline.hpp"
  31 #include "gc/shenandoah/shenandoahBreakpoint.hpp"
  32 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  33 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
  34 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  35 #include "gc/shenandoah/shenandoahGeneration.hpp"
  36 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  37 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  38 #include "gc/shenandoah/shenandoahLock.hpp"
  39 #include "gc/shenandoah/shenandoahMark.inline.hpp"
  40 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  41 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  42 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  43 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  44 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  45 #include "gc/shenandoah/shenandoahStackWatermark.hpp"
  46 #include "gc/shenandoah/shenandoahUtils.hpp"
  47 #include "gc/shenandoah/shenandoahVerifier.hpp"
  48 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  49 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  50 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  51 #include "memory/allocation.hpp"
  52 #include "prims/jvmtiTagMap.hpp"
  53 #include "runtime/vmThread.hpp"
  54 #include "utilities/events.hpp"
  55 
  56 // Breakpoint support
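// The scopes below notify ShenandoahBreakpoint at the start and end of a GC cycle and around
// concurrent marking, but only when the cycle was requested with GCCause::_wb_breakpoint
// (i.e. driven by whitebox tests); for all other causes they are no-ops.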
  57 class ShenandoahBreakpointGCScope : public StackObj {
  58 private:
  59   const GCCause::Cause _cause;
  60 public:
  61   ShenandoahBreakpointGCScope(GCCause::Cause cause) : _cause(cause) {
  62     if (cause == GCCause::_wb_breakpoint) {
  63       ShenandoahBreakpoint::start_gc();
  64       ShenandoahBreakpoint::at_before_gc();
  65     }
  66   }
  67 
  68   ~ShenandoahBreakpointGCScope() {
  69     if (_cause == GCCause::_wb_breakpoint) {
  70       ShenandoahBreakpoint::at_after_gc();
  71     }
  72   }
  73 };
  74 
  75 class ShenandoahBreakpointMarkScope : public StackObj {
  76 private:
  77   const GCCause::Cause _cause;
  78 public:
  79   ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
  80     if (_cause == GCCause::_wb_breakpoint) {
  81       ShenandoahBreakpoint::at_after_marking_started();
  82     }
  83   }
  84 
  85   ~ShenandoahBreakpointMarkScope() {
  86     if (_cause == GCCause::_wb_breakpoint) {
  87       ShenandoahBreakpoint::at_before_marking_completed();
  88     }
  89   }
  90 };
  91 
  92 ShenandoahConcurrentGC::ShenandoahConcurrentGC(ShenandoahGeneration* generation, bool do_old_gc_bootstrap) :
  93   _mark(generation),
  94   _degen_point(ShenandoahDegenPoint::_degenerated_unset),
  95   _abbreviated(false),
  96   _do_old_gc_bootstrap(do_old_gc_bootstrap),
  97   _generation(generation) {
  98 }
  99 
 100 ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
 101   return _degen_point;
 102 }
 103 
 104 bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
 105   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 106 
 107   ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);
 108 
 109   // Reset for upcoming marking
 110   entry_reset();
 111 
 112   // Start initial mark under STW
 113   vmop_entry_init_mark();
 114 
 115   {
 116     ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);
 117 
 118     // Reset task queue stats here, rather than in mark_concurrent_roots,
 119     // because remembered set scan will `push` oops into the queues and
 120     // resetting after this happens will lose those counts.
 121     TASKQUEUE_STATS_ONLY(_mark.task_queues()->reset_taskqueue_stats());
 122 
 123     // Concurrent remembered set scanning
 124     entry_scan_remembered_set();
 125     // TODO: When RS scanning yields, we will need a check_cancellation_and_abort() degeneration point here.
 126 
 127     // Concurrent mark roots
 128     entry_mark_roots();
 129     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_roots)) {
 130       return false;
 131     }
 132 
 133     // Continue concurrent mark
 134     entry_mark();
 135     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) {
 136       return false;
 137     }
 138   }
 139 
 140   // Complete marking under STW, and start evacuation
 141   vmop_entry_final_mark();
 142 
 143   // If GC was cancelled before final mark, then the safepoint operation will do nothing
 144   // and the concurrent mark will still be in progress. In this case it is safe to resume
 145   // the degenerated cycle from the marking phase. On the other hand, if the GC is cancelled
 146   // after final mark (but before this check), then the final mark safepoint operation
 147   // will have finished the mark (setting concurrent mark in progress to false). Final mark
  // will also have set up state (in concurrent stack processing) that will not be safe to
 149   // resume from the marking phase in the degenerated cycle. That is, if the cancellation
 150   // occurred after final mark, we must resume the degenerated cycle after the marking phase.
 151   if (_generation->is_concurrent_mark_in_progress() && check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) {
 152     assert(!heap->is_concurrent_weak_root_in_progress(), "Weak roots should not be in progress when concurrent mark is in progress");
 153     return false;
 154   }
 155 
 156   // Concurrent stack processing
 157   if (heap->is_evacuation_in_progress()) {
 158     entry_thread_roots();
 159   }
 160 
 161   // Process weak roots that might still point to regions that would be broken by cleanup
 162   if (heap->is_concurrent_weak_root_in_progress()) {
 163     entry_weak_refs();
 164     entry_weak_roots();
 165   }
 166 
  // Final mark might have reclaimed some immediate garbage; kick off cleanup to reclaim
  // the space. This would be the last action if there is nothing to evacuate.  Note that
  // we will not age young-gen objects if we skip evacuation.
 170   entry_cleanup_early();
 171 
 172   {
 173     // TODO: Not sure there is value in logging free-set status right here.  Note that whenever the free set is rebuilt,
 174     // it logs the newly rebuilt status.
 175     ShenandoahHeapLocker locker(heap->lock());
 176     heap->free_set()->log_status();
 177   }
 178 
 179   // Perform concurrent class unloading
 180   if (heap->unload_classes() &&
 181       heap->is_concurrent_weak_root_in_progress()) {
 182     entry_class_unloading();
 183   }
 184 
 185   // Processing strong roots
 186   // This may be skipped if there is nothing to update/evacuate.
 187   // If so, strong_root_in_progress would be unset.
 188   if (heap->is_concurrent_strong_root_in_progress()) {
 189     entry_strong_roots();
 190   }
 191 
 192   // Continue the cycle with evacuation and optional update-refs.
 193   // This may be skipped if there is nothing to evacuate.
 194   // If so, evac_in_progress would be unset by collection set preparation code.
 195   if (heap->is_evacuation_in_progress()) {
 196     // Concurrently evacuate
 197     entry_evacuate();
 198     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) {
 199       return false;
 200     }
 201   }
 202 
 203   if (heap->has_forwarded_objects()) {
 204     // Perform update-refs phase.
 205     vmop_entry_init_updaterefs();
 206     entry_updaterefs();
 207     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) {
 208       return false;
 209     }
 210 
 211     // Concurrent update thread roots
 212     entry_update_thread_roots();
 213     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) {
 214       return false;
 215     }
 216 
 217     vmop_entry_final_updaterefs();
 218 
    // Update references has freed up the collection set; kick off cleanup to reclaim the space.
 220     entry_cleanup_complete();
 221   } else {
 222     // We chose not to evacuate because we found sufficient immediate garbage. Note that we
 223     // do not check for cancellation here because, at this point, the cycle is effectively
 224     // complete. If the cycle has been cancelled here, the control thread will detect it
 225     // on its next iteration and run a degenerated young cycle.
 226     vmop_entry_final_roots();
 227     _abbreviated = true;
 228   }
 229 
 230   // We defer generation resizing actions until after cset regions have been recycled.  We do this even following an
 231   // abbreviated cycle.
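  // An old-region surplus transfers regions from the old generation to the young generation; an
  // old-region deficit transfers regions the other way. Whatever the outcome, the surplus/deficit
  // and the evacuation/promotion reserves established for this cycle are cleared so that the next
  // cycle recomputes them from scratch.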
 232   if (heap->mode()->is_generational()) {
 233     bool success;
 234     size_t region_xfer;
 235     const char* region_destination;
 236     ShenandoahYoungGeneration* young_gen = heap->young_generation();
 237     ShenandoahGeneration* old_gen = heap->old_generation();
 238     {
 239       ShenandoahHeapLocker locker(heap->lock());
 240 
 241       size_t old_region_surplus = heap->get_old_region_surplus();
 242       size_t old_region_deficit = heap->get_old_region_deficit();
      if (old_region_surplus > 0) {
 244         success = heap->generation_sizer()->transfer_to_young(old_region_surplus);
 245         region_destination = "young";
 246         region_xfer = old_region_surplus;
      } else if (old_region_deficit > 0) {
 248         success = heap->generation_sizer()->transfer_to_old(old_region_deficit);
 249         region_destination = "old";
 250         region_xfer = old_region_deficit;
 251         if (!success) {
 252           ((ShenandoahOldHeuristics *) old_gen->heuristics())->trigger_cannot_expand();
 253         }
 254       } else {
 255         region_destination = "none";
 256         region_xfer = 0;
 257         success = true;
 258       }
 259       heap->set_old_region_surplus(0);
 260       heap->set_old_region_deficit(0);
 261       heap->set_young_evac_reserve(0);
 262       heap->set_old_evac_reserve(0);
 263       heap->set_promoted_reserve(0);
 264     }
 265 
 266     // Report outside the heap lock
 267     size_t young_available = young_gen->available();
 268     size_t old_available = old_gen->available();
 269     log_info(gc, ergo)("After cleanup, %s " SIZE_FORMAT " regions to %s to prepare for next gc, old available: "
 270                        SIZE_FORMAT "%s, young_available: " SIZE_FORMAT "%s",
                       success ? "successfully transferred" : "failed to transfer", region_xfer, region_destination,
 272                        byte_size_in_proper_unit(old_available), proper_unit_for_byte_size(old_available),
 273                        byte_size_in_proper_unit(young_available), proper_unit_for_byte_size(young_available));
 274   }
 275   return true;
 276 }
 277 
 278 void ShenandoahConcurrentGC::vmop_entry_init_mark() {
 279   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 280   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 281   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);
 282 
 283   heap->try_inject_alloc_failure();
 284   VM_ShenandoahInitMark op(this);
 285   VMThread::execute(&op); // jump to entry_init_mark() under safepoint
 286 }
 287 
 288 void ShenandoahConcurrentGC::vmop_entry_final_mark() {
 289   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 290   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 291   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);
 292 
 293   heap->try_inject_alloc_failure();
 294   VM_ShenandoahFinalMarkStartEvac op(this);
 295   VMThread::execute(&op); // jump to entry_final_mark under safepoint
 296 }
 297 
 298 void ShenandoahConcurrentGC::vmop_entry_init_updaterefs() {
 299   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 300   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 301   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_update_refs_gross);
 302 
 303   heap->try_inject_alloc_failure();
 304   VM_ShenandoahInitUpdateRefs op(this);
 305   VMThread::execute(&op);
 306 }
 307 
 308 void ShenandoahConcurrentGC::vmop_entry_final_updaterefs() {
 309   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 310   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 311   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_gross);
 312 
 313   heap->try_inject_alloc_failure();
 314   VM_ShenandoahFinalUpdateRefs op(this);
 315   VMThread::execute(&op);
 316 }
 317 
 318 void ShenandoahConcurrentGC::vmop_entry_final_roots() {
 319   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 320   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 321   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_roots_gross);
 322 
 323   // This phase does not use workers, no need for setup
 324   heap->try_inject_alloc_failure();
 325   VM_ShenandoahFinalRoots op(this);
 326   VMThread::execute(&op);
 327 }
 328 
 329 void ShenandoahConcurrentGC::entry_init_mark() {
 330   const char* msg = init_mark_event_message();
 331   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_mark);
 332   EventMark em("%s", msg);
 333 
 334   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
 335                               ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
 336                               "init marking");
 337 
 338   op_init_mark();
 339 }
 340 
 341 void ShenandoahConcurrentGC::entry_final_mark() {
 342   const char* msg = final_mark_event_message();
 343   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark);
 344   EventMark em("%s", msg);
 345 
 346   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
 347                               ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
 348                               "final marking");
 349 
 350   op_final_mark();
 351 }
 352 
 353 void ShenandoahConcurrentGC::entry_init_updaterefs() {
 354   static const char* msg = "Pause Init Update Refs";
 355   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs);
 356   EventMark em("%s", msg);
 357 
 358   // No workers used in this phase, no setup required
 359   op_init_updaterefs();
 360 }
 361 
 362 void ShenandoahConcurrentGC::entry_final_updaterefs() {
 363   static const char* msg = "Pause Final Update Refs";
 364   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
 365   EventMark em("%s", msg);
 366 
 367   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
 368                               ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
 369                               "final reference update");
 370 
 371   op_final_updaterefs();
 372 }
 373 
 374 void ShenandoahConcurrentGC::entry_final_roots() {
 375   static const char* msg = "Pause Final Roots";
 376   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots);
 377   EventMark em("%s", msg);
 378 
 379   op_final_roots();
 380 }
 381 
 382 void ShenandoahConcurrentGC::entry_reset() {
 383   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 384   heap->try_inject_alloc_failure();
 385 
 386   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 387   {
 388     static const char* msg = "Concurrent reset";
 389     ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
 390     EventMark em("%s", msg);
 391 
 392     ShenandoahWorkerScope scope(heap->workers(),
 393                                 ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
 394                                 msg);
 395     op_reset();
 396   }
 397 
 398   if (_do_old_gc_bootstrap) {
 399     static const char* msg = "Concurrent reset (OLD)";
 400     ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset_old);
 401     ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
 402                                 ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
 403                                 msg);
 404     EventMark em("%s", msg);
 405 
 406     heap->old_generation()->prepare_gc();
 407   }
 408 }
 409 
 410 void ShenandoahConcurrentGC::entry_scan_remembered_set() {
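  // Remembered set scanning is only needed when collecting the young generation: old-to-young
  // pointers recorded in the card table serve as additional marking roots. A global collection
  // marks the old generation directly, so no remembered set scan is performed for it.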
 411   if (_generation->is_young()) {
 412     ShenandoahHeap* const heap = ShenandoahHeap::heap();
 413     TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 414     const char* msg = "Concurrent remembered set scanning";
 415     ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::init_scan_rset);
 416     EventMark em("%s", msg);
 417 
 418     ShenandoahWorkerScope scope(heap->workers(),
 419                                 ShenandoahWorkerPolicy::calc_workers_for_rs_scanning(),
 420                                 msg);
 421 
 422     heap->try_inject_alloc_failure();
 423     _generation->scan_remembered_set(true /* is_concurrent */);
 424   }
 425 }
 426 
 427 void ShenandoahConcurrentGC::entry_mark_roots() {
 428   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 429   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 430   const char* msg = "Concurrent marking roots";
 431   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
 432   EventMark em("%s", msg);
 433 
 434   ShenandoahWorkerScope scope(heap->workers(),
 435                               ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
 436                               "concurrent marking roots");
 437 
 438   heap->try_inject_alloc_failure();
 439   op_mark_roots();
 440 }
 441 
 442 void ShenandoahConcurrentGC::entry_mark() {
 443   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 444   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 445   const char* msg = conc_mark_event_message();
 446   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark);
 447   EventMark em("%s", msg);
 448 
 449   ShenandoahWorkerScope scope(heap->workers(),
 450                               ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
 451                               "concurrent marking");
 452 
 453   heap->try_inject_alloc_failure();
 454   op_mark();
 455 }
 456 
 457 void ShenandoahConcurrentGC::entry_thread_roots() {
 458   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 459   static const char* msg = "Concurrent thread roots";
 460   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_thread_roots);
 461   EventMark em("%s", msg);
 462 
 463   ShenandoahWorkerScope scope(heap->workers(),
 464                               ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
 465                               msg);
 466 
 467   heap->try_inject_alloc_failure();
 468   op_thread_roots();
 469 }
 470 
 471 void ShenandoahConcurrentGC::entry_weak_refs() {
 472   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 473   static const char* msg = "Concurrent weak references";
 474   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_refs);
 475   EventMark em("%s", msg);
 476 
 477   ShenandoahWorkerScope scope(heap->workers(),
 478                               ShenandoahWorkerPolicy::calc_workers_for_conc_refs_processing(),
 479                               "concurrent weak references");
 480 
 481   heap->try_inject_alloc_failure();
 482   op_weak_refs();
 483 }
 484 
 485 void ShenandoahConcurrentGC::entry_weak_roots() {
 486   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 487   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 488   static const char* msg = "Concurrent weak roots";
 489   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_roots);
 490   EventMark em("%s", msg);
 491 
 492   ShenandoahWorkerScope scope(heap->workers(),
 493                               ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
 494                               "concurrent weak root");
 495 
 496   heap->try_inject_alloc_failure();
 497   op_weak_roots();
 498 }
 499 
 500 void ShenandoahConcurrentGC::entry_class_unloading() {
 501   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 502   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 503   static const char* msg = "Concurrent class unloading";
 504   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_class_unload);
 505   EventMark em("%s", msg);
 506 
 507   ShenandoahWorkerScope scope(heap->workers(),
 508                               ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
 509                               "concurrent class unloading");
 510 
 511   heap->try_inject_alloc_failure();
 512   op_class_unloading();
 513 }
 514 
 515 void ShenandoahConcurrentGC::entry_strong_roots() {
 516   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 517   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 518   static const char* msg = "Concurrent strong roots";
 519   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_strong_roots);
 520   EventMark em("%s", msg);
 521 
 522   ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_strong_roots);
 523 
 524   ShenandoahWorkerScope scope(heap->workers(),
 525                               ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
 526                               "concurrent strong root");
 527 
 528   heap->try_inject_alloc_failure();
 529   op_strong_roots();
 530 }
 531 
 532 void ShenandoahConcurrentGC::entry_cleanup_early() {
 533   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 534   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 535   static const char* msg = "Concurrent cleanup";
 536   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_early, true /* log_heap_usage */);
 537   EventMark em("%s", msg);
 538 
 539   // This phase does not use workers, no need for setup
 540   heap->try_inject_alloc_failure();
 541   op_cleanup_early();
 542 }
 543 
 544 void ShenandoahConcurrentGC::entry_evacuate() {
 545   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 546   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 547 
 548   static const char* msg = "Concurrent evacuation";
 549   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_evac);
 550   EventMark em("%s", msg);
 551 
 552   ShenandoahWorkerScope scope(heap->workers(),
 553                               ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
 554                               "concurrent evacuation");
 555 
 556   heap->try_inject_alloc_failure();
 557   op_evacuate();
 558 }
 559 
 560 void ShenandoahConcurrentGC::entry_update_thread_roots() {
 561   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 562   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 563 
 564   static const char* msg = "Concurrent update thread roots";
 565   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_thread_roots);
 566   EventMark em("%s", msg);
 567 
 568   // No workers used in this phase, no setup required
 569   heap->try_inject_alloc_failure();
 570   op_update_thread_roots();
 571 }
 572 
 573 void ShenandoahConcurrentGC::entry_updaterefs() {
 574   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 575   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 576   static const char* msg = "Concurrent update references";
 577   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs);
 578   EventMark em("%s", msg);
 579 
 580   ShenandoahWorkerScope scope(heap->workers(),
 581                               ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
 582                               "concurrent reference update");
 583 
 584   heap->try_inject_alloc_failure();
 585   op_updaterefs();
 586 }
 587 
 588 void ShenandoahConcurrentGC::entry_cleanup_complete() {
 589   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 590   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 591   static const char* msg = "Concurrent cleanup";
 592   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
 593   EventMark em("%s", msg);
 594 
 595   // This phase does not use workers, no need for setup
 596   heap->try_inject_alloc_failure();
 597   op_cleanup_complete();
 598 }
 599 
 600 void ShenandoahConcurrentGC::op_reset() {
 601   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 602   if (ShenandoahPacing) {
 603     heap->pacer()->setup_for_reset();
 604   }
 605   _generation->prepare_gc();
 606 }
 607 
 608 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
 609 private:
 610   ShenandoahMarkingContext* const _ctx;
 611 public:
 612   ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
 613 
 614   void heap_region_do(ShenandoahHeapRegion* r) {
 615     assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
 616     if (r->is_active()) {
      // Check whether the region's TAMS needs updating. We have already updated it during
      // concurrent reset, so it is very likely that we don't need to do another write here.
      // Since most regions are not "active", this path is relatively rare.
 620       if (_ctx->top_at_mark_start(r) != r->top()) {
 621         _ctx->capture_top_at_mark_start(r);
 622       }
 623     } else {
 624       assert(_ctx->top_at_mark_start(r) == r->top(),
 625              "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
 626     }
 627   }
 628 
 629   bool is_thread_safe() { return true; }
 630 };
 631 
 632 void ShenandoahConcurrentGC::start_mark() {
 633   _mark.start_mark();
 634 }
 635 
 636 void ShenandoahConcurrentGC::op_init_mark() {
 637   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 638   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
 639   assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
 640 
 641   assert(_generation->is_bitmap_clear(), "need clear marking bitmap");
 642   assert(!_generation->is_mark_complete(), "should not be complete");
 643   assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
 644 
 645 
 646   if (heap->mode()->is_generational()) {
 647     if (_generation->is_young() || (_generation->is_global() && ShenandoahVerify)) {
 648       // The current implementation of swap_remembered_set() copies the write-card-table
 649       // to the read-card-table. The remembered sets are also swapped for GLOBAL collections
 650       // so that the verifier works with the correct copy of the card table when verifying.
 651       // TODO: This path should not really depend on ShenandoahVerify.
 652       ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_swap_rset);
 653       _generation->swap_remembered_set();
 654     }
 655 
 656     if (_generation->is_global()) {
 657       heap->cancel_old_gc();
 658     } else if (heap->is_concurrent_old_mark_in_progress()) {
 659       // Purge the SATB buffers, transferring any valid, old pointers to the
 660       // old generation mark queue. Any pointers in a young region will be
 661       // abandoned.
 662       ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_transfer_satb);
 663       heap->transfer_old_pointers_from_satb();
 664     }
 665   }
 666 
 667   if (ShenandoahVerify) {
 668     heap->verifier()->verify_before_concmark();
 669   }
 670 
 671   if (VerifyBeforeGC) {
 672     Universe::verify();
 673   }
 674 
 675   _generation->set_concurrent_mark_in_progress(true);
 676 
 677   start_mark();
 678 
 679   if (_do_old_gc_bootstrap) {
 680     // Update region state for both young and old regions
 681     // TODO: We should be able to pull this out of the safepoint for the bootstrap
 682     // cycle. The top of an old region will only move when a GC cycle evacuates
 683     // objects into it. When we start an old cycle, we know that nothing can touch
 684     // the top of old regions.
 685     ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
 686     ShenandoahInitMarkUpdateRegionStateClosure cl;
 687     heap->parallel_heap_region_iterate(&cl);
 688     heap->old_generation()->ref_processor()->reset_thread_locals();
 689   } else {
 690     // Update region state for only young regions
 691     ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
 692     ShenandoahInitMarkUpdateRegionStateClosure cl;
 693     _generation->parallel_heap_region_iterate(&cl);
 694   }
 695 
 696   // Weak reference processing
 697   ShenandoahReferenceProcessor* rp = _generation->ref_processor();
 698   rp->reset_thread_locals();
 699   rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());
 700 
 701   // Make above changes visible to worker threads
 702   OrderAccess::fence();
 703 
 704   // Arm nmethods for concurrent mark
 705   ShenandoahCodeRoots::arm_nmethods_for_mark();
 706 
 707   ShenandoahStackWatermark::change_epoch_id();
 708   if (ShenandoahPacing) {
 709     heap->pacer()->setup_for_mark();
 710   }
 711 }
 712 
 713 void ShenandoahConcurrentGC::op_mark_roots() {
 714   _mark.mark_concurrent_roots();
 715 }
 716 
 717 void ShenandoahConcurrentGC::op_mark() {
 718   _mark.concurrent_mark();
 719 }
 720 
 721 void ShenandoahConcurrentGC::op_final_mark() {
 722   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 723   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
 724   assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
 725 
 726   if (ShenandoahVerify) {
 727     heap->verifier()->verify_roots_no_forwarded();
 728   }
 729 
 730   if (!heap->cancelled_gc()) {
 731     _mark.finish_mark();
 732     assert(!heap->cancelled_gc(), "STW mark cannot OOM");
 733 
 734     // Notify JVMTI that the tagmap table will need cleaning.
 735     JvmtiTagMap::set_needs_cleaning();
 736 
 737     // The collection set is chosen by prepare_regions_and_collection_set().
 738     //
    // TODO: Under severe memory overload conditions that can be checked here, we may want to limit
    // the inclusion of old-gen candidates within the collection set.  This would allow us to prioritize efforts on
    // evacuating young-gen.  This remediation is most appropriate when old-gen availability is very high (so there
    // are negligible negative impacts from delaying completion of old-gen evacuation) and when young-gen collections
    // are "under duress" (as signalled by very low availability of memory within young-gen, indicating that young-gen
    // collections are not triggering frequently enough).
 745     _generation->prepare_regions_and_collection_set(true /*concurrent*/);
 746 
 747     // Upon return from prepare_regions_and_collection_set(), certain parameters have been established to govern the
 748     // evacuation efforts that are about to begin.  In particular:
 749     //
 750     // heap->get_promoted_reserve() represents the amount of memory within old-gen's available memory that has
 751     //   been set aside to hold objects promoted from young-gen memory.  This represents an estimated percentage
 752     //   of the live young-gen memory within the collection set.  If there is more data ready to be promoted than
 753     //   can fit within this reserve, the promotion of some objects will be deferred until a subsequent evacuation
 754     //   pass.
 755     //
 756     // heap->get_old_evac_reserve() represents the amount of memory within old-gen's available memory that has been
 757     //  set aside to hold objects evacuated from the old-gen collection set.
 758     //
 759     // heap->get_young_evac_reserve() represents the amount of memory within young-gen's available memory that has
 760     //  been set aside to hold objects evacuated from the young-gen collection set.  Conservatively, this value
 761     //  equals the entire amount of live young-gen memory within the collection set, even though some of this memory
 762     //  will likely be promoted.
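    //
    // Illustrative sketch (hypothetical numbers, not derived from this code): if the collection
    // set holds 100 MB of live young-gen data and 25% of it is estimated to be promotion-eligible,
    // then get_young_evac_reserve() would conservatively cover the full 100 MB, while
    // get_promoted_reserve() would be sized for roughly 25 MB within old-gen. Any promotions that
    // do not fit within the promoted reserve are deferred to a subsequent evacuation pass.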
 763 
 764     // Has to be done after cset selection
 765     heap->prepare_concurrent_roots();
 766 
 767     if (heap->mode()->is_generational()) {
 768       size_t humongous_regions_promoted = heap->get_promotable_humongous_regions();
 769       size_t regular_regions_promoted_in_place = heap->get_regular_regions_promoted_in_place();
 770       if (!heap->collection_set()->is_empty() || (humongous_regions_promoted + regular_regions_promoted_in_place > 0)) {
 771         // Even if the collection set is empty, we need to do evacuation if there are regions to be promoted in place.
 772         // Concurrent evacuation takes responsibility for registering objects and setting the remembered set cards to dirty.
 773 
 774         LogTarget(Debug, gc, cset) lt;
 775         if (lt.is_enabled()) {
 776           ResourceMark rm;
 777           LogStream ls(lt);
 778           heap->collection_set()->print_on(&ls);
 779         }
 780 
 781         if (ShenandoahVerify) {
 782           heap->verifier()->verify_before_evacuation();
 783         }
 784 
 785         heap->set_evacuation_in_progress(true);
 786 
 787         // Verify before arming for concurrent processing.
 788         // Otherwise, verification can trigger stack processing.
 789         if (ShenandoahVerify) {
 790           heap->verifier()->verify_during_evacuation();
 791         }
 792 
 793         // Generational mode may promote objects in place during the evacuation phase.
 794         // If that is the only reason we are evacuating, we don't need to update references
 795         // and there will be no forwarded objects on the heap.
 796         heap->set_has_forwarded_objects(!heap->collection_set()->is_empty());
 797 
 798         // Arm nmethods/stack for concurrent processing
 799         if (!heap->collection_set()->is_empty()) {
          // If objects will be evacuated, arm the nmethod barriers. These will be disarmed
 801           // under the same condition (established in prepare_concurrent_roots) after strong
 802           // root evacuation has completed (see op_strong_roots).
 803           ShenandoahCodeRoots::arm_nmethods_for_evac();
 804           ShenandoahStackWatermark::change_epoch_id();
 805         }
 806 
 807         if (ShenandoahPacing) {
 808           heap->pacer()->setup_for_evac();
 809         }
 810       } else {
 811         if (ShenandoahVerify) {
 812           heap->verifier()->verify_after_concmark();
 813         }
 814 
 815         if (VerifyAfterGC) {
 816           Universe::verify();
 817         }
 818       }
 819     } else {
 820       // Not is_generational()
 821       if (!heap->collection_set()->is_empty()) {
 822         LogTarget(Info, gc, ergo) lt;
 823         if (lt.is_enabled()) {
 824           ResourceMark rm;
 825           LogStream ls(lt);
 826           heap->collection_set()->print_on(&ls);
 827         }
 828 
 829         if (ShenandoahVerify) {
 830           heap->verifier()->verify_before_evacuation();
 831         }
 832 
 833         heap->set_evacuation_in_progress(true);
 834 
 835         // Verify before arming for concurrent processing.
 836         // Otherwise, verification can trigger stack processing.
 837         if (ShenandoahVerify) {
 838           heap->verifier()->verify_during_evacuation();
 839         }
 840 
 841         // From here on, we need to update references.
 842         heap->set_has_forwarded_objects(true);
 843 
 844         // Arm nmethods/stack for concurrent processing
 845         ShenandoahCodeRoots::arm_nmethods_for_evac();
 846         ShenandoahStackWatermark::change_epoch_id();
 847 
 848         if (ShenandoahPacing) {
 849           heap->pacer()->setup_for_evac();
 850         }
 851       } else {
 852         if (ShenandoahVerify) {
 853           heap->verifier()->verify_after_concmark();
 854         }
 855 
 856         if (VerifyAfterGC) {
 857           Universe::verify();
 858         }
 859       }
 860     }
 861   }
 862 }
 863 
 864 class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
 865 private:
 866   OopClosure* const _oops;
 867 
 868 public:
 869   ShenandoahConcurrentEvacThreadClosure(OopClosure* oops);
 870   void do_thread(Thread* thread);
 871 };
 872 
 873 ShenandoahConcurrentEvacThreadClosure::ShenandoahConcurrentEvacThreadClosure(OopClosure* oops) :
 874   _oops(oops) {
 875 }
 876 
 877 void ShenandoahConcurrentEvacThreadClosure::do_thread(Thread* thread) {
 878   JavaThread* const jt = JavaThread::cast(thread);
 879   StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc);
 880   ShenandoahThreadLocalData::enable_plab_promotions(thread);
 881 }
 882 
 883 class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask {
 884 private:
 885   ShenandoahJavaThreadsIterator _java_threads;
 886 
 887 public:
 888   ShenandoahConcurrentEvacUpdateThreadTask(uint n_workers) :
 889     WorkerTask("Shenandoah Evacuate/Update Concurrent Thread Roots"),
 890     _java_threads(ShenandoahPhaseTimings::conc_thread_roots, n_workers) {
 891   }
 892 
 893   void work(uint worker_id) {
 894     Thread* worker_thread = Thread::current();
 895     ShenandoahThreadLocalData::enable_plab_promotions(worker_thread);
 896 
    // The ShenandoahEvacOOMScope has to be set up by ShenandoahContextEvacuateUpdateRootsClosure.
    // Otherwise, it may deadlock on the watermark lock.
 899     ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
 900     ShenandoahConcurrentEvacThreadClosure thr_cl(&oops_cl);
 901     _java_threads.threads_do(&thr_cl, worker_id);
 902   }
 903 };
 904 
 905 void ShenandoahConcurrentGC::op_thread_roots() {
 906   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 907   assert(heap->is_evacuation_in_progress(), "Checked by caller");
 908   ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
 909   ShenandoahConcurrentEvacUpdateThreadTask task(heap->workers()->active_workers());
 910   heap->workers()->run_task(&task);
 911 }
 912 
 913 void ShenandoahConcurrentGC::op_weak_refs() {
 914   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 915   assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
 916   // Concurrent weak refs processing
 917   ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
 918   if (heap->gc_cause() == GCCause::_wb_breakpoint) {
 919     ShenandoahBreakpoint::at_after_reference_processing_started();
 920   }
 921   _generation->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
 922 }
 923 
 924 class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
 925 private:
 926   ShenandoahHeap* const _heap;
 927   ShenandoahMarkingContext* const _mark_context;
 928   bool  _evac_in_progress;
 929   Thread* const _thread;
 930 
 931 public:
 932   ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
 933   void do_oop(oop* p);
 934   void do_oop(narrowOop* p);
 935 };
 936 
 937 ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
 938   _heap(ShenandoahHeap::heap()),
 939   _mark_context(ShenandoahHeap::heap()->marking_context()),
 940   _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
 941   _thread(Thread::current()) {
 942 }
 943 
 944 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
 945   const oop obj = RawAccess<>::oop_load(p);
 946   if (!CompressedOops::is_null(obj)) {
 947     if (!_mark_context->is_marked(obj)) {
 948       if (_heap->is_in_active_generation(obj)) {
 949         // TODO: This worries me. Here we are asserting that an unmarked from-space object is 'correct'.
 950         // Normally, I would call this a bogus assert, but there seems to be a legitimate use-case for
 951         // accessing from-space objects during class unloading. However, the from-space object may have
 952         // been "filled". We've made no effort to prevent old generation classes being unloaded by young
 953         // gen (and vice-versa).
 954         shenandoah_assert_correct(p, obj);
 955         ShenandoahHeap::atomic_clear_oop(p, obj);
 956       }
 957     } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
 958       oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 959       if (resolved == obj) {
 960         resolved = _heap->evacuate_object(obj, _thread);
 961       }
 962       ShenandoahHeap::atomic_update_oop(resolved, p, obj);
      assert(_heap->cancelled_gc() ||
             (_mark_context->is_marked(resolved) && !_heap->in_collection_set(resolved)),
             "Sanity");
 966     }
 967   }
 968 }
 969 
 970 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
 971   ShouldNotReachHere();
 972 }
 973 
 974 class ShenandoahIsCLDAliveClosure : public CLDClosure {
 975 public:
 976   void do_cld(ClassLoaderData* cld) {
 977     cld->is_alive();
 978   }
 979 };
 980 
 981 class ShenandoahIsNMethodAliveClosure: public NMethodClosure {
 982 public:
 983   void do_nmethod(nmethod* n) {
 984     n->is_unloading();
 985   }
 986 };
 987 
// This task not only evacuates/updates marked weak roots, but also nulls out
// dead weak roots.
 990 class ShenandoahConcurrentWeakRootsEvacUpdateTask : public WorkerTask {
 991 private:
 992   ShenandoahVMWeakRoots<true /*concurrent*/> _vm_roots;
 993 
 994   // Roots related to concurrent class unloading
 995   ShenandoahClassLoaderDataRoots<true /* concurrent */>
 996                                              _cld_roots;
 997   ShenandoahConcurrentNMethodIterator        _nmethod_itr;
 998   ShenandoahPhaseTimings::Phase              _phase;
 999 
1000 public:
1001   ShenandoahConcurrentWeakRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
1002     WorkerTask("Shenandoah Evacuate/Update Concurrent Weak Roots"),
1003     _vm_roots(phase),
1004     _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers(), false /*heap iteration*/),
1005     _nmethod_itr(ShenandoahCodeRoots::table()),
1006     _phase(phase) {
1007     if (ShenandoahHeap::heap()->unload_classes()) {
1008       MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1009       _nmethod_itr.nmethods_do_begin();
1010     }
1011   }
1012 
1013   ~ShenandoahConcurrentWeakRootsEvacUpdateTask() {
1014     if (ShenandoahHeap::heap()->unload_classes()) {
1015       MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1016       _nmethod_itr.nmethods_do_end();
1017     }
1018     // Notify runtime data structures of potentially dead oops
1019     _vm_roots.report_num_dead();
1020   }
1021 
1022   void work(uint worker_id) {
1023     ShenandoahConcurrentWorkerSession worker_session(worker_id);
1024     ShenandoahSuspendibleThreadSetJoiner sts_join;
1025     {
1026       ShenandoahEvacOOMScope oom;
1027       // jni_roots and weak_roots are OopStorage backed roots, concurrent iteration
1028       // may race against OopStorage::release() calls.
1029       ShenandoahEvacUpdateCleanupOopStorageRootsClosure cl;
1030       _vm_roots.oops_do(&cl, worker_id);
1031     }
1032 
    // If we are going to perform concurrent class unloading later on, we need to
    // clean up the weak oops in the CLDs and determine each nmethod's unloading state, so that we
    // can clean up immediate garbage sooner.
1036     if (ShenandoahHeap::heap()->unload_classes()) {
      // Applies the ShenandoahIsCLDAliveClosure to CLDs; the native barrier will either null the
      // CLD's holder or evacuate it.
1039       {
1040         ShenandoahIsCLDAliveClosure is_cld_alive;
1041         _cld_roots.cld_do(&is_cld_alive, worker_id);
1042       }
1043 
      // Applies ShenandoahIsNMethodAliveClosure to registered nmethods.
      // The closure calls nmethod->is_unloading(). The is_unloading
      // state is cached; therefore, during the concurrent class unloading phase,
      // we will not touch the metadata of unloading nmethods.
1048       {
1049         ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
1050         ShenandoahIsNMethodAliveClosure is_nmethod_alive;
1051         _nmethod_itr.nmethods_do(&is_nmethod_alive);
1052       }
1053     }
1054   }
1055 };
1056 
1057 void ShenandoahConcurrentGC::op_weak_roots() {
1058   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1059   assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
1060   // Concurrent weak root processing
1061   {
1062     ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_work);
1063     ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots_work);
1064     ShenandoahConcurrentWeakRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_weak_roots_work);
1065     heap->workers()->run_task(&task);
1066   }
1067 
1068   // Perform handshake to flush out dead oops
1069   {
1070     ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_rendezvous);
1071     heap->rendezvous_threads();
1072   }
1073 }
1074 
1075 void ShenandoahConcurrentGC::op_class_unloading() {
1076   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1077   assert (heap->is_concurrent_weak_root_in_progress() &&
1078           heap->unload_classes(),
1079           "Checked by caller");
1080   heap->do_class_unloading();
1081 }
1082 
1083 class ShenandoahEvacUpdateCodeCacheClosure : public NMethodClosure {
1084 private:
1085   BarrierSetNMethod* const                  _bs;
1086   ShenandoahEvacuateUpdateMetadataClosure   _cl;
1087 
1088 public:
1089   ShenandoahEvacUpdateCodeCacheClosure() :
1090     _bs(BarrierSet::barrier_set()->barrier_set_nmethod()),
1091     _cl() {
1092   }
1093 
1094   void do_nmethod(nmethod* n) {
1095     ShenandoahNMethod* data = ShenandoahNMethod::gc_data(n);
1096     ShenandoahReentrantLocker locker(data->lock());
    // Set up the EvacOOM scope below the reentrant lock to avoid deadlocking with the
    // nmethod_entry_barrier.
1099     ShenandoahEvacOOMScope oom;
1100     data->oops_do(&_cl, true/*fix relocation*/);
1101     _bs->disarm(n);
1102   }
1103 };
1104 
1105 class ShenandoahConcurrentRootsEvacUpdateTask : public WorkerTask {
1106 private:
1107   ShenandoahPhaseTimings::Phase                 _phase;
1108   ShenandoahVMRoots<true /*concurrent*/>        _vm_roots;
1109   ShenandoahClassLoaderDataRoots<true /*concurrent*/>
1110                                                 _cld_roots;
1111   ShenandoahConcurrentNMethodIterator           _nmethod_itr;
1112 
1113 public:
1114   ShenandoahConcurrentRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
1115     WorkerTask("Shenandoah Evacuate/Update Concurrent Strong Roots"),
1116     _phase(phase),
1117     _vm_roots(phase),
1118     _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers(), false /*heap iteration*/),
1119     _nmethod_itr(ShenandoahCodeRoots::table()) {
1120     if (!ShenandoahHeap::heap()->unload_classes()) {
1121       MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1122       _nmethod_itr.nmethods_do_begin();
1123     }
1124   }
1125 
1126   ~ShenandoahConcurrentRootsEvacUpdateTask() {
1127     if (!ShenandoahHeap::heap()->unload_classes()) {
1128       MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1129       _nmethod_itr.nmethods_do_end();
1130     }
1131   }
1132 
1133   void work(uint worker_id) {
1134     ShenandoahConcurrentWorkerSession worker_session(worker_id);
1135     {
1136       ShenandoahEvacOOMScope oom;
1137       {
1138         // vm_roots and weak_roots are OopStorage backed roots, concurrent iteration
1139         // may race against OopStorage::release() calls.
1140         ShenandoahContextEvacuateUpdateRootsClosure cl;
1141         _vm_roots.oops_do<ShenandoahContextEvacuateUpdateRootsClosure>(&cl, worker_id);
1142       }
1143 
1144       {
1145         ShenandoahEvacuateUpdateMetadataClosure cl;
1146         CLDToOopClosure clds(&cl, ClassLoaderData::_claim_strong);
1147         _cld_roots.cld_do(&clds, worker_id);
1148       }
1149     }
1150 
1151     // Cannot setup ShenandoahEvacOOMScope here, due to potential deadlock with nmethod_entry_barrier.
1152     if (!ShenandoahHeap::heap()->unload_classes()) {
1153       ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
1154       ShenandoahEvacUpdateCodeCacheClosure cl;
1155       _nmethod_itr.nmethods_do(&cl);
1156     }
1157   }
1158 };
1159 
1160 void ShenandoahConcurrentGC::op_strong_roots() {
1161   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1162   assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
1163   ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
1164   heap->workers()->run_task(&task);
1165   heap->set_concurrent_strong_root_in_progress(false);
1166 }
1167 
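// Recycle the regions that marking identified as containing only garbage (immediate garbage),
// returning them to the free set without waiting for evacuation or update-refs.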
1168 void ShenandoahConcurrentGC::op_cleanup_early() {
1169   ShenandoahHeap::heap()->free_set()->recycle_trash();
1170 }
1171 
1172 void ShenandoahConcurrentGC::op_evacuate() {
1173   ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
1174 }
1175 
1176 void ShenandoahConcurrentGC::op_init_updaterefs() {
1177   ShenandoahHeap* const heap = ShenandoahHeap::heap();
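  // Evacuation is complete at this point, so drop the evacuation and concurrent-weak-root flags
  // before preparing the heap for the update-references traversal.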
1178   heap->set_evacuation_in_progress(false);
1179   heap->set_concurrent_weak_root_in_progress(false);
1180   heap->prepare_update_heap_references(true /*concurrent*/);
1181   heap->set_update_refs_in_progress(true);
1182   if (ShenandoahVerify) {
1183     heap->verifier()->verify_before_updaterefs();
1184   }
1185   if (ShenandoahPacing) {
1186     heap->pacer()->setup_for_updaterefs();
1187   }
1188 }
1189 
1190 void ShenandoahConcurrentGC::op_updaterefs() {
1191   ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
1192 }
1193 
1194 class ShenandoahUpdateThreadClosure : public HandshakeClosure {
1195 private:
1196   ShenandoahUpdateRefsClosure _cl;
1197 public:
1198   ShenandoahUpdateThreadClosure();
1199   void do_thread(Thread* thread);
1200 };
1201 
1202 ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
1203   HandshakeClosure("Shenandoah Update Thread Roots") {
1204 }
1205 
1206 void ShenandoahUpdateThreadClosure::do_thread(Thread* thread) {
1207   if (thread->is_Java_thread()) {
1208     JavaThread* jt = JavaThread::cast(thread);
1209     ResourceMark rm;
1210     jt->oops_do(&_cl, nullptr);
1211   }
1212 }
1213 
1214 void ShenandoahConcurrentGC::op_update_thread_roots() {
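  // Update the oops in Java thread stacks via a handshake with each thread, rather than
  // stopping the world for this step.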
1215   ShenandoahUpdateThreadClosure cl;
1216   Handshake::execute(&cl);
1217 }
1218 
1219 void ShenandoahConcurrentGC::op_final_updaterefs() {
1220   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1221   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
1222   assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");
1223 
1224   heap->finish_concurrent_roots();
1225 
  // Clear cancelled GC, if set. On the cancellation path, the preceding code would have
  // handled everything already.
1228   if (heap->cancelled_gc()) {
1229     heap->clear_cancelled_gc(true /* clear oom handler */);
1230   }
1231 
1232   // Has to be done before cset is clear
1233   if (ShenandoahVerify) {
1234     heap->verifier()->verify_roots_in_to_space();
1235   }
1236 
1237   if (heap->mode()->is_generational() && heap->is_concurrent_old_mark_in_progress()) {
1238     // When the SATB barrier is left on to support concurrent old gen mark, it may pick up writes to
1239     // objects in the collection set. After those objects are evacuated, the pointers in the
1240     // SATB are no longer safe. Once we have finished update references, we are guaranteed that
1241     // no more writes to the collection set are possible.
1242     //
1243     // This will transfer any old pointers in _active_ regions from the SATB to the old gen
1244     // mark queues. All other pointers will be discarded. This would also discard any pointers
1245     // in old regions that were included in a mixed evacuation. We aren't using the SATB filter
1246     // methods here because we cannot control when they execute. If the SATB filter runs _after_
1247     // a region has been recycled, we will not be able to detect the bad pointer.
1248     //
1249     // We are not concerned about skipping this step in abbreviated cycles because regions
1250     // with no live objects cannot have been written to and so cannot have entries in the SATB
1251     // buffers.
1252     heap->transfer_old_pointers_from_satb();
1253   }
1254 
1255   heap->update_heap_region_states(true /*concurrent*/);
1256 
1257   heap->set_update_refs_in_progress(false);
1258   heap->set_has_forwarded_objects(false);
1259 
  // The aging cycle is only relevant during the evacuation cycle for individual objects, and during final mark for
  // entire regions.  Both of these relevant operations occur before final update refs.
1262   heap->set_aging_cycle(false);
1263 
1264   if (ShenandoahVerify) {
1265     heap->verifier()->verify_after_updaterefs();
1266   }
1267 
1268   if (VerifyAfterGC) {
1269     Universe::verify();
1270   }
1271 
1272   heap->rebuild_free_set(true /*concurrent*/);
1273 }
1274 
1275 void ShenandoahConcurrentGC::op_final_roots() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
1278   heap->set_concurrent_weak_root_in_progress(false);
1279   heap->set_evacuation_in_progress(false);
1280 
1281   if (heap->mode()->is_generational()) {
1282     // If the cycle was shortened for having enough immediate garbage, this could be
1283     // the last GC safepoint before concurrent marking of old resumes. We must be sure
1284     // that old mark threads don't see any pointers to garbage in the SATB buffers.
1285     if (heap->is_concurrent_old_mark_in_progress()) {
1286       heap->transfer_old_pointers_from_satb();
1287     }
1288 
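    // Age the active young regions: a region whose top has moved past its TAMS received new
    // allocations during this cycle, so its age is reset; otherwise, if this is an aging cycle,
    // the region's age is incremented.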
    ShenandoahMarkingContext* ctx = heap->complete_marking_context();
1290     for (size_t i = 0; i < heap->num_regions(); i++) {
      ShenandoahHeapRegion* r = heap->get_region(i);
1292       if (r->is_active() && r->is_young()) {
1293         HeapWord* tams = ctx->top_at_mark_start(r);
1294         HeapWord* top = r->top();
1295         if (top > tams) {
1296           r->reset_age();
1297         } else if (heap->is_aging_cycle()) {
1298           r->increment_age();
1299         }
1300       }
1301     }
1302   }
1303 }
1304 
1305 void ShenandoahConcurrentGC::op_cleanup_complete() {
1306   ShenandoahHeap::heap()->free_set()->recycle_trash();
1307 }
1308 
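// If the GC has been cancelled, remember the point at which it happened so that the control
// thread can resume the work from that phase in a degenerated (STW) cycle.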
1309 bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
1310   if (ShenandoahHeap::heap()->cancelled_gc()) {
1311     _degen_point = point;
1312     return true;
1313   }
1314   return false;
1315 }
1316 
1317 const char* ShenandoahConcurrentGC::init_mark_event_message() const {
1318   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1319   assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1320   if (heap->unload_classes()) {
1321     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Init Mark", " (unload classes)");
1322   } else {
1323     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Init Mark", "");
1324   }
1325 }
1326 
1327 const char* ShenandoahConcurrentGC::final_mark_event_message() const {
1328   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1329   assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
1330          "Should not have forwarded objects during final mark, unless old gen concurrent mark is running");
1331 
1332   if (heap->unload_classes()) {
1333     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Mark", " (unload classes)");
1334   } else {
1335     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Mark", "");
1336   }
1337 }
1338 
1339 const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
1340   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1341   assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
         "Should not have forwarded objects during concurrent mark, unless old gen concurrent mark is running");
1343   if (heap->unload_classes()) {
1344     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent marking", " (unload classes)");
1345   } else {
1346     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent marking", "");
1347   }
1348 }