/*
 * Copyright (c) 2021, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shenandoah/shenandoahBreakpoint.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahLock.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahStackWatermark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "memory/allocation.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/events.hpp"

// Breakpoint support: the scopes below notify the whitebox breakpoint machinery
// at well-known points of the cycle, but only when the GC was requested with
// GCCause::_wb_breakpoint (i.e. driven by whitebox testing).
class ShenandoahBreakpointGCScope : public StackObj {
private:
  const GCCause::Cause _cause;
public:
  ShenandoahBreakpointGCScope(GCCause::Cause cause) : _cause(cause) {
    if (cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::start_gc();
      ShenandoahBreakpoint::at_before_gc();
    }
  }

  ~ShenandoahBreakpointGCScope() {
    if (_cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::at_after_gc();
    }
  }
};

class ShenandoahBreakpointMarkScope : public StackObj {
private:
  const GCCause::Cause _cause;
public:
  ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
    if (_cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::at_after_marking_started();
    }
  }

  ~ShenandoahBreakpointMarkScope() {
    if (_cause == GCCause::_wb_breakpoint) {
      ShenandoahBreakpoint::at_before_marking_completed();
    }
  }
};

ShenandoahConcurrentGC::ShenandoahConcurrentGC() :
  _mark(),
  _degen_point(ShenandoahDegenPoint::_degenerated_unset) {
}

ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
  return _degen_point;
}

void ShenandoahConcurrentGC::cancel() {
  ShenandoahConcurrentMark::cancel();
}

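// Entry point for the whole concurrent cycle: concurrent reset, STW init mark,
// concurrent marking, STW final mark, then (if anything was put in the
// collection set) concurrent evacuation and reference updating with their STW
// bookends. Returns false if the cycle was cancelled; the phase at which it
// was abandoned is recorded by check_cancellation_and_abort() so the collector
// can continue in a degenerated cycle from that point.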
bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);

  // Reset for upcoming marking
  entry_reset();

  // Start initial mark under STW
  vmop_entry_init_mark();

  {
    ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);
    // Concurrent mark roots
    entry_mark_roots();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_outside_cycle)) return false;

    // Continue concurrent mark
    entry_mark();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) return false;
  }

  // Complete marking under STW, and start evacuation
  vmop_entry_final_mark();

  // Concurrent stack processing
  if (heap->is_evacuation_in_progress()) {
    entry_thread_roots();
  }

  // Process weak roots that might still point to regions that would be broken by cleanup
  if (heap->is_concurrent_weak_root_in_progress()) {
    entry_weak_refs();
    entry_weak_roots();
  }

  // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
  // the space. This would be the last action if there is nothing to evacuate.
  entry_cleanup_early();

  {
    ShenandoahHeapLocker locker(heap->lock());
    heap->free_set()->log_status();
  }

  // Perform concurrent class unloading
  if (heap->unload_classes() &&
      heap->is_concurrent_weak_root_in_progress()) {
    entry_class_unloading();
  }

  // Processing strong roots
  // This may be skipped if there is nothing to update/evacuate.
  // If so, strong_root_in_progress would be unset.
  if (heap->is_concurrent_strong_root_in_progress()) {
    entry_strong_roots();
  }

  // Continue the cycle with evacuation and optional update-refs.
  // This may be skipped if there is nothing to evacuate.
  // If so, evac_in_progress would be unset by collection set preparation code.
  if (heap->is_evacuation_in_progress()) {
    // Concurrently evacuate
    entry_evacuate();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) return false;

    // Perform update-refs phase.
    vmop_entry_init_updaterefs();
    entry_updaterefs();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;

    // Concurrent update thread roots
    entry_update_thread_roots();
    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;

    vmop_entry_final_updaterefs();

    // Update-refs freed up the collection set; kick off cleanup to reclaim the space.
    entry_cleanup_complete();
  } else {
    vmop_entry_final_roots();
  }

  return true;
}

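// The methods below come in three layers. vmop_entry_* methods request a VM
// operation, which runs the matching STW entry_* method at a safepoint. The
// entry_* methods set up timing, tracing and worker scopes, and then call the
// op_* methods that do the actual work.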
void ShenandoahConcurrentGC::vmop_entry_init_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahInitMark op(this);
  VMThread::execute(&op); // jump to entry_init_mark() under safepoint
}

void ShenandoahConcurrentGC::vmop_entry_final_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalMarkStartEvac op(this);
  VMThread::execute(&op); // jump to entry_final_mark() under safepoint
}

void ShenandoahConcurrentGC::vmop_entry_init_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_update_refs_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahInitUpdateRefs op(this);
  VMThread::execute(&op);
}

void ShenandoahConcurrentGC::vmop_entry_final_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalUpdateRefs op(this);
  VMThread::execute(&op);
}

void ShenandoahConcurrentGC::vmop_entry_final_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_roots_gross);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  VM_ShenandoahFinalRoots op(this);
  VMThread::execute(&op);
}

void ShenandoahConcurrentGC::entry_init_mark() {
  const char* msg = init_mark_event_message();
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
                              "init marking");

  op_init_mark();
}

void ShenandoahConcurrentGC::entry_final_mark() {
  const char* msg = final_mark_event_message();
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
                              "final marking");

  op_final_mark();
}

void ShenandoahConcurrentGC::entry_init_updaterefs() {
  static const char* msg = "Pause Init Update Refs";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs);
  EventMark em("%s", msg);

  // No workers used in this phase, no setup required
  op_init_updaterefs();
}

void ShenandoahConcurrentGC::entry_final_updaterefs() {
  static const char* msg = "Pause Final Update Refs";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
                              "final reference update");

  op_final_updaterefs();
}

void ShenandoahConcurrentGC::entry_final_roots() {
  static const char* msg = "Pause Final Roots";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots);
  EventMark em("%s", msg);

  op_final_roots();
}

void ShenandoahConcurrentGC::entry_reset() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent reset";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
                              "concurrent reset");

  heap->try_inject_alloc_failure();
  op_reset();
}

void ShenandoahConcurrentGC::entry_mark_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = "Concurrent marking roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent marking roots");

  heap->try_inject_alloc_failure();
  op_mark_roots();
}

void ShenandoahConcurrentGC::entry_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  const char* msg = conc_mark_event_message();
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent marking");

  heap->try_inject_alloc_failure();
  op_mark();
}

void ShenandoahConcurrentGC::entry_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  static const char* msg = "Concurrent thread roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_thread_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              msg);

  heap->try_inject_alloc_failure();
  op_thread_roots();
}

void ShenandoahConcurrentGC::entry_weak_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  static const char* msg = "Concurrent weak references";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_refs_processing(),
                              "concurrent weak references");

  heap->try_inject_alloc_failure();
  op_weak_refs();
}

void ShenandoahConcurrentGC::entry_weak_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent weak roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_roots);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent weak root");

  heap->try_inject_alloc_failure();
  op_weak_roots();
}

void ShenandoahConcurrentGC::entry_class_unloading() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent class unloading";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_class_unload);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent class unloading");

  heap->try_inject_alloc_failure();
  op_class_unloading();
}

void ShenandoahConcurrentGC::entry_strong_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent strong roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_strong_roots);
  EventMark em("%s", msg);

  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_strong_roots);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent strong root");

  heap->try_inject_alloc_failure();
  op_strong_roots();
}

void ShenandoahConcurrentGC::entry_cleanup_early() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent cleanup";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_early, true /* log_heap_usage */);
  EventMark em("%s", msg);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  op_cleanup_early();
}

void ShenandoahConcurrentGC::entry_evacuate() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  static const char* msg = "Concurrent evacuation";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_evac);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
                              "concurrent evacuation");

  heap->try_inject_alloc_failure();
  op_evacuate();
}

void ShenandoahConcurrentGC::entry_update_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  static const char* msg = "Concurrent update thread roots";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_thread_roots);
  EventMark em("%s", msg);

  // No workers used in this phase, no setup required
  heap->try_inject_alloc_failure();
  op_update_thread_roots();
}

void ShenandoahConcurrentGC::entry_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent update references";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
                              "concurrent reference update");

  heap->try_inject_alloc_failure();
  op_updaterefs();
}

void ShenandoahConcurrentGC::entry_cleanup_complete() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  static const char* msg = "Concurrent cleanup";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
  EventMark em("%s", msg);

  // This phase does not use workers, no need for setup
  heap->try_inject_alloc_failure();
  op_cleanup_complete();
}

void ShenandoahConcurrentGC::op_reset() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_reset();
  }

  heap->prepare_gc();
}

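// Init mark helper: captures top-at-mark-start (TAMS) for each active region,
// so that objects allocated above TAMS during the cycle are treated as
// implicitly live without being marked.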
class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
public:
  ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
    if (r->is_active()) {
      // Check if the region needs its TAMS updated. We have updated it already
      // during concurrent reset, so it is very likely we don't need to do
      // another write here.
      if (_ctx->top_at_mark_start(r) != r->top()) {
        _ctx->capture_top_at_mark_start(r);
      }
    } else {
      assert(_ctx->top_at_mark_start(r) == r->top(),
             "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
    }
  }

  bool is_thread_safe() { return true; }
};

void ShenandoahConcurrentGC::op_init_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");

  assert(heap->marking_context()->is_bitmap_clear(), "need clear marking bitmap");
  assert(!heap->marking_context()->is_complete(), "should not be complete");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");

  if (ShenandoahVerify) {
    heap->verifier()->verify_before_concmark();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  heap->set_concurrent_mark_in_progress(true);

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
    ShenandoahInitMarkUpdateRegionStateClosure cl;
    heap->parallel_heap_region_iterate(&cl);
  }

  // Weak reference processing
  ShenandoahReferenceProcessor* rp = heap->ref_processor();
  rp->reset_thread_locals();
  rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());

  // Make above changes visible to worker threads
  OrderAccess::fence();
  // Arm nmethods for concurrent marking. When an nmethod is about to be executed,
  // we need to make sure that all its metadata are marked. The alternative is to
  // remark thread roots at the final mark pause, but that could be a latency killer.
  if (heap->unload_classes()) {
    ShenandoahCodeRoots::arm_nmethods();
  }

  ShenandoahStackWatermark::change_epoch_id();
  if (ShenandoahPacing) {
    heap->pacer()->setup_for_mark();
  }
}

void ShenandoahConcurrentGC::op_mark_roots() {
  _mark.mark_concurrent_roots();
}

void ShenandoahConcurrentGC::op_mark() {
  _mark.concurrent_mark();
}

void ShenandoahConcurrentGC::op_final_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");

  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_no_forwarded();
  }

  if (!heap->cancelled_gc()) {
    _mark.finish_mark();
    assert(!heap->cancelled_gc(), "STW mark cannot OOM");

    // Notify JVMTI that the tagmap table will need cleaning.
    JvmtiTagMap::set_needs_cleaning();

    heap->prepare_regions_and_collection_set(true /*concurrent*/);

    // Has to be done after cset selection
    heap->prepare_concurrent_roots();

    if (!heap->collection_set()->is_empty()) {
      if (ShenandoahVerify) {
        heap->verifier()->verify_before_evacuation();
      }

      heap->set_evacuation_in_progress(true);
      // From here on, we need to update references.
      heap->set_has_forwarded_objects(true);

      // Verify before arming for concurrent processing.
      // Otherwise, verification can trigger stack processing.
      if (ShenandoahVerify) {
        heap->verifier()->verify_during_evacuation();
      }

      // Arm nmethods/stack for concurrent processing
      ShenandoahCodeRoots::arm_nmethods();
      ShenandoahStackWatermark::change_epoch_id();

      // Notify JVMTI that oops are changed.
      JvmtiTagMap::set_needs_rehashing();

      if (ShenandoahPacing) {
        heap->pacer()->setup_for_evac();
      }
    } else {
      if (ShenandoahVerify) {
        heap->verifier()->verify_after_concmark();
      }

      if (VerifyAfterGC) {
        Universe::verify();
      }
    }
  }
}

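// Closures for concurrent processing of thread stack roots. Each Java thread
// is handled through its stack watermark: finish_processing() completes any
// lazy stack processing that the watermark barriers have not done yet.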
class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
private:
  OopClosure* const _oops;

public:
  ShenandoahConcurrentEvacThreadClosure(OopClosure* oops);
  void do_thread(Thread* thread);
};

ShenandoahConcurrentEvacThreadClosure::ShenandoahConcurrentEvacThreadClosure(OopClosure* oops) :
  _oops(oops) {
}

void ShenandoahConcurrentEvacThreadClosure::do_thread(Thread* thread) {
  JavaThread* const jt = JavaThread::cast(thread);
  StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc);
}

class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask {
private:
  ShenandoahJavaThreadsIterator _java_threads;

public:
  ShenandoahConcurrentEvacUpdateThreadTask(uint n_workers) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Thread Roots"),
    _java_threads(ShenandoahPhaseTimings::conc_thread_roots, n_workers) {
  }

  void work(uint worker_id) {
    // ShenandoahEvacOOMScope has to be set up by ShenandoahContextEvacuateUpdateRootsClosure.
    // Otherwise, it may deadlock with the watermark lock.
    ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
    ShenandoahConcurrentEvacThreadClosure thr_cl(&oops_cl);
    _java_threads.threads_do(&thr_cl, worker_id);
  }
};

void ShenandoahConcurrentGC::op_thread_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_evacuation_in_progress(), "Checked by caller");
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
  ShenandoahConcurrentEvacUpdateThreadTask task(heap->workers()->active_workers());
  heap->workers()->run_task(&task);
}

void ShenandoahConcurrentGC::op_weak_refs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
  // Concurrent weak refs processing
  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
  if (heap->gc_cause() == GCCause::_wb_breakpoint) {
    ShenandoahBreakpoint::at_after_reference_processing_started();
  }
  heap->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
}

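// Cleans up OopStorage-backed weak roots: oops that did not survive marking
// are cleared to NULL, while live oops in the collection set are evacuated
// (or updated to their forwardees), so the storage never hands out stale
// from-space references.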
class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _mark_context;
  bool  _evac_in_progress;
  Thread* const _thread;

public:
  ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
  void do_oop(oop* p);
  void do_oop(narrowOop* p);
};

ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
  _heap(ShenandoahHeap::heap()),
  _mark_context(ShenandoahHeap::heap()->marking_context()),
  _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
  _thread(Thread::current()) {
}

void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
  const oop obj = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(obj)) {
    if (!_mark_context->is_marked(obj)) {
      shenandoah_assert_correct(p, obj);
      ShenandoahHeap::atomic_clear_oop(p, obj);
    } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
      oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      if (resolved == obj) {
        resolved = _heap->evacuate_object(obj, _thread);
      }
      ShenandoahHeap::atomic_update_oop(resolved, p, obj);
      assert(_heap->cancelled_gc() ||
             (_mark_context->is_marked(resolved) && !_heap->in_collection_set(resolved)),
             "Sanity");
    }
  }
}

void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
  ShouldNotReachHere();
}

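// The two closures below are invoked purely for their side effects: querying
// CLD liveness applies the native barrier to the CLD holder, and
// nmethod::is_unloading() computes and caches the unloading state. See the
// comments at their use sites in the weak roots task below.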
class ShenandoahIsCLDAliveClosure : public CLDClosure {
public:
  void do_cld(ClassLoaderData* cld) {
    cld->is_alive();
  }
};

class ShenandoahIsNMethodAliveClosure : public NMethodClosure {
public:
  void do_nmethod(nmethod* n) {
    n->is_unloading();
  }
};

// This task not only evacuates/updates marked weak roots, but also "NULL"s
// dead weak roots.
class ShenandoahConcurrentWeakRootsEvacUpdateTask : public WorkerTask {
private:
  ShenandoahVMWeakRoots<true /*concurrent*/> _vm_roots;

  // Roots related to concurrent class unloading
  ShenandoahClassLoaderDataRoots<true /* concurrent */>
                                             _cld_roots;
  ShenandoahConcurrentNMethodIterator        _nmethod_itr;
  ShenandoahPhaseTimings::Phase              _phase;

public:
  ShenandoahConcurrentWeakRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Weak Roots"),
    _vm_roots(phase),
    _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers(), false /*heap iteration*/),
    _nmethod_itr(ShenandoahCodeRoots::table()),
    _phase(phase) {
    if (ShenandoahHeap::heap()->unload_classes()) {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      _nmethod_itr.nmethods_do_begin();
    }
  }

  ~ShenandoahConcurrentWeakRootsEvacUpdateTask() {
    if (ShenandoahHeap::heap()->unload_classes()) {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      _nmethod_itr.nmethods_do_end();
    }
    // Notify runtime data structures of potentially dead oops
    _vm_roots.report_num_dead();
  }

  void work(uint worker_id) {
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    {
      ShenandoahEvacOOMScope oom;
      // jni_roots and weak_roots are OopStorage-backed roots; concurrent iteration
      // may race against OopStorage::release() calls.
      ShenandoahEvacUpdateCleanupOopStorageRootsClosure cl;
      _vm_roots.oops_do(&cl, worker_id);
    }

    // If we are going to perform concurrent class unloading later on, we need to
    // clean up the weak oops in CLDs and determine each nmethod's unloading state,
    // so that we can clean up immediate garbage sooner.
    if (ShenandoahHeap::heap()->unload_classes()) {
      // Applies the ShenandoahIsCLDAliveClosure to CLDs; the native barrier will
      // either NULL the CLD's holder or evacuate it.
      {
        ShenandoahIsCLDAliveClosure is_cld_alive;
        _cld_roots.cld_do(&is_cld_alive, worker_id);
      }

      // Applies the ShenandoahIsNMethodAliveClosure to registered nmethods.
      // The closure calls nmethod->is_unloading(). The is_unloading state is
      // cached; therefore, during the concurrent class unloading phase, we will
      // not touch the metadata of unloading nmethods.
      {
        ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
        ShenandoahIsNMethodAliveClosure is_nmethod_alive;
        _nmethod_itr.nmethods_do(&is_nmethod_alive);
      }
    }
  }
};

void ShenandoahConcurrentGC::op_weak_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
  // Concurrent weak root processing
  {
    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_work);
    ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots_work);
    ShenandoahConcurrentWeakRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_weak_roots_work);
    heap->workers()->run_task(&task);
  }

  // Perform handshake to flush out dead oops
  {
    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_rendezvous);
    heap->rendezvous_threads();
  }
}

void ShenandoahConcurrentGC::op_class_unloading() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_weak_root_in_progress() &&
         heap->unload_classes(),
         "Checked by caller");
  heap->do_class_unloading();
}

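// Evacuates/updates the oops embedded in a compiled method, then disarms its
// entry barrier so threads can enter the nmethod without re-triggering it.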
class ShenandoahEvacUpdateCodeCacheClosure : public NMethodClosure {
private:
  BarrierSetNMethod* const                  _bs;
  ShenandoahEvacuateUpdateMetadataClosure<> _cl;

public:
  ShenandoahEvacUpdateCodeCacheClosure() :
    _bs(BarrierSet::barrier_set()->barrier_set_nmethod()),
    _cl() {
  }

  void do_nmethod(nmethod* n) {
    ShenandoahNMethod* data = ShenandoahNMethod::gc_data(n);
    ShenandoahReentrantLocker locker(data->lock());
    // Set up the EvacOOM scope below the reentrant lock to avoid a deadlock with
    // the nmethod entry barrier.
    ShenandoahEvacOOMScope oom;
    data->oops_do(&_cl, true /*fix relocation*/);
    _bs->disarm(n);
  }
};

class ShenandoahConcurrentRootsEvacUpdateTask : public WorkerTask {
private:
  ShenandoahPhaseTimings::Phase                 _phase;
  ShenandoahVMRoots<true /*concurrent*/>        _vm_roots;
  ShenandoahClassLoaderDataRoots<true /*concurrent*/>
                                                _cld_roots;
  ShenandoahConcurrentNMethodIterator           _nmethod_itr;

public:
  ShenandoahConcurrentRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
    WorkerTask("Shenandoah Evacuate/Update Concurrent Strong Roots"),
    _phase(phase),
    _vm_roots(phase),
    _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers(), false /*heap iteration*/),
    _nmethod_itr(ShenandoahCodeRoots::table()) {
    if (!ShenandoahHeap::heap()->unload_classes()) {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      _nmethod_itr.nmethods_do_begin();
    }
  }

  ~ShenandoahConcurrentRootsEvacUpdateTask() {
    if (!ShenandoahHeap::heap()->unload_classes()) {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      _nmethod_itr.nmethods_do_end();
    }
  }

  void work(uint worker_id) {
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    {
      ShenandoahEvacOOMScope oom;
      {
        // vm_roots and weak_roots are OopStorage-backed roots; concurrent iteration
        // may race against OopStorage::release() calls.
        ShenandoahContextEvacuateUpdateRootsClosure cl;
        _vm_roots.oops_do<ShenandoahContextEvacuateUpdateRootsClosure>(&cl, worker_id);
      }

      {
        ShenandoahEvacuateUpdateMetadataClosure<> cl;
        CLDToOopClosure clds(&cl, ClassLoaderData::_claim_strong);
        _cld_roots.cld_do(&clds, worker_id);
      }
    }

    // Cannot set up ShenandoahEvacOOMScope here, due to a potential deadlock with
    // the nmethod entry barrier.
    if (!ShenandoahHeap::heap()->unload_classes()) {
      ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
      ShenandoahEvacUpdateCodeCacheClosure cl;
      _nmethod_itr.nmethods_do(&cl);
    }
  }
};

void ShenandoahConcurrentGC::op_strong_roots() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
  ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
  heap->workers()->run_task(&task);
  heap->set_concurrent_strong_root_in_progress(false);
}

void ShenandoahConcurrentGC::op_cleanup_early() {
  ShenandoahHeap::heap()->free_set()->recycle_trash();
}

void ShenandoahConcurrentGC::op_evacuate() {
  ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
}

void ShenandoahConcurrentGC::op_init_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->set_evacuation_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);
  heap->prepare_update_heap_references(true /*concurrent*/);
  heap->set_update_refs_in_progress(true);

  if (ShenandoahPacing) {
    heap->pacer()->setup_for_updaterefs();
  }
}

void ShenandoahConcurrentGC::op_updaterefs() {
  ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
}

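// Updates references in Java thread stacks via a thread-local handshake,
// avoiding a full safepoint for this step.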
class ShenandoahUpdateThreadClosure : public HandshakeClosure {
private:
  ShenandoahUpdateRefsClosure _cl;
public:
  ShenandoahUpdateThreadClosure();
  void do_thread(Thread* thread);
};

ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
  HandshakeClosure("Shenandoah Update Thread Roots") {
}

void ShenandoahUpdateThreadClosure::do_thread(Thread* thread) {
  if (thread->is_Java_thread()) {
    JavaThread* jt = JavaThread::cast(thread);
    ResourceMark rm;
    jt->oops_do(&_cl, NULL);
  }
}

void ShenandoahConcurrentGC::op_update_thread_roots() {
  ShenandoahUpdateThreadClosure cl;
  Handshake::execute(&cl);
}

void ShenandoahConcurrentGC::op_final_updaterefs() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
  assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");

  heap->finish_concurrent_roots();

  // Clear cancelled GC, if set. On the cancellation path, the block before
  // would have handled everything.
  if (heap->cancelled_gc()) {
    heap->clear_cancelled_gc();
  }

  // Has to be done before the cset is cleared
  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_in_to_space();
  }

  heap->update_heap_region_states(true /*concurrent*/);

  heap->set_update_refs_in_progress(false);
  heap->set_has_forwarded_objects(false);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_updaterefs();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  heap->rebuild_free_set(true /*concurrent*/);
}

void ShenandoahConcurrentGC::op_final_roots() {
  ShenandoahHeap::heap()->set_concurrent_weak_root_in_progress(false);
}

void ShenandoahConcurrentGC::op_cleanup_complete() {
  ShenandoahHeap::heap()->free_set()->recycle_trash();
}

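// If the GC has been cancelled, remembers the phase at which the concurrent
// cycle was abandoned, so that the collector can continue from that point in a
// degenerated (STW) cycle.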
bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
  if (ShenandoahHeap::heap()->cancelled_gc()) {
    _degen_point = point;
    return true;
  }
  return false;
}

const char* ShenandoahConcurrentGC::init_mark_event_message() const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
  if (heap->unload_classes()) {
    return "Pause Init Mark (unload classes)";
  } else {
    return "Pause Init Mark";
  }
}

const char* ShenandoahConcurrentGC::final_mark_event_message() const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
  if (heap->unload_classes()) {
    return "Pause Final Mark (unload classes)";
  } else {
    return "Pause Final Mark";
  }
}

const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
  if (heap->unload_classes()) {
    return "Concurrent marking (unload classes)";
  } else {
    return "Concurrent marking";
  }
}