
src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp (old)


  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 
  27 #include "gc/shared/barrierSetNMethod.hpp"
  28 #include "gc/shared/collectorCounters.hpp"
  29 #include "gc/shenandoah/shenandoahBreakpoint.hpp"
  30 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  31 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
  32 #include "gc/shenandoah/shenandoahFreeSet.hpp"

  33 #include "gc/shenandoah/shenandoahLock.hpp"
  34 #include "gc/shenandoah/shenandoahMark.inline.hpp"
  35 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  36 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  37 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  38 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  39 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  40 #include "gc/shenandoah/shenandoahStackWatermark.hpp"
  41 #include "gc/shenandoah/shenandoahUtils.hpp"
  42 #include "gc/shenandoah/shenandoahVerifier.hpp"
  43 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  44 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  45 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  46 #include "memory/allocation.hpp"
  47 #include "prims/jvmtiTagMap.hpp"
  48 #include "runtime/vmThread.hpp"
  49 #include "utilities/events.hpp"
  50 
  51 // Breakpoint support
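     // These RAII scopes drive the WhiteBox concurrent-GC breakpoint mechanism: tests can
     // pin a cycle at the before/after-gc and marking-started/completed boundaries that the
     // constructors and destructors below report.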
  52 class ShenandoahBreakpointGCScope : public StackObj {
  53 public:
  54   ShenandoahBreakpointGCScope() {
  55     ShenandoahBreakpoint::at_before_gc();
  56   }
  57 
  58   ~ShenandoahBreakpointGCScope() {
  59     ShenandoahBreakpoint::at_after_gc();
  60   }
  61 };
  62 
  63 class ShenandoahBreakpointMarkScope : public StackObj {
  64 public:
  65   ShenandoahBreakpointMarkScope() {
  66     ShenandoahBreakpoint::at_after_marking_started();
  67   }
  68 
  69   ~ShenandoahBreakpointMarkScope() {
  70     ShenandoahBreakpoint::at_before_marking_completed();
  71   }
  72 };
  73 
  74 ShenandoahConcurrentGC::ShenandoahConcurrentGC() :
  75   _mark(),
  76   _degen_point(ShenandoahDegenPoint::_degenerated_unset) {



  77 }
  78 
  79 ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
  80   return _degen_point;
  81 }
  82 
  83 void ShenandoahConcurrentGC::cancel() {
  84   ShenandoahConcurrentMark::cancel();
  85 }
  86 
  87 bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
  88   ShenandoahHeap* const heap = ShenandoahHeap::heap();
  89   if (cause == GCCause::_wb_breakpoint) {
  90     ShenandoahBreakpoint::start_gc();
  91   }
  92   ShenandoahBreakpointGCScope breakpoint_gc_scope;
  93 
  94   // Reset for upcoming marking
  95   entry_reset();
  96 
  97   // Start initial mark under STW
  98   vmop_entry_init_mark();
  99 
 100   {
 101     ShenandoahBreakpointMarkScope breakpoint_mark_scope;












 102     // Concurrent mark roots
 103     entry_mark_roots();
 104     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_outside_cycle)) return false;
 105 
 106     // Continue concurrent mark
 107     entry_mark();
 108     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) return false;
 109   }
 110 
 111   // Complete marking under STW, and start evacuation
 112   vmop_entry_final_mark();
 113 
 114   // Concurrent stack processing
 115   if (heap->is_evacuation_in_progress()) {
 116     entry_thread_roots();
 117   }
 118 
 119   // Process weak roots that might still point to regions that would be broken by cleanup
 120   if (heap->is_concurrent_weak_root_in_progress()) {
 121     entry_weak_refs();
 122     entry_weak_roots();
 123   }
 124 
 125   // Final mark might have reclaimed some immediate garbage; kick cleanup to reclaim
 126   // the space. This would be the last action if there is nothing to evacuate.

 127   entry_cleanup_early();
 128 
 129   {
 130     ShenandoahHeapLocker locker(heap->lock());
 131     heap->free_set()->log_status();
 132   }
 133 
 134   // Perform concurrent class unloading
 135   if (heap->unload_classes() &&
 136       heap->is_concurrent_weak_root_in_progress()) {
 137     entry_class_unloading();
 138   }
 139 
 140   // Processing strong roots
 141   // This may be skipped if there is nothing to update/evacuate.
 142   // If so, strong_root_in_progress would be unset.
 143   if (heap->is_concurrent_strong_root_in_progress()) {
 144     entry_strong_roots();
 145   }
 146 




 147   // Continue the cycle with evacuation and optional update-refs.
 148   // This may be skipped if there is nothing to evacuate.
 149   // If so, evac_in_progress would be unset by collection set preparation code.
 150   if (heap->is_evacuation_in_progress()) {
 151     // Concurrently evacuate
 152     entry_evacuate();
 153     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) return false;
 154 
 155     // Perform update-refs phase.
 156     vmop_entry_init_updaterefs();
 157     entry_updaterefs();
 158     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;
 159 
 160     // Concurrent update thread roots
 161     entry_update_thread_roots();
 162     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;
 163 
 164     vmop_entry_final_updaterefs();
 165 
 166     // Update-refs has freed up the collection set; kick cleanup to reclaim the space.
 167     entry_cleanup_complete();
 168   } else {
 169     vmop_entry_final_roots();
 170   }
 171 
 172   return true;
 173 }
 174 
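     // Note the pattern shared by the vmop_entry_* methods below: the *_gross tracker started
     // here times the entire pause, including time to reach the safepoint, while the matching
     // entry_* method (invoked by the VM operation inside the safepoint) records the net phase.
     // try_inject_alloc_failure() is a diagnostic hook that can fake an allocation failure here
     // to exercise the cancellation/degeneration paths.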
 175 void ShenandoahConcurrentGC::vmop_entry_init_mark() {
 176   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 177   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 178   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);
 179 
 180   heap->try_inject_alloc_failure();
 181   VM_ShenandoahInitMark op(this);
 182   VMThread::execute(&op); // jump to entry_init_mark() under safepoint
 183 }
 184 
 185 void ShenandoahConcurrentGC::vmop_entry_final_mark() {
 186   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 187   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 188   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);
 189 
 190   heap->try_inject_alloc_failure();
 191   VM_ShenandoahFinalMarkStartEvac op(this);
 192   VMThread::execute(&op); // jump to entry_final_mark under safepoint
 193 }
 194 
 195 void ShenandoahConcurrentGC::vmop_entry_init_updaterefs() {
 196   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 197   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 198   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_update_refs_gross);
 199 
 200   heap->try_inject_alloc_failure();
 201   VM_ShenandoahInitUpdateRefs op(this);

 207   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 208   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_gross);
 209 
 210   heap->try_inject_alloc_failure();
 211   VM_ShenandoahFinalUpdateRefs op(this);
 212   VMThread::execute(&op);
 213 }
 214 
 215 void ShenandoahConcurrentGC::vmop_entry_final_roots() {
 216   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 217   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 218   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_roots_gross);
 219 
 220   // This phase does not use workers, no need for setup
 221   heap->try_inject_alloc_failure();
 222   VM_ShenandoahFinalRoots op(this);
 223   VMThread::execute(&op);
 224 }
 225 
 226 void ShenandoahConcurrentGC::entry_init_mark() {
 227   const char* msg = init_mark_event_message();

 228   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_mark);
 229   EventMark em("%s", msg);
 230 
 231   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
 232                               ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
 233                               "init marking");
 234 








 235   op_init_mark();
 236 }
 237 
 238 void ShenandoahConcurrentGC::entry_final_mark() {
 239   const char* msg = final_mark_event_message();

 240   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark);
 241   EventMark em("%s", msg);
 242 
 243   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
 244                               ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
 245                               "final marking");
 246 
 247   op_final_mark();
 248 }
 249 
 250 void ShenandoahConcurrentGC::entry_init_updaterefs() {
 251   static const char* msg = "Pause Init Update Refs";
 252   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs);
 253   EventMark em("%s", msg);
 254 
 255   // No workers used in this phase, no setup required
 256   op_init_updaterefs();
 257 }
 258 
 259 void ShenandoahConcurrentGC::entry_final_updaterefs() {

 290   heap->try_inject_alloc_failure();
 291   op_reset();
 292 }
 293 
 294 void ShenandoahConcurrentGC::entry_mark_roots() {
 295   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 296   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 297   const char* msg = "Concurrent marking roots";
 298   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
 299   EventMark em("%s", msg);
 300 
 301   ShenandoahWorkerScope scope(heap->workers(),
 302                               ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
 303                               "concurrent marking roots");
 304 
 305   heap->try_inject_alloc_failure();
 306   op_mark_roots();
 307 }
 308 
 309 void ShenandoahConcurrentGC::entry_mark() {

 310   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 311   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 312   const char* msg = conc_mark_event_message();
 313   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark);
 314   EventMark em("%s", msg);
 315 
 316   ShenandoahWorkerScope scope(heap->workers(),
 317                               ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
 318                               "concurrent marking");
 319 
 320   heap->try_inject_alloc_failure();
 321   op_mark();
 322 }
 323 
 324 void ShenandoahConcurrentGC::entry_thread_roots() {
 325   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 326   static const char* msg = "Concurrent thread roots";
 327   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_thread_roots);
 328   EventMark em("%s", msg);
 329 
 330   ShenandoahWorkerScope scope(heap->workers(),
 331                               ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
 332                               msg);

 447   ShenandoahWorkerScope scope(heap->workers(),
 448                               ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
 449                               "concurrent reference update");
 450 
 451   heap->try_inject_alloc_failure();
 452   op_updaterefs();
 453 }
 454 
 455 void ShenandoahConcurrentGC::entry_cleanup_complete() {
 456   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 457   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 458   static const char* msg = "Concurrent cleanup";
 459   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
 460   EventMark em("%s", msg);
 461 
 462   // This phase does not use workers, no need for setup
 463   heap->try_inject_alloc_failure();
 464   op_cleanup_complete();
 465 }
 466 















 467 void ShenandoahConcurrentGC::op_reset() {
 468   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 469   if (ShenandoahPacing) {
 470     heap->pacer()->setup_for_reset();
 471   }
 472 
 473   heap->prepare_gc();
 474 }
 475 
 476 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
 477 private:
 478   ShenandoahMarkingContext* const _ctx;
 479 public:
 480   ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
 481 
 482   void heap_region_do(ShenandoahHeapRegion* r) {
 483     assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
 484     if (r->is_active()) {
 485       // Check if the region needs its TAMS updated. We have already updated it during
 486       // concurrent reset, so it is very likely we don't need to do another write here.
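           // TAMS (top-at-mark-start) divides a region: objects below TAMS predate this mark
           // and must be explicitly marked to stay live, while objects allocated above TAMS
           // during the cycle are implicitly treated as live.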

 487       if (_ctx->top_at_mark_start(r) != r->top()) {
 488         _ctx->capture_top_at_mark_start(r);
 489       }
 490     } else {
 491       assert(_ctx->top_at_mark_start(r) == r->top(),
 492              "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
 493     }
 494   }
 495 
 496   bool is_thread_safe() { return true; }
 497 };
 498 
 499 void ShenandoahConcurrentGC::op_init_mark() {
 500   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 501   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
 502   assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
 503 
 504   assert(heap->marking_context()->is_bitmap_clear(), "need clear marking bitmap");
 505   assert(!heap->marking_context()->is_complete(), "should not be complete");
 506   assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
 507 
 508   if (ShenandoahVerify) {
 509     heap->verifier()->verify_before_concmark();
 510   }
 511 
 512   if (VerifyBeforeGC) {
 513     Universe::verify();
 514   }
 515 
 516   heap->set_concurrent_mark_in_progress(true);
 517 
 518   {

 519     ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
 520     ShenandoahInitMarkUpdateRegionStateClosure cl;
 521     heap->parallel_heap_region_iterate(&cl);






 522   }
 523 
 524   // Weak reference processing
 525   ShenandoahReferenceProcessor* rp = heap->ref_processor();
 526   rp->reset_thread_locals();
 527   rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());
 528 
 529   // Make above changes visible to worker threads
 530   OrderAccess::fence();

 531   // Arm nmethods for concurrent marking. When an nmethod is about to be executed,
 532   // we need to make sure that all its metadata are marked. The alternative is to re-mark
 533   // thread roots at the final mark pause, but that is a potential latency killer.
 534   if (heap->unload_classes()) {
 535     ShenandoahCodeRoots::arm_nmethods();
 536   }
 537 
 538   ShenandoahStackWatermark::change_epoch_id();
 539   if (ShenandoahPacing) {
 540     heap->pacer()->setup_for_mark();
 541   }
 542 }
 543 
 544 void ShenandoahConcurrentGC::op_mark_roots() {
 545   _mark.mark_concurrent_roots();
 546 }
 547 
 548 void ShenandoahConcurrentGC::op_mark() {
 549   _mark.concurrent_mark();
 550 }
 551 
 552 void ShenandoahConcurrentGC::op_final_mark() {
 553   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 554   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
 555   assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
 556 
 557   if (ShenandoahVerify) {
 558     heap->verifier()->verify_roots_no_forwarded();
 559   }
 560 
 561   if (!heap->cancelled_gc()) {
 562     _mark.finish_mark();
 563     assert(!heap->cancelled_gc(), "STW mark cannot OOM");
 564 
 565     // Notify JVMTI that the tagmap table will need cleaning.
 566     JvmtiTagMap::set_needs_cleaning();
 567 
 568     heap->prepare_regions_and_collection_set(true /*concurrent*/);

 569 
 570     // Has to be done after cset selection
 571     heap->prepare_concurrent_roots();
 572 
 573     if (!heap->collection_set()->is_empty()) {
 574       if (ShenandoahVerify) {
 575         heap->verifier()->verify_before_evacuation();
 576       }
 577 
 578       heap->set_evacuation_in_progress(true);
 579       // From here on, we need to update references.
 580       heap->set_has_forwarded_objects(true);
 581 
 582       // Verify before arming for concurrent processing.
 583       // Otherwise, verification can trigger stack processing.
 584       if (ShenandoahVerify) {
 585         heap->verifier()->verify_during_evacuation();
 586       }
 587 
 588       // Arm nmethods/stack for concurrent processing

 641     ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
 642     ShenandoahConcurrentEvacThreadClosure thr_cl(&oops_cl);
 643     _java_threads.threads_do(&thr_cl, worker_id);
 644   }
 645 };
 646 
 647 void ShenandoahConcurrentGC::op_thread_roots() {
 648   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 649   assert(heap->is_evacuation_in_progress(), "Checked by caller");
 650   ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
 651   ShenandoahConcurrentEvacUpdateThreadTask task(heap->workers()->active_workers());
 652   heap->workers()->run_task(&task);
 653 }
 654 
 655 void ShenandoahConcurrentGC::op_weak_refs() {
 656   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 657   assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
 658   // Concurrent weak refs processing
 659   ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
 660   ShenandoahBreakpoint::at_after_reference_processing_started();
 661   heap->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
 662 }
 663 
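     // Cleans up OopStorage weak roots: a referent that was not marked is dead, so the slot
     // is cleared atomically; a live referent in the collection set is evacuated (or its
     // existing forwardee used) and the slot is CASed to the to-space copy, so the stale
     // from-space reference never escapes through this root.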
 664 class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
 665 private:
 666   ShenandoahHeap* const _heap;
 667   ShenandoahMarkingContext* const _mark_context;
 668   bool  _evac_in_progress;
 669   Thread* const _thread;
 670 
 671 public:
 672   ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
 673   void do_oop(oop* p);
 674   void do_oop(narrowOop* p);
 675 };
 676 
 677 ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
 678   _heap(ShenandoahHeap::heap()),
 679   _mark_context(ShenandoahHeap::heap()->marking_context()),
 680   _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
 681   _thread(Thread::current()) {
 682 }
 683 
 684 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
 685   const oop obj = RawAccess<>::oop_load(p);
 686   if (!CompressedOops::is_null(obj)) {
 687     if (!_mark_context->is_marked(obj)) {
 688       shenandoah_assert_correct(p, obj);
 689       ShenandoahHeap::atomic_clear_oop(p, obj);







 690     } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
 691       oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 692       if (resolved == obj) {
 693         resolved = _heap->evacuate_object(obj, _thread);
 694       }
 695       ShenandoahHeap::atomic_update_oop(resolved, p, obj);
 696       assert(_heap->cancelled_gc() ||
 697              _mark_context->is_marked(resolved) && !_heap->in_collection_set(resolved),
 698              "Sanity");
 699     }
 700   }
 701 }
 702 
 703 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
 704   ShouldNotReachHere();
 705 }
 706 
 707 class ShenandoahIsCLDAliveClosure : public CLDClosure {
 708 public:
 709   void do_cld(ClassLoaderData* cld) {

 893   assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
 894   ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
 895   heap->workers()->run_task(&task);
 896   heap->set_concurrent_strong_root_in_progress(false);
 897 }
 898 
 899 void ShenandoahConcurrentGC::op_cleanup_early() {
 900   ShenandoahHeap::heap()->free_set()->recycle_trash();
 901 }
 902 
 903 void ShenandoahConcurrentGC::op_evacuate() {
 904   ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
 905 }
 906 
 907 void ShenandoahConcurrentGC::op_init_updaterefs() {
 908   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 909   heap->set_evacuation_in_progress(false);
 910   heap->set_concurrent_weak_root_in_progress(false);
 911   heap->prepare_update_heap_references(true /*concurrent*/);
 912   heap->set_update_refs_in_progress(true);
 913 


 914   if (ShenandoahPacing) {
 915     heap->pacer()->setup_for_updaterefs();
 916   }
 917 }
 918 
 919 void ShenandoahConcurrentGC::op_updaterefs() {
 920   ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
 921 }
 922 
 923 class ShenandoahUpdateThreadClosure : public HandshakeClosure {
 924 private:
 925   ShenandoahUpdateRefsClosure _cl;
 926 public:
 927   ShenandoahUpdateThreadClosure();
 928   void do_thread(Thread* thread);
 929 };
 930 
 931 ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
 932   HandshakeClosure("Shenandoah Update Thread Roots") {
 933 }

 938     ResourceMark rm;
 939     jt->oops_do(&_cl, NULL);
 940   }
 941 }
 942 
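     // Thread roots are updated via a handshake rather than a safepoint: each Java thread
     // walks its own oops with the closure above at a handshake point, avoiding a full
     // stop-the-world pause for this step.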
 943 void ShenandoahConcurrentGC::op_update_thread_roots() {
 944   ShenandoahUpdateThreadClosure cl;
 945   Handshake::execute(&cl);
 946 }
 947 
 948 void ShenandoahConcurrentGC::op_final_updaterefs() {
 949   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 950   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
 951   assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");
 952 
 953   heap->finish_concurrent_roots();
 954 
 955   // Clear cancelled GC, if set. On cancellation path, the block before would handle
 956   // everything.
 957   if (heap->cancelled_gc()) {
 958     heap->clear_cancelled_gc();
 959   }
 960 
 961   // Has to be done before the cset is cleared
 962   if (ShenandoahVerify) {
 963     heap->verifier()->verify_roots_in_to_space();
 964   }
 965 
 966   heap->update_heap_region_states(true /*concurrent*/);
 967 







 968   heap->set_update_refs_in_progress(false);
 969   heap->set_has_forwarded_objects(false);
 970 




 971   if (ShenandoahVerify) {
 972     heap->verifier()->verify_after_updaterefs();
 973   }
 974 
 975   if (VerifyAfterGC) {
 976     Universe::verify();
 977   }
 978 
 979   heap->rebuild_free_set(true /*concurrent*/);
 980 }
 981 
 982 void ShenandoahConcurrentGC::op_final_roots() {
 983   ShenandoahHeap::heap()->set_concurrent_weak_root_in_progress(false);
 984 }
 985 
 986 void ShenandoahConcurrentGC::op_cleanup_complete() {
 987   ShenandoahHeap::heap()->free_set()->recycle_trash();
 988 }
 989 
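     // Called after each cancellable phase: on cancellation, record how far the cycle got so
     // the control thread can continue from this point with a degenerated (STW) cycle rather
     // than discarding the work done so far.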




 990 bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
 991   if (ShenandoahHeap::heap()->cancelled_gc()) {
 992     _degen_point = point;
 993     return true;
 994   }
 995   return false;
 996 }
 997 
 998 const char* ShenandoahConcurrentGC::init_mark_event_message() const {
 999   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1000   assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1001   if (heap->unload_classes()) {
1002     return "Pause Init Mark (unload classes)";
1003   } else {
1004     return "Pause Init Mark";
1005   }
1006 }
1007 
1008 const char* ShenandoahConcurrentGC::final_mark_event_message() const {
1009   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1010   assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");

1011   if (heap->unload_classes()) {
1012     return "Pause Final Mark (unload classes)";
1013   } else {
1014     return "Pause Final Mark";
1015   }
1016 }
1017 
1018 const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
1019   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1020   assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");

1021   if (heap->unload_classes()) {
1022     return "Concurrent marking (unload classes)";
1023   } else {
1024     return "Concurrent marking";
1025   }
1026 }

src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp (new)

  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 
  27 #include "gc/shared/barrierSetNMethod.hpp"
  28 #include "gc/shared/collectorCounters.hpp"
  29 #include "gc/shenandoah/shenandoahBreakpoint.hpp"
  30 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  31 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
  32 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  33 #include "gc/shenandoah/shenandoahGeneration.hpp"
  34 #include "gc/shenandoah/shenandoahLock.hpp"
  35 #include "gc/shenandoah/shenandoahMark.inline.hpp"
  36 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  37 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  38 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  39 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  40 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  41 #include "gc/shenandoah/shenandoahStackWatermark.hpp"
  42 #include "gc/shenandoah/shenandoahUtils.hpp"
  43 #include "gc/shenandoah/shenandoahVerifier.hpp"
  44 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  45 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  46 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  47 #include "memory/allocation.hpp"
  48 #include "prims/jvmtiTagMap.hpp"
  49 #include "runtime/vmThread.hpp"
  50 #include "utilities/events.hpp"
  51 
  52 // Breakpoint support
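     // These RAII scopes drive the WhiteBox concurrent-GC breakpoint mechanism: tests can
     // pin a cycle at the before/after-gc and marking-started/completed boundaries that the
     // constructors and destructors below report.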
  53 class ShenandoahBreakpointGCScope : public StackObj {
  54 public:
  55   ShenandoahBreakpointGCScope() {
  56     ShenandoahBreakpoint::at_before_gc();
  57   }
  58 
  59   ~ShenandoahBreakpointGCScope() {
  60     ShenandoahBreakpoint::at_after_gc();
  61   }
  62 };
  63 
  64 class ShenandoahBreakpointMarkScope : public StackObj {
  65 public:
  66   ShenandoahBreakpointMarkScope() {
  67     ShenandoahBreakpoint::at_after_marking_started();
  68   }
  69 
  70   ~ShenandoahBreakpointMarkScope() {
  71     ShenandoahBreakpoint::at_before_marking_completed();
  72   }
  73 };
  74 
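     // Generational additions, judging by their use below: _generation scopes marking to the
     // young or global generation; _do_old_gc_bootstrap appears to request a young cycle that
     // also primes old-generation marking; _mixed_evac records whether the collection set may
     // mix in old regions.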
  75 ShenandoahConcurrentGC::ShenandoahConcurrentGC(ShenandoahGeneration* generation, bool do_old_gc_bootstrap) :
  76   _mark(generation),
  77   _degen_point(ShenandoahDegenPoint::_degenerated_unset),
  78   _mixed_evac (false),
  79   _do_old_gc_bootstrap(do_old_gc_bootstrap),
  80   _generation(generation) {
  81 }
  82 
  83 ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
  84   return _degen_point;
  85 }
  86 




  87 bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
  88   ShenandoahHeap* const heap = ShenandoahHeap::heap();
  89   if (cause == GCCause::_wb_breakpoint) {
  90     ShenandoahBreakpoint::start_gc();
  91   }
  92   ShenandoahBreakpointGCScope breakpoint_gc_scope;
  93 
  94   // Reset for upcoming marking
  95   entry_reset();
  96 
  97   // Start initial mark under STW
  98   vmop_entry_init_mark();
  99 
 100   {
 101     ShenandoahBreakpointMarkScope breakpoint_mark_scope;
 102 
 103     // Reset task queue stats here, rather than in mark_concurrent_roots,
 104     // because the remembered set scan will push oops onto the queues, and
 105     // resetting after that happens would lose those counts.
 106     TASKQUEUE_STATS_ONLY(_mark.task_queues()->reset_taskqueue_stats());
 107 
 108     // Concurrent remembered set scanning
 109     if (_generation->generation_mode() == YOUNG) {
 110       ShenandoahConcurrentPhase gc_phase("Concurrent remembered set scanning", ShenandoahPhaseTimings::init_scan_rset);
 111       _generation->scan_remembered_set();
 112     }
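         // Old-to-young pointers act as extra roots for a young collection; the remembered
         // set (card table) scan above finds them and pushes them onto the mark queues,
         // which is why the task queue stats had to be reset before this point.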
 113 
 114     // Concurrent mark roots
 115     entry_mark_roots();
 116     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_roots)) return false;
 117 
 118     // Continue concurrent mark
 119     entry_mark();
 120     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) return false;
 121   }
 122 
 123   // Complete marking under STW, and start evacuation
 124   vmop_entry_final_mark();
 125 
 126   // Concurrent stack processing
 127   if (heap->is_evacuation_in_progress()) {
 128     entry_thread_roots();
 129   }
 130 
 131   // Process weak roots that might still point to regions that would be broken by cleanup
 132   if (heap->is_concurrent_weak_root_in_progress()) {
 133     entry_weak_refs();
 134     entry_weak_roots();
 135   }
 136 
 137   // Final mark might have reclaimed some immediate garbage; kick cleanup to reclaim
 138   // the space. This would be the last action if there is nothing to evacuate. Note that
 139   // we will not age young-gen objects if we skip evacuation.
 140   entry_cleanup_early();
 141 
 142   {
 143     ShenandoahHeapLocker locker(heap->lock());
 144     heap->free_set()->log_status();
 145   }
 146 
 147   // Perform concurrent class unloading
 148   if (heap->unload_classes() &&
 149       heap->is_concurrent_weak_root_in_progress()) {
 150     entry_class_unloading();
 151   }
 152 
 153   // Processing strong roots
 154   // This may be skipped if there is nothing to update/evacuate.
 155   // If so, strong_root_in_progress would be unset.
 156   if (heap->is_concurrent_strong_root_in_progress()) {
 157     entry_strong_roots();
 158   }
 159 
 160   if (!heap->cancelled_gc() && heap->mode()->is_generational() && _generation->generation_mode() == GLOBAL) {
 161     entry_global_coalesce_and_fill();
 162   }
 163 
 164   // Continue the cycle with evacuation and optional update-refs.
 165   // This may be skipped if there is nothing to evacuate.
 166   // If so, evac_in_progress would be unset by collection set preparation code.
 167   if (heap->is_evacuation_in_progress()) {
 168     // Concurrently evacuate
 169     entry_evacuate();
 170     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) return false;
 171 
 172     // Perform update-refs phase.
 173     vmop_entry_init_updaterefs();
 174     entry_updaterefs();
 175     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;
 176 
 177     // Concurrent update thread roots
 178     entry_update_thread_roots();
 179     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;
 180 
 181     vmop_entry_final_updaterefs();
 182 
 183     // Update-refs has freed up the collection set; kick cleanup to reclaim the space.
 184     entry_cleanup_complete();
 185   } else {
 186     vmop_entry_final_roots();
 187   }
 188 
 189   return true;
 190 }
 191 
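     // Note the pattern shared by the vmop_entry_* methods below: the *_gross tracker started
     // here times the entire pause, including time to reach the safepoint, while the matching
     // entry_* method (invoked by the VM operation inside the safepoint) records the net phase.
     // try_inject_alloc_failure() is a diagnostic hook that can fake an allocation failure here
     // to exercise the cancellation/degeneration paths.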
 192 void ShenandoahConcurrentGC::vmop_entry_init_mark() {
 193   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 194   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 195   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);
 196 
 197   heap->try_inject_alloc_failure();
 198   VM_ShenandoahInitMark op(this, _do_old_gc_bootstrap);
 199   VMThread::execute(&op); // jump to entry_init_mark() under safepoint
 200 }
 201 
 202 void ShenandoahConcurrentGC::vmop_entry_final_mark() {
 203   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 204   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 205   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);
 206 
 207   heap->try_inject_alloc_failure();
 208   VM_ShenandoahFinalMarkStartEvac op(this);
 209   VMThread::execute(&op); // jump to entry_final_mark under safepoint
 210 }
 211 
 212 void ShenandoahConcurrentGC::vmop_entry_init_updaterefs() {
 213   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 214   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 215   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_update_refs_gross);
 216 
 217   heap->try_inject_alloc_failure();
 218   VM_ShenandoahInitUpdateRefs op(this);

 224   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 225   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_gross);
 226 
 227   heap->try_inject_alloc_failure();
 228   VM_ShenandoahFinalUpdateRefs op(this);
 229   VMThread::execute(&op);
 230 }
 231 
 232 void ShenandoahConcurrentGC::vmop_entry_final_roots() {
 233   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 234   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 235   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_roots_gross);
 236 
 237   // This phase does not use workers, no need for setup
 238   heap->try_inject_alloc_failure();
 239   VM_ShenandoahFinalRoots op(this);
 240   VMThread::execute(&op);
 241 }
 242 
 243 void ShenandoahConcurrentGC::entry_init_mark() {
 244   char msg[1024];
 245   init_mark_event_message(msg, sizeof(msg));
 246   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_mark);
 247   EventMark em("%s", msg);
 248 
 249   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
 250                               ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
 251                               "init marking");
 252 
 253   if (ShenandoahHeap::heap()->mode()->is_generational()
 254     && (_generation->generation_mode() == YOUNG || (_generation->generation_mode() == GLOBAL && ShenandoahVerify))) {
 255     // The current implementation of swap_remembered_set() copies the write-card-table
 256     // to the read-card-table. The remembered sets are swapped for GLOBAL collections as
 257     // well, so that the verifier works with the correct copy of the card table.
 258     _generation->swap_remembered_set();
 259   }
 260 
 261   op_init_mark();
 262 }
 263 
 264 void ShenandoahConcurrentGC::entry_final_mark() {
 265   char msg[1024];
 266   final_mark_event_message(msg, sizeof(msg));
 267   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark);
 268   EventMark em("%s", msg);
 269 
 270   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
 271                               ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
 272                               "final marking");
 273 
 274   op_final_mark();
 275 }
 276 
 277 void ShenandoahConcurrentGC::entry_init_updaterefs() {
 278   static const char* msg = "Pause Init Update Refs";
 279   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs);
 280   EventMark em("%s", msg);
 281 
 282   // No workers used in this phase, no setup required
 283   op_init_updaterefs();
 284 }
 285 
 286 void ShenandoahConcurrentGC::entry_final_updaterefs() {

 317   heap->try_inject_alloc_failure();
 318   op_reset();
 319 }
 320 
 321 void ShenandoahConcurrentGC::entry_mark_roots() {
 322   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 323   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 324   const char* msg = "Concurrent marking roots";
 325   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
 326   EventMark em("%s", msg);
 327 
 328   ShenandoahWorkerScope scope(heap->workers(),
 329                               ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
 330                               "concurrent marking roots");
 331 
 332   heap->try_inject_alloc_failure();
 333   op_mark_roots();
 334 }
 335 
 336 void ShenandoahConcurrentGC::entry_mark() {
 337   char msg[1024];
 338   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 339   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 340   conc_mark_event_message(msg, sizeof(msg));
 341   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark);
 342   EventMark em("%s", msg);
 343 
 344   ShenandoahWorkerScope scope(heap->workers(),
 345                               ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
 346                               "concurrent marking");
 347 
 348   heap->try_inject_alloc_failure();
 349   op_mark();
 350 }
 351 
 352 void ShenandoahConcurrentGC::entry_thread_roots() {
 353   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 354   static const char* msg = "Concurrent thread roots";
 355   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_thread_roots);
 356   EventMark em("%s", msg);
 357 
 358   ShenandoahWorkerScope scope(heap->workers(),
 359                               ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
 360                               msg);

 475   ShenandoahWorkerScope scope(heap->workers(),
 476                               ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
 477                               "concurrent reference update");
 478 
 479   heap->try_inject_alloc_failure();
 480   op_updaterefs();
 481 }
 482 
 483 void ShenandoahConcurrentGC::entry_cleanup_complete() {
 484   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 485   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 486   static const char* msg = "Concurrent cleanup";
 487   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
 488   EventMark em("%s", msg);
 489 
 490   // This phase does not use workers, no need for setup
 491   heap->try_inject_alloc_failure();
 492   op_cleanup_complete();
 493 }
 494 
 495 void ShenandoahConcurrentGC::entry_global_coalesce_and_fill() {
 496   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 497 
 498   const char* msg = "Coalescing and filling old regions in global collect";
 499   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::coalesce_and_fill);
 500 
 501   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 502   EventMark em("%s", msg);
 503   ShenandoahWorkerScope scope(heap->workers(),
 504                               ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
 505                               "concurrent coalesce and fill");
 506 
 507   op_global_coalesce_and_fill();
 508 }
 509 
 510 void ShenandoahConcurrentGC::op_reset() {
 511   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 512   if (ShenandoahPacing) {
 513     heap->pacer()->setup_for_reset();
 514   }
 515   _generation->prepare_gc(_do_old_gc_bootstrap);

 516 }
 517 
 518 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
 519 private:
 520   ShenandoahMarkingContext* const _ctx;
 521 public:
 522   ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
 523 
 524   void heap_region_do(ShenandoahHeapRegion* r) {
 525     assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
 526     if (r->is_active()) {
 527       // Check if the region needs its TAMS updated. We have already updated it during
 528       // concurrent reset, so it is very likely we don't need another write here. Since most
 529       // regions are not "active", this path is relatively rare.
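           // TAMS (top-at-mark-start) divides a region: objects below TAMS predate this mark
           // and must be explicitly marked to stay live, while objects allocated above TAMS
           // during the cycle are implicitly treated as live.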
 530       if (_ctx->top_at_mark_start(r) != r->top()) {
 531         _ctx->capture_top_at_mark_start(r);
 532       }
 533     } else {
 534       assert(_ctx->top_at_mark_start(r) == r->top(),
 535              "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
 536     }
 537   }
 538 
 539   bool is_thread_safe() { return true; }
 540 };
 541 
 542 void ShenandoahConcurrentGC::op_init_mark() {
 543   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 544   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
 545   assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
 546 
 547   assert(_generation->is_bitmap_clear(), "need clear marking bitmap");
 548   assert(!_generation->is_mark_complete(), "should not be complete");
 549   assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
 550 
 551   if (ShenandoahVerify) {
 552     heap->verifier()->verify_before_concmark();
 553   }
 554 
 555   if (VerifyBeforeGC) {
 556     Universe::verify();
 557   }
 558 
 559   _generation->set_concurrent_mark_in_progress(true);
 560 
 561   if (_do_old_gc_bootstrap) {
 562     // Update region state for both young and old regions
 563     ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
 564     ShenandoahInitMarkUpdateRegionStateClosure cl;
 565     heap->parallel_heap_region_iterate(&cl);
 566     heap->old_generation()->parallel_heap_region_iterate(&cl);
 567   } else {
 568     // Update region state for only young regions
 569     ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
 570     ShenandoahInitMarkUpdateRegionStateClosure cl;
 571     _generation->parallel_heap_region_iterate(&cl);
 572   }
 573 
 574   // Weak reference processing
 575   ShenandoahReferenceProcessor* rp = _generation->ref_processor();
 576   rp->reset_thread_locals();
 577   rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());
 578 
 579   // Make above changes visible to worker threads
 580   OrderAccess::fence();
 581 
 582   // Arm nmethods for concurrent marking. When an nmethod is about to be executed,
 583   // we need to make sure that all its metadata are marked. The alternative is to re-mark
 584   // thread roots at the final mark pause, but that is a potential latency killer.
 585   if (heap->unload_classes()) {
 586     ShenandoahCodeRoots::arm_nmethods();
 587   }
 588 
 589   ShenandoahStackWatermark::change_epoch_id();
 590   if (ShenandoahPacing) {
 591     heap->pacer()->setup_for_mark();
 592   }
 593 }
 594 
 595 void ShenandoahConcurrentGC::op_mark_roots() {
 596   _mark.mark_concurrent_roots();
 597 }
 598 
 599 void ShenandoahConcurrentGC::op_mark() {
 600   _mark.concurrent_mark();
 601 }
 602 
 603 void ShenandoahConcurrentGC::op_final_mark() {
 604   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 605   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
 606   assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
 607 
 608   if (ShenandoahVerify) {
 609     heap->verifier()->verify_roots_no_forwarded();
 610   }
 611 
 612   if (!heap->cancelled_gc()) {
 613     _mark.finish_mark();
 614     assert(!heap->cancelled_gc(), "STW mark cannot OOM");
 615 
 616     // Notify JVMTI that the tagmap table will need cleaning.
 617     JvmtiTagMap::set_needs_cleaning();
 618 
 619     bool mixed_evac = _generation->prepare_regions_and_collection_set(true /*concurrent*/);
 620     heap->set_mixed_evac(mixed_evac);
 621 
 622     // Has to be done after cset selection
 623     heap->prepare_concurrent_roots();
 624 
 625     if (!heap->collection_set()->is_empty()) {
 626       if (ShenandoahVerify) {
 627         heap->verifier()->verify_before_evacuation();
 628       }
 629 
 630       heap->set_evacuation_in_progress(true);
 631       // From here on, we need to update references.
 632       heap->set_has_forwarded_objects(true);
 633 
 634       // Verify before arming for concurrent processing.
 635       // Otherwise, verification can trigger stack processing.
 636       if (ShenandoahVerify) {
 637         heap->verifier()->verify_during_evacuation();
 638       }
 639 
 640       // Arm nmethods/stack for concurrent processing

 693     ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
 694     ShenandoahConcurrentEvacThreadClosure thr_cl(&oops_cl);
 695     _java_threads.threads_do(&thr_cl, worker_id);
 696   }
 697 };
 698 
 699 void ShenandoahConcurrentGC::op_thread_roots() {
 700   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 701   assert(heap->is_evacuation_in_progress(), "Checked by caller");
 702   ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
 703   ShenandoahConcurrentEvacUpdateThreadTask task(heap->workers()->active_workers());
 704   heap->workers()->run_task(&task);
 705 }
 706 
 707 void ShenandoahConcurrentGC::op_weak_refs() {
 708   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 709   assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
 710   // Concurrent weak refs processing
 711   ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
 712   ShenandoahBreakpoint::at_after_reference_processing_started();
 713   _generation->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
 714 }
 715 
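     // Cleans up OopStorage weak roots: a referent that was not marked is dead, so the slot
     // is cleared atomically; a live referent in the collection set is evacuated (or its
     // existing forwardee used) and the slot is CASed to the to-space copy, so the stale
     // from-space reference never escapes through this root.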
 716 class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
 717 private:
 718   ShenandoahHeap* const _heap;
 719   ShenandoahMarkingContext* const _mark_context;
 720   bool  _evac_in_progress;
 721   Thread* const _thread;
 722 
 723 public:
 724   ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
 725   void do_oop(oop* p);
 726   void do_oop(narrowOop* p);
 727 };
 728 
 729 ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
 730   _heap(ShenandoahHeap::heap()),
 731   _mark_context(ShenandoahHeap::heap()->marking_context()),
 732   _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
 733   _thread(Thread::current()) {
 734 }
 735 
 736 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
 737   const oop obj = RawAccess<>::oop_load(p);
 738   if (!CompressedOops::is_null(obj)) {
 739     if (!_mark_context->is_marked(obj)) {
 740       if (_heap->is_in_active_generation(obj)) {
 741         // TODO: This worries me. Here we are asserting that an unmarked from-space object is 'correct'.
 742         // Normally, I would call this a bogus assert, but there seems to be a legitimate use-case for
 743         // accessing from-space objects during class unloading. However, the from-space object may have
 744       // been "filled". We've made no effort to prevent old-generation classes from being
 745       // unloaded by young gen (and vice-versa).
 746         shenandoah_assert_correct(p, obj);
 747         ShenandoahHeap::atomic_clear_oop(p, obj);
 748       }
 749     } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
 750       oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 751       if (resolved == obj) {
 752         resolved = _heap->evacuate_object(obj, _thread);
 753       }
 754       ShenandoahHeap::atomic_update_oop(resolved, p, obj);
 755       assert(_heap->cancelled_gc() ||
 756              _mark_context->is_marked(resolved) && !_heap->in_collection_set(resolved),
 757              "Sanity");
 758     }
 759   }
 760 }
 761 
 762 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
 763   ShouldNotReachHere();
 764 }
 765 
 766 class ShenandoahIsCLDAliveClosure : public CLDClosure {
 767 public:
 768   void do_cld(ClassLoaderData* cld) {

 952   assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
 953   ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
 954   heap->workers()->run_task(&task);
 955   heap->set_concurrent_strong_root_in_progress(false);
 956 }
 957 
 958 void ShenandoahConcurrentGC::op_cleanup_early() {
 959   ShenandoahHeap::heap()->free_set()->recycle_trash();
 960 }
 961 
 962 void ShenandoahConcurrentGC::op_evacuate() {
 963   ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
 964 }
 965 
 966 void ShenandoahConcurrentGC::op_init_updaterefs() {
 967   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 968   heap->set_evacuation_in_progress(false);
 969   heap->set_concurrent_weak_root_in_progress(false);
 970   heap->prepare_update_heap_references(true /*concurrent*/);
 971   heap->set_update_refs_in_progress(true);
 972   if (ShenandoahVerify) {
 973     heap->verifier()->verify_before_updaterefs();
 974   }
 975   if (ShenandoahPacing) {
 976     heap->pacer()->setup_for_updaterefs();
 977   }
 978 }
 979 
 980 void ShenandoahConcurrentGC::op_updaterefs() {
 981   ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
 982 }
 983 
 984 class ShenandoahUpdateThreadClosure : public HandshakeClosure {
 985 private:
 986   ShenandoahUpdateRefsClosure _cl;
 987 public:
 988   ShenandoahUpdateThreadClosure();
 989   void do_thread(Thread* thread);
 990 };
 991 
 992 ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
 993   HandshakeClosure("Shenandoah Update Thread Roots") {
 994 }

 999     ResourceMark rm;
1000     jt->oops_do(&_cl, NULL);
1001   }
1002 }
1003 
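     // Thread roots are updated via a handshake rather than a safepoint: each Java thread
     // walks its own oops with the closure above at a handshake point, avoiding a full
     // stop-the-world pause for this step.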
1004 void ShenandoahConcurrentGC::op_update_thread_roots() {
1005   ShenandoahUpdateThreadClosure cl;
1006   Handshake::execute(&cl);
1007 }
1008 
1009 void ShenandoahConcurrentGC::op_final_updaterefs() {
1010   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1011   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
1012   assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");
1013 
1014   heap->finish_concurrent_roots();
1015 
1016   // Clear cancelled GC, if set. On cancellation path, the block before would handle
1017   // everything.
1018   if (heap->cancelled_gc()) {
1019     heap->clear_cancelled_gc(true /* clear oom handler */);
1020   }
1021 
1022   // Has to be done before the cset is cleared
1023   if (ShenandoahVerify) {
1024     heap->verifier()->verify_roots_in_to_space();
1025   }
1026 
1027   heap->update_heap_region_states(true /*concurrent*/);
1028 
1029   if (heap->is_concurrent_old_mark_in_progress()) {
1030     // Purge the SATB buffers, transferring any valid, old pointers to the
1031     // old generation mark queue. From here on, no mutator will have access
1032     // to anything that will be trashed and recycled.
1033     heap->purge_old_satb_buffers(false /* abandon */);
1034   }
1035 
1036   heap->set_update_refs_in_progress(false);
1037   heap->set_has_forwarded_objects(false);
1038 
1039   // The aging cycle is only relevant during the evacuation cycle (for individual objects) and
1040   // during final mark (for entire regions); both of these operations occur before final update-refs.
1041   heap->set_aging_cycle(false);
1042 
1043   if (ShenandoahVerify) {
1044     heap->verifier()->verify_after_updaterefs();
1045   }
1046 
1047   if (VerifyAfterGC) {
1048     Universe::verify();
1049   }
1050 
1051   heap->rebuild_free_set(true /*concurrent*/);
1052 }
1053 
1054 void ShenandoahConcurrentGC::op_final_roots() {
1055   ShenandoahHeap::heap()->set_concurrent_weak_root_in_progress(false);
1056 }
1057 
1058 void ShenandoahConcurrentGC::op_cleanup_complete() {
1059   ShenandoahHeap::heap()->free_set()->recycle_trash();
1060 }
1061 
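     // Coalesce-and-fill overwrites runs of dead objects in old regions with filler objects,
     // presumably so that old regions remain parseable for later remembered-set (card)
     // scanning when a global cycle leaves them un-evacuated.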
1062 void ShenandoahConcurrentGC::op_global_coalesce_and_fill() {
1063   ShenandoahHeap::heap()->coalesce_and_fill_old_regions();
1064 }
1065 
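     // Called after each cancellable phase: on cancellation, record how far the cycle got so
     // the control thread can continue from this point with a degenerated (STW) cycle rather
     // than discarding the work done so far.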
1066 bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
1067   if (ShenandoahHeap::heap()->cancelled_gc()) {
1068     _degen_point = point;
1069     return true;
1070   }
1071   return false;
1072 }
1073 
1074 void ShenandoahConcurrentGC::init_mark_event_message(char* buf, size_t len) const {
1075   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1076   assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1077   if (heap->unload_classes()) {
1078     jio_snprintf(buf, len, "Pause Init Mark (%s) (unload classes)", _generation->name());
1079   } else {
1080     jio_snprintf(buf, len, "Pause Init Mark (%s)", _generation->name());
1081   }
1082 }
1083 
1084 void ShenandoahConcurrentGC::final_mark_event_message(char* buf, size_t len) const {
1085   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1086   assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
1087          "Should not have forwarded objects during final mark (unless old gen concurrent mark is running)");
1088   if (heap->unload_classes()) {
1089     jio_snprintf(buf, len, "Pause Final Mark (%s) (unload classes)", _generation->name());
1090   } else {
1091     jio_snprintf(buf, len, "Pause Final Mark (%s)", _generation->name());
1092   }
1093 }
1094 
1095 void ShenandoahConcurrentGC::conc_mark_event_message(char* buf, size_t len) const {
1096   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1097   assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
1098          "Should not have forwarded objects concurrent mark (unless old gen concurrent mark is running");
1099   if (heap->unload_classes()) {
1100     jio_snprintf(buf, len, "Concurrent marking (%s) (unload classes)", _generation->name());
1101   } else {
1102     jio_snprintf(buf, len, "Concurrent marking (%s)", _generation->name());
1103   }
1104 }