
src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp

  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 
  27 #include "gc/shared/barrierSetNMethod.hpp"
  28 #include "gc/shared/collectorCounters.hpp"
  29 #include "gc/shared/continuationGCSupport.inline.hpp"
  30 #include "gc/shenandoah/shenandoahBreakpoint.hpp"
  31 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  32 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
  33 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  34 #include "gc/shenandoah/shenandoahLock.hpp"
  35 #include "gc/shenandoah/shenandoahMark.inline.hpp"
  36 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  37 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  38 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  39 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  40 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  41 #include "gc/shenandoah/shenandoahStackWatermark.hpp"
  42 #include "gc/shenandoah/shenandoahUtils.hpp"
  43 #include "gc/shenandoah/shenandoahVerifier.hpp"
  44 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  45 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  46 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  47 #include "memory/allocation.hpp"
  48 #include "prims/jvmtiTagMap.hpp"
  49 #include "runtime/vmThread.hpp"
  50 #include "utilities/events.hpp"
  51 
  52 // Breakpoint support
  53 class ShenandoahBreakpointGCScope : public StackObj {

  68   }
  69 };
  70 
  71 class ShenandoahBreakpointMarkScope : public StackObj {
  72 private:
  73   const GCCause::Cause _cause;
  74 public:
  75   ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
  76     if (_cause == GCCause::_wb_breakpoint) {
  77       ShenandoahBreakpoint::at_after_marking_started();
  78     }
  79   }
  80 
  81   ~ShenandoahBreakpointMarkScope() {
  82     if (_cause == GCCause::_wb_breakpoint) {
  83       ShenandoahBreakpoint::at_before_marking_completed();
  84     }
  85   }
  86 };
  87 
  88 ShenandoahConcurrentGC::ShenandoahConcurrentGC() :
  89   _mark(),
  90   _degen_point(ShenandoahDegenPoint::_degenerated_unset) {
  91 }
  92 
  93 ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
  94   return _degen_point;
  95 }
  96 
  97 void ShenandoahConcurrentGC::cancel() {
  98   ShenandoahConcurrentMark::cancel();
  99 }
 100 
 101 bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
 102   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 103   ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);
 104 
 105   // Reset for upcoming marking
 106   entry_reset();
 107 
 108   // Start initial mark under STW
 109   vmop_entry_init_mark();
 110 
 111   {
 112     ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);
 113     // Concurrent mark roots
 114     entry_mark_roots();
 115     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_outside_cycle)) return false;
 116 
 117     // Continue concurrent mark
 118     entry_mark();
 119     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) return false;
 120   }
 121 
 122   // Complete marking under STW, and start evacuation
 123   vmop_entry_final_mark();
 124 
 125   // Concurrent stack processing
 126   if (heap->is_evacuation_in_progress()) {
 127     entry_thread_roots();
 128   }
 129 
 130   // Process weak roots that might still point to regions that would be broken by cleanup
 131   if (heap->is_concurrent_weak_root_in_progress()) {
 132     entry_weak_refs();
 133     entry_weak_roots();
 134   }
 135 
 136   // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
 137   // the space. This would be the last action if there is nothing to evacuate.

 138   entry_cleanup_early();
 139 
 140   {
 141     ShenandoahHeapLocker locker(heap->lock());
 142     heap->free_set()->log_status();
 143   }
 144 
 145   // Perform concurrent class unloading
 146   if (heap->unload_classes() &&
 147       heap->is_concurrent_weak_root_in_progress()) {
 148     entry_class_unloading();
 149   }
 150 
 151   // Processing strong roots
 152   // This may be skipped if there is nothing to update/evacuate.
 153   // If so, strong_root_in_progress would be unset.
 154   if (heap->is_concurrent_strong_root_in_progress()) {
 155     entry_strong_roots();
 156   }
 157 
 158   // Continue the cycle with evacuation and optional update-refs.
 159   // This may be skipped if there is nothing to evacuate.
 160   // If so, evac_in_progress would be unset by collection set preparation code.
 161   if (heap->is_evacuation_in_progress()) {
 162     // Concurrently evacuate
 163     entry_evacuate();
 164     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) return false;
 165 
 166     // Perform update-refs phase.
 167     vmop_entry_init_updaterefs();
 168     entry_updaterefs();
 169     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;
 170 
 171     // Concurrent update thread roots
 172     entry_update_thread_roots();
 173     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;
 174 
 175     vmop_entry_final_updaterefs();
 176 
 177     // Update references freed up collection set, kick the cleanup to reclaim the space.
 178     entry_cleanup_complete();
 179   } else {
 180     vmop_entry_final_roots();
 181   }
 182 
 183   return true;
 184 }
 185 
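(Illustrative sketch, not part of this change: one way a caller could consume the false return from collect() above. run_degenerated_cycle() is a hypothetical stand-in; in the actual runtime the Shenandoah control thread owns this logic.)

// Hypothetical hook standing in for the control thread's degenerated-cycle entry.
static void run_degenerated_cycle(ShenandoahGC::ShenandoahDegenPoint point);

static void drive_one_cycle(ShenandoahConcurrentGC& gc, GCCause::Cause cause) {
  if (!gc.collect(cause)) {
    // The concurrent cycle bailed out part-way; degen_point() records how far it got,
    // so the stop-the-world degenerated cycle can resume from that phase rather than
    // restarting from scratch.
    run_degenerated_cycle(gc.degen_point());
  }
}
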
 186 void ShenandoahConcurrentGC::vmop_entry_init_mark() {
 187   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 188   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 189   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);
 190 
 191   heap->try_inject_alloc_failure();
 192   VM_ShenandoahInitMark op(this);
 193   VMThread::execute(&op); // jump to entry_init_mark() under safepoint
 194 }
 195 
 196 void ShenandoahConcurrentGC::vmop_entry_final_mark() {
 197   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 198   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 199   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);
 200 

 206 void ShenandoahConcurrentGC::vmop_entry_init_updaterefs() {
 207   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 208   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 209   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_update_refs_gross);
 210 
 211   heap->try_inject_alloc_failure();
 212   VM_ShenandoahInitUpdateRefs op(this);
 213   VMThread::execute(&op);
 214 }
 215 
 216 void ShenandoahConcurrentGC::vmop_entry_final_updaterefs() {
 217   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 218   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 219   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_gross);
 220 
 221   heap->try_inject_alloc_failure();
 222   VM_ShenandoahFinalUpdateRefs op(this);
 223   VMThread::execute(&op);
 224 }
 225 
 226 void ShenandoahConcurrentGC::vmop_entry_final_roots() {
 227   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 228   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 229   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_roots_gross);
 230 
 231   // This phase does not use workers, no need for setup
 232   heap->try_inject_alloc_failure();
 233   VM_ShenandoahFinalRoots op(this);
 234   VMThread::execute(&op);
 235 }
 236 
 237 void ShenandoahConcurrentGC::entry_init_mark() {
 238   const char* msg = init_mark_event_message();

 239   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_mark);
 240   EventMark em("%s", msg);
 241 
 242   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
 243                               ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
 244                               "init marking");
 245 
 246   op_init_mark();
 247 }
 248 
 249 void ShenandoahConcurrentGC::entry_final_mark() {
 250   const char* msg = final_mark_event_message();

 251   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark);
 252   EventMark em("%s", msg);
 253 
 254   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
 255                               ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
 256                               "final marking");
 257 
 258   op_final_mark();
 259 }
 260 
 261 void ShenandoahConcurrentGC::entry_init_updaterefs() {
 262   static const char* msg = "Pause Init Update Refs";
 263   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs);
 264   EventMark em("%s", msg);
 265 
 266   // No workers used in this phase, no setup required
 267   op_init_updaterefs();
 268 }
 269 
 270 void ShenandoahConcurrentGC::entry_final_updaterefs() {

 285   EventMark em("%s", msg);
 286 
 287   op_final_roots();
 288 }
 289 
 290 void ShenandoahConcurrentGC::entry_reset() {
 291   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 292   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 293   static const char* msg = "Concurrent reset";
 294   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
 295   EventMark em("%s", msg);
 296 
 297   ShenandoahWorkerScope scope(heap->workers(),
 298                               ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
 299                               "concurrent reset");
 300 
 301   heap->try_inject_alloc_failure();
 302   op_reset();
 303 }
 304 
 305 void ShenandoahConcurrentGC::entry_mark_roots() {
 306   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 307   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 308   const char* msg = "Concurrent marking roots";
 309   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
 310   EventMark em("%s", msg);
 311 
 312   ShenandoahWorkerScope scope(heap->workers(),
 313                               ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
 314                               "concurrent marking roots");
 315 
 316   heap->try_inject_alloc_failure();
 317   op_mark_roots();
 318 }
 319 
 320 void ShenandoahConcurrentGC::entry_mark() {

 321   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 322   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 323   const char* msg = conc_mark_event_message();
 324   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark);
 325   EventMark em("%s", msg);
 326 
 327   ShenandoahWorkerScope scope(heap->workers(),
 328                               ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
 329                               "concurrent marking");
 330 
 331   heap->try_inject_alloc_failure();
 332   op_mark();
 333 }
 334 
 335 void ShenandoahConcurrentGC::entry_thread_roots() {
 336   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 337   static const char* msg = "Concurrent thread roots";
 338   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_thread_roots);
 339   EventMark em("%s", msg);
 340 
 341   ShenandoahWorkerScope scope(heap->workers(),
 342                               ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
 343                               msg);

 458   ShenandoahWorkerScope scope(heap->workers(),
 459                               ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
 460                               "concurrent reference update");
 461 
 462   heap->try_inject_alloc_failure();
 463   op_updaterefs();
 464 }
 465 
 466 void ShenandoahConcurrentGC::entry_cleanup_complete() {
 467   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 468   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 469   static const char* msg = "Concurrent cleanup";
 470   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
 471   EventMark em("%s", msg);
 472 
 473   // This phase does not use workers, no need for setup
 474   heap->try_inject_alloc_failure();
 475   op_cleanup_complete();
 476 }
 477 
 478 void ShenandoahConcurrentGC::op_reset() {
 479   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 480   if (ShenandoahPacing) {
 481     heap->pacer()->setup_for_reset();
 482   }
 483 
 484   heap->prepare_gc();
 485 }
 486 
 487 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
 488 private:
 489   ShenandoahMarkingContext* const _ctx;
 490 public:
 491   ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
 492 
 493   void heap_region_do(ShenandoahHeapRegion* r) {
 494     assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
 495     if (r->is_active()) {
 496       // Check if region needs updating its TAMS. We have updated it already during concurrent
 497       // reset, so it is very likely we don't need to do another write here.

 498       if (_ctx->top_at_mark_start(r) != r->top()) {
 499         _ctx->capture_top_at_mark_start(r);
 500       }
 501     } else {
 502       assert(_ctx->top_at_mark_start(r) == r->top(),
 503              "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
 504     }
 505   }
 506 
 507   bool is_thread_safe() { return true; }
 508 };
 509 
 510 void ShenandoahConcurrentGC::start_mark() {
 511   _mark.start_mark();
 512 }
 513 
 514 void ShenandoahConcurrentGC::op_init_mark() {
 515   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 516   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
 517   assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
 518 
 519   assert(heap->marking_context()->is_bitmap_clear(), "need clear marking bitmap");
 520   assert(!heap->marking_context()->is_complete(), "should not be complete");
 521   assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
 522 
 523   if (ShenandoahVerify) {
 524     heap->verifier()->verify_before_concmark();
 525   }
 526 
 527   if (VerifyBeforeGC) {
 528     Universe::verify();
 529   }
 530 
 531   heap->set_concurrent_mark_in_progress(true);
 532 
 533   start_mark();
 534 
 535   {
 536     ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
 537     ShenandoahInitMarkUpdateRegionStateClosure cl;
 538     heap->parallel_heap_region_iterate(&cl);
 539   }
 540 
 541   // Weak reference processing
 542   ShenandoahReferenceProcessor* rp = heap->ref_processor();
 543   rp->reset_thread_locals();
 544   rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());
 545 
 546   // Make above changes visible to worker threads
 547   OrderAccess::fence();

  548   // Arm nmethods for concurrent marking. When an nmethod is about to be executed,
  549   // we need to make sure that all its metadata are marked. The alternative is to re-mark
  550   // thread roots at the final mark pause, but that can be a potential latency killer.
 551   if (heap->unload_classes()) {
 552     ShenandoahCodeRoots::arm_nmethods();
 553   }
 554 
 555   ShenandoahStackWatermark::change_epoch_id();
 556   if (ShenandoahPacing) {
 557     heap->pacer()->setup_for_mark();
 558   }
 559 }
 560 
 561 void ShenandoahConcurrentGC::op_mark_roots() {
 562   _mark.mark_concurrent_roots();
 563 }
 564 
 565 void ShenandoahConcurrentGC::op_mark() {
 566   _mark.concurrent_mark();
 567 }
 568 
 569 void ShenandoahConcurrentGC::op_final_mark() {
 570   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 571   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
 572   assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
 573 
 574   if (ShenandoahVerify) {
 575     heap->verifier()->verify_roots_no_forwarded();
 576   }
 577 
 578   if (!heap->cancelled_gc()) {
 579     _mark.finish_mark();
 580     assert(!heap->cancelled_gc(), "STW mark cannot OOM");
 581 
 582     // Notify JVMTI that the tagmap table will need cleaning.
 583     JvmtiTagMap::set_needs_cleaning();
 584 
 585     heap->prepare_regions_and_collection_set(true /*concurrent*/);
 586 
 587     // Has to be done after cset selection
 588     heap->prepare_concurrent_roots();
 589 
 590     if (!heap->collection_set()->is_empty()) {
 591       if (ShenandoahVerify) {
 592         heap->verifier()->verify_before_evacuation();
 593       }
 594 
 595       heap->set_evacuation_in_progress(true);
 596       // From here on, we need to update references.
 597       heap->set_has_forwarded_objects(true);
 598 
 599       // Verify before arming for concurrent processing.
 600       // Otherwise, verification can trigger stack processing.
 601       if (ShenandoahVerify) {
 602         heap->verifier()->verify_during_evacuation();
 603       }
 604 
 605       // Arm nmethods/stack for concurrent processing
 606       ShenandoahCodeRoots::arm_nmethods();
 607       ShenandoahStackWatermark::change_epoch_id();
 608 
 609       // Notify JVMTI that oops are changed.
 610       JvmtiTagMap::set_needs_rehashing();
 611 
 612       if (ShenandoahPacing) {
 613         heap->pacer()->setup_for_evac();
 614       }
 615     } else {
 616       if (ShenandoahVerify) {
 617         heap->verifier()->verify_after_concmark();
 618       }
 619 
 620       if (VerifyAfterGC) {
 621         Universe::verify();
 622       }
 623     }
 624   }
 625 }
 626 
 627 class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
 628 private:
 629   OopClosure* const _oops;
 630 
 631 public:

 660     _java_threads.threads_do(&thr_cl, worker_id);
 661   }
 662 };
 663 
 664 void ShenandoahConcurrentGC::op_thread_roots() {
 665   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 666   assert(heap->is_evacuation_in_progress(), "Checked by caller");
 667   ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
 668   ShenandoahConcurrentEvacUpdateThreadTask task(heap->workers()->active_workers());
 669   heap->workers()->run_task(&task);
 670 }
 671 
 672 void ShenandoahConcurrentGC::op_weak_refs() {
 673   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 674   assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
 675   // Concurrent weak refs processing
 676   ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
 677   if (heap->gc_cause() == GCCause::_wb_breakpoint) {
 678     ShenandoahBreakpoint::at_after_reference_processing_started();
 679   }
 680   heap->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
 681 }
 682 
 683 class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
 684 private:
 685   ShenandoahHeap* const _heap;
 686   ShenandoahMarkingContext* const _mark_context;
 687   bool  _evac_in_progress;
 688   Thread* const _thread;
 689 
 690 public:
 691   ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
 692   void do_oop(oop* p);
 693   void do_oop(narrowOop* p);
 694 };
 695 
 696 ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
 697   _heap(ShenandoahHeap::heap()),
 698   _mark_context(ShenandoahHeap::heap()->marking_context()),
 699   _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
 700   _thread(Thread::current()) {
 701 }
 702 
 703 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
 704   const oop obj = RawAccess<>::oop_load(p);
 705   if (!CompressedOops::is_null(obj)) {
 706     if (!_mark_context->is_marked(obj)) {
 707       shenandoah_assert_correct(p, obj);
 708       ShenandoahHeap::atomic_clear_oop(p, obj);
 709     } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
 710       oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 711       if (resolved == obj) {
 712         resolved = _heap->evacuate_object(obj, _thread);
 713       }
 714       ShenandoahHeap::atomic_update_oop(resolved, p, obj);
 715       assert(_heap->cancelled_gc() ||
 716              _mark_context->is_marked(resolved) && !_heap->in_collection_set(resolved),
 717              "Sanity");
 718     }
 719   }
 720 }
 721 
 722 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
 723   ShouldNotReachHere();
 724 }
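(Illustrative sketch: a self-contained model of the resolve-or-evacuate-then-CAS idiom used by do_oop() above. std::atomic stands in for the heap's atomic_update_oop(), and forwarding is modeled as an explicit field rather than the object's mark word.)

#include <atomic>

struct FakeObj {
  std::atomic<FakeObj*> forwardee{nullptr};   // set once the object has been copied
};

static FakeObj* resolve_forwarded(FakeObj* obj) {
  FakeObj* fwd = obj->forwardee.load(std::memory_order_acquire);
  return (fwd != nullptr) ? fwd : obj;
}

static FakeObj* evacuate(FakeObj* obj) {
  FakeObj* copy = new FakeObj();
  FakeObj* expected = nullptr;
  // First installer wins; losers adopt the winner's copy.
  if (!obj->forwardee.compare_exchange_strong(expected, copy)) {
    delete copy;
    return expected;
  }
  return copy;
}

static void update_root(std::atomic<FakeObj*>& slot) {
  FakeObj* obj = slot.load();
  if (obj == nullptr) return;
  FakeObj* resolved = resolve_forwarded(obj);
  if (resolved == obj) {
    resolved = evacuate(obj);
  }
  // Only swing the slot if it still holds the stale value, like atomic_update_oop().
  slot.compare_exchange_strong(obj, resolved);
}
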
 725 
 726 class ShenandoahIsCLDAliveClosure : public CLDClosure {
 727 public:
 728   void do_cld(ClassLoaderData* cld) {

 913   assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
 914   ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
 915   heap->workers()->run_task(&task);
 916   heap->set_concurrent_strong_root_in_progress(false);
 917 }
 918 
 919 void ShenandoahConcurrentGC::op_cleanup_early() {
 920   ShenandoahHeap::heap()->free_set()->recycle_trash();
 921 }
 922 
 923 void ShenandoahConcurrentGC::op_evacuate() {
 924   ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
 925 }
 926 
 927 void ShenandoahConcurrentGC::op_init_updaterefs() {
 928   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 929   heap->set_evacuation_in_progress(false);
 930   heap->set_concurrent_weak_root_in_progress(false);
 931   heap->prepare_update_heap_references(true /*concurrent*/);
 932   heap->set_update_refs_in_progress(true);
 933 
 934   if (ShenandoahPacing) {
 935     heap->pacer()->setup_for_updaterefs();
 936   }
 937 }
 938 
 939 void ShenandoahConcurrentGC::op_updaterefs() {
 940   ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
 941 }
 942 
 943 class ShenandoahUpdateThreadClosure : public HandshakeClosure {
 944 private:
 945   ShenandoahUpdateRefsClosure _cl;
 946 public:
 947   ShenandoahUpdateThreadClosure();
 948   void do_thread(Thread* thread);
 949 };
 950 
 951 ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
 952   HandshakeClosure("Shenandoah Update Thread Roots") {
 953 }

 958     ResourceMark rm;
 959     jt->oops_do(&_cl, NULL);
 960   }
 961 }
 962 
 963 void ShenandoahConcurrentGC::op_update_thread_roots() {
 964   ShenandoahUpdateThreadClosure cl;
 965   Handshake::execute(&cl);
 966 }
 967 
 968 void ShenandoahConcurrentGC::op_final_updaterefs() {
 969   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 970   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
 971   assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");
 972 
 973   heap->finish_concurrent_roots();
 974 
 975   // Clear cancelled GC, if set. On cancellation path, the block before would handle
 976   // everything.
 977   if (heap->cancelled_gc()) {
 978     heap->clear_cancelled_gc();
 979   }
 980 
 981   // Has to be done before cset is clear
 982   if (ShenandoahVerify) {
 983     heap->verifier()->verify_roots_in_to_space();
 984   }
 985 
 986   heap->update_heap_region_states(true /*concurrent*/);
 987 
 988   heap->set_update_refs_in_progress(false);
 989   heap->set_has_forwarded_objects(false);
 990 
 991   if (ShenandoahVerify) {
 992     heap->verifier()->verify_after_updaterefs();
 993   }
 994 
 995   if (VerifyAfterGC) {
 996     Universe::verify();
 997   }
 998 
 999   heap->rebuild_free_set(true /*concurrent*/);
1000 }
1001 
1002 void ShenandoahConcurrentGC::op_final_roots() {
1003   ShenandoahHeap::heap()->set_concurrent_weak_root_in_progress(false);
1004 }
1005 
1006 void ShenandoahConcurrentGC::op_cleanup_complete() {
1007   ShenandoahHeap::heap()->free_set()->recycle_trash();
1008 }
1009 
1010 bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
1011   if (ShenandoahHeap::heap()->cancelled_gc()) {
1012     _degen_point = point;
1013     return true;
1014   }
1015   return false;
1016 }
1017 
1018 const char* ShenandoahConcurrentGC::init_mark_event_message() const {
1019   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1020   assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1021   if (heap->unload_classes()) {
1022     return "Pause Init Mark (unload classes)";
1023   } else {
1024     return "Pause Init Mark";
1025   }
1026 }
1027 
1028 const char* ShenandoahConcurrentGC::final_mark_event_message() const {
1029   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1030   assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");

1031   if (heap->unload_classes()) {
1032     return "Pause Final Mark (unload classes)";
1033   } else {
1034     return "Pause Final Mark";
1035   }
1036 }
1037 
1038 const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
1039   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1040   assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");

1041   if (heap->unload_classes()) {
1042     return "Concurrent marking (unload classes)";
1043   } else {
1044     return "Concurrent marking";
1045   }
1046 }

  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 
  27 #include "gc/shared/barrierSetNMethod.hpp"
  28 #include "gc/shared/collectorCounters.hpp"
  29 #include "gc/shared/continuationGCSupport.inline.hpp"
  30 #include "gc/shenandoah/shenandoahBreakpoint.hpp"
  31 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  32 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
  33 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  34 #include "gc/shenandoah/shenandoahGeneration.hpp"
  35 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  36 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  37 #include "gc/shenandoah/shenandoahLock.hpp"
  38 #include "gc/shenandoah/shenandoahMark.inline.hpp"
  39 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  40 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  41 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  42 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  43 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  44 #include "gc/shenandoah/shenandoahStackWatermark.hpp"
  45 #include "gc/shenandoah/shenandoahUtils.hpp"
  46 #include "gc/shenandoah/shenandoahVerifier.hpp"
  47 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  48 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  49 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  50 #include "memory/allocation.hpp"
  51 #include "prims/jvmtiTagMap.hpp"
  52 #include "runtime/vmThread.hpp"
  53 #include "utilities/events.hpp"
  54 
  55 // Breakpoint support
  56 class ShenandoahBreakpointGCScope : public StackObj {

  71   }
  72 };
  73 
  74 class ShenandoahBreakpointMarkScope : public StackObj {
  75 private:
  76   const GCCause::Cause _cause;
  77 public:
  78   ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
  79     if (_cause == GCCause::_wb_breakpoint) {
  80       ShenandoahBreakpoint::at_after_marking_started();
  81     }
  82   }
  83 
  84   ~ShenandoahBreakpointMarkScope() {
  85     if (_cause == GCCause::_wb_breakpoint) {
  86       ShenandoahBreakpoint::at_before_marking_completed();
  87     }
  88   }
  89 };
  90 
  91 ShenandoahConcurrentGC::ShenandoahConcurrentGC(ShenandoahGeneration* generation, bool do_old_gc_bootstrap) :
  92   _mark(generation),
  93   _degen_point(ShenandoahDegenPoint::_degenerated_unset),
  94   _abbreviated(false),
  95   _do_old_gc_bootstrap(do_old_gc_bootstrap),
  96   _generation(generation) {
  97 }
  98 
  99 ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
 100   return _degen_point;
 101 }
 102 
 103 bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
 104   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 105   heap->start_conc_gc();
 106 
 107   ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);
 108 
 109   // Reset for upcoming marking
 110   entry_reset();
 111 
 112   // Start initial mark under STW
 113   vmop_entry_init_mark();
 114 
 115   {
 116     ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);
 117 
 118     // Reset task queue stats here, rather than in mark_concurrent_roots
 119     // because remembered set scan will `push` oops into the queues and
 120     // resetting after this happens will lose those counts.
 121     TASKQUEUE_STATS_ONLY(_mark.task_queues()->reset_taskqueue_stats());
 122 
 123     // Concurrent remembered set scanning
 124     entry_scan_remembered_set();
 125     // When RS scanning yields, we will need a check_cancellation_and_abort()
 126     // degeneration point here.
 127 
 128     // Concurrent mark roots
 129     entry_mark_roots();
 130     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_roots)) return false;
 131 
 132     // Continue concurrent mark
 133     entry_mark();
 134     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) return false;
 135   }
 136 
 137   // Complete marking under STW, and start evacuation
 138   vmop_entry_final_mark();
 139 
 140   // If GC was cancelled before final mark, then the safepoint operation will do nothing
 141   // and the concurrent mark will still be in progress. In this case it is safe to resume
 142   // the degenerated cycle from the marking phase. On the other hand, if the GC is cancelled
 143   // after final mark (but before this check), then the final mark safepoint operation
 144   // will have finished the mark (setting concurrent mark in progress to false). Final mark
 145   // will also have set up state (in concurrent stack processing) that will not be safe to
 146   // resume from the marking phase in the degenerated cycle. That is, if the cancellation
 147   // occurred after final mark, we must resume the degenerated cycle after the marking phase.
 148   if (_generation->is_concurrent_mark_in_progress() && check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) {
 149     assert(!heap->is_concurrent_weak_root_in_progress(), "Weak roots should not be in progress when concurrent mark is in progress");
 150     return false;
 151   }
 152 
 153   // Concurrent stack processing
 154   if (heap->is_evacuation_in_progress()) {
 155     entry_thread_roots();
 156   }
 157 
 158   // Process weak roots that might still point to regions that would be broken by cleanup
 159   if (heap->is_concurrent_weak_root_in_progress()) {
 160     entry_weak_refs();
 161     entry_weak_roots();
 162   }
 163 
 164   // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
 165   // the space. This would be the last action if there is nothing to evacuate.  Note that
 166   // we will not age young-gen objects in the case that we skip evacuation.
 167   entry_cleanup_early();
 168 
 169   {
 170     ShenandoahHeapLocker locker(heap->lock());
 171     heap->free_set()->log_status();
 172   }
 173 
 174   // Perform concurrent class unloading
 175   if (heap->unload_classes() &&
 176       heap->is_concurrent_weak_root_in_progress()) {
 177     entry_class_unloading();
 178   }
 179 
 180   // Processing strong roots
 181   // This may be skipped if there is nothing to update/evacuate.
 182   // If so, strong_root_in_progress would be unset.
 183   if (heap->is_concurrent_strong_root_in_progress()) {
 184     entry_strong_roots();
 185   }
 186 
 187   // Global marking has completed. We need to fill in any unmarked objects in the old generation
 188   // so that subsequent remembered set scans will not walk pointers into reclaimed memory.
 189   if (!heap->cancelled_gc() && heap->mode()->is_generational() && _generation->generation_mode() == GLOBAL) {
 190     entry_global_coalesce_and_fill();
 191   }
 192 
 193   // Continue the cycle with evacuation and optional update-refs.
 194   // This may be skipped if there is nothing to evacuate.
 195   // If so, evac_in_progress would be unset by collection set preparation code.
 196   if (heap->is_evacuation_in_progress()) {
 197     // Concurrently evacuate
 198     entry_evacuate();
 199     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) return false;
 200 
 201     // Perform update-refs phase.
 202     vmop_entry_init_updaterefs();
 203     entry_updaterefs();
 204     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;
 205 
 206     // Concurrent update thread roots
 207     entry_update_thread_roots();
 208     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;
 209 
 210     vmop_entry_final_updaterefs();
 211 
 212     // Update references freed up collection set, kick the cleanup to reclaim the space.
 213     entry_cleanup_complete();
 214   } else {
 215     // We chose not to evacuate because we found sufficient immediate garbage.
 216     vmop_entry_final_roots(heap->is_aging_cycle());
 217     _abbreviated = true;
 218   }
 219 
 220   if (heap->mode()->is_generational()) {
 221     size_t old_available, young_available;
 222     {
 223       ShenandoahYoungGeneration* young_gen = heap->young_generation();
 224       ShenandoahGeneration* old_gen = heap->old_generation();
 225       ShenandoahHeapLocker locker(heap->lock());
 226 
 227       size_t old_usage_before_evac = heap->capture_old_usage(0);
 228       size_t old_usage_now = old_gen->used();
 229       size_t promoted_bytes = old_usage_now - old_usage_before_evac;
 230       heap->set_previous_promotion(promoted_bytes);
 231 
 232       young_gen->unadjust_available();
 233       old_gen->unadjust_available();
 234       // No need to old_gen->increase_used().
 235       // That was done when plabs were allocated, accounting for both old evacs and promotions.
 236 
 237       young_available = young_gen->adjusted_available();
 238       old_available = old_gen->adjusted_available();
 239 
 240       heap->set_alloc_supplement_reserve(0);
 241       heap->set_young_evac_reserve(0);
 242       heap->set_old_evac_reserve(0);
 243       heap->reset_old_evac_expended();
 244       heap->set_promoted_reserve(0);
 245     }
 246     log_info(gc, ergo)("At end of Concurrent GC, old_available: " SIZE_FORMAT "%s, young_available: " SIZE_FORMAT "%s",
 247                        byte_size_in_proper_unit(old_available), proper_unit_for_byte_size(old_available),
 248                        byte_size_in_proper_unit(young_available), proper_unit_for_byte_size(young_available));
 249   }
 250 
 251   return true;
 252 }
 253 
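(Worked example of the promotion accounting in the generational block above. The numbers are illustrative only; the real values come from capture_old_usage() and old_gen->used().)

#include <cstddef>

static size_t previous_promotion_example() {
  const size_t M = 1024 * 1024;
  size_t old_usage_before_evac = 96 * M;   // value captured by capture_old_usage() before evacuation
  size_t old_usage_now         = 128 * M;  // old_gen->used() at the end of the cycle
  // 32 MB were promoted this cycle; this figure feeds set_previous_promotion().
  // The per-cycle reserves (alloc supplement, young/old evac, promoted) are then zeroed.
  return old_usage_now - old_usage_before_evac;
}
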
 254 void ShenandoahConcurrentGC::vmop_entry_init_mark() {
 255   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 256   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 257   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);
 258 
 259   heap->try_inject_alloc_failure();
 260   VM_ShenandoahInitMark op(this);
 261   VMThread::execute(&op); // jump to entry_init_mark() under safepoint
 262 }
 263 
 264 void ShenandoahConcurrentGC::vmop_entry_final_mark() {
 265   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 266   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 267   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);
 268 

 274 void ShenandoahConcurrentGC::vmop_entry_init_updaterefs() {
 275   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 276   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 277   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_update_refs_gross);
 278 
 279   heap->try_inject_alloc_failure();
 280   VM_ShenandoahInitUpdateRefs op(this);
 281   VMThread::execute(&op);
 282 }
 283 
 284 void ShenandoahConcurrentGC::vmop_entry_final_updaterefs() {
 285   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 286   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 287   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_gross);
 288 
 289   heap->try_inject_alloc_failure();
 290   VM_ShenandoahFinalUpdateRefs op(this);
 291   VMThread::execute(&op);
 292 }
 293 
 294 void ShenandoahConcurrentGC::vmop_entry_final_roots(bool increment_region_ages) {
 295   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 296   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 297   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_roots_gross);
 298 
 299   // This phase does not use workers, no need for setup
 300   heap->try_inject_alloc_failure();
 301   VM_ShenandoahFinalRoots op(this, increment_region_ages);
 302   VMThread::execute(&op);
 303 }
 304 
 305 void ShenandoahConcurrentGC::entry_init_mark() {
 306   char msg[1024];
 307   init_mark_event_message(msg, sizeof(msg));
 308   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_mark);
 309   EventMark em("%s", msg);
 310 
 311   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
 312                               ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
 313                               "init marking");
 314 
 315   op_init_mark();
 316 }
 317 
 318 void ShenandoahConcurrentGC::entry_final_mark() {
 319   char msg[1024];
 320   final_mark_event_message(msg, sizeof(msg));
 321   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark);
 322   EventMark em("%s", msg);
 323 
 324   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
 325                               ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
 326                               "final marking");
 327 
 328   op_final_mark();
 329 }
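(A minimal sketch of a buffer-based event message helper matching the call sites above, e.g. init_mark_event_message(msg, sizeof(msg)). The helper's actual body is outside this hunk, so the formatting below, and the use of jio_snprintf, are assumptions.)

static void example_init_mark_event_message(char* buf, size_t len) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  // Compose the pause name the same way the old const-char* variant did,
  // but into the caller-supplied buffer.
  jio_snprintf(buf, len, "Pause Init Mark%s",
               heap->unload_classes() ? " (unload classes)" : "");
}
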
 330 
 331 void ShenandoahConcurrentGC::entry_init_updaterefs() {
 332   static const char* msg = "Pause Init Update Refs";
 333   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs);
 334   EventMark em("%s", msg);
 335 
 336   // No workers used in this phase, no setup required
 337   op_init_updaterefs();
 338 }
 339 
 340 void ShenandoahConcurrentGC::entry_final_updaterefs() {

 355   EventMark em("%s", msg);
 356 
 357   op_final_roots();
 358 }
 359 
 360 void ShenandoahConcurrentGC::entry_reset() {
 361   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 362   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 363   static const char* msg = "Concurrent reset";
 364   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
 365   EventMark em("%s", msg);
 366 
 367   ShenandoahWorkerScope scope(heap->workers(),
 368                               ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
 369                               "concurrent reset");
 370 
 371   heap->try_inject_alloc_failure();
 372   op_reset();
 373 }
 374 
 375 void ShenandoahConcurrentGC::entry_scan_remembered_set() {
 376   if (_generation->generation_mode() == YOUNG) {
 377     ShenandoahHeap* const heap = ShenandoahHeap::heap();
 378     TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 379     const char* msg = "Concurrent remembered set scanning";
 380     ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::init_scan_rset);
 381     EventMark em("%s", msg);
 382 
 383     ShenandoahWorkerScope scope(heap->workers(),
 384                                 ShenandoahWorkerPolicy::calc_workers_for_rs_scanning(),
 385                                 msg);
 386 
 387     heap->try_inject_alloc_failure();
 388     _generation->scan_remembered_set(true /* is_concurrent */);
 389   }
 390 }
 391 
 392 void ShenandoahConcurrentGC::entry_mark_roots() {
 393   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 394   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 395   const char* msg = "Concurrent marking roots";
 396   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
 397   EventMark em("%s", msg);
 398 
 399   ShenandoahWorkerScope scope(heap->workers(),
 400                               ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
 401                               "concurrent marking roots");
 402 
 403   heap->try_inject_alloc_failure();
 404   op_mark_roots();
 405 }
 406 
 407 void ShenandoahConcurrentGC::entry_mark() {
 408   char msg[1024];
 409   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 410   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 411   conc_mark_event_message(msg, sizeof(msg));
 412   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark);
 413   EventMark em("%s", msg);
 414 
 415   ShenandoahWorkerScope scope(heap->workers(),
 416                               ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
 417                               "concurrent marking");
 418 
 419   heap->try_inject_alloc_failure();
 420   op_mark();
 421 }
 422 
 423 void ShenandoahConcurrentGC::entry_thread_roots() {
 424   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 425   static const char* msg = "Concurrent thread roots";
 426   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_thread_roots);
 427   EventMark em("%s", msg);
 428 
 429   ShenandoahWorkerScope scope(heap->workers(),
 430                               ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
 431                               msg);

 546   ShenandoahWorkerScope scope(heap->workers(),
 547                               ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
 548                               "concurrent reference update");
 549 
 550   heap->try_inject_alloc_failure();
 551   op_updaterefs();
 552 }
 553 
 554 void ShenandoahConcurrentGC::entry_cleanup_complete() {
 555   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 556   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 557   static const char* msg = "Concurrent cleanup";
 558   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
 559   EventMark em("%s", msg);
 560 
 561   // This phase does not use workers, no need for setup
 562   heap->try_inject_alloc_failure();
 563   op_cleanup_complete();
 564 }
 565 
 566 void ShenandoahConcurrentGC::entry_global_coalesce_and_fill() {
 567   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 568 
 569   const char* msg = "Coalescing and filling old regions in global collect";
 570   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::coalesce_and_fill);
 571 
 572   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 573   EventMark em("%s", msg);
 574   ShenandoahWorkerScope scope(heap->workers(),
 575                               ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
 576                               "concurrent coalesce and fill");
 577 
 578   op_global_coalesce_and_fill();
 579 }
 580 
 581 void ShenandoahConcurrentGC::op_reset() {
 582   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 583   if (ShenandoahPacing) {
 584     heap->pacer()->setup_for_reset();
 585   }
 586   _generation->prepare_gc();

 587 }
 588 
 589 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
 590 private:
 591   ShenandoahMarkingContext* const _ctx;
 592 public:
 593   ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
 594 
 595   void heap_region_do(ShenandoahHeapRegion* r) {
 596     assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
 597     if (r->is_active()) {
 598       // Check if region needs updating its TAMS. We have updated it already during concurrent
 599       // reset, so it is very likely we don't need to do another write here.  Since most regions
 600       // are not "active", this path is relatively rare.
 601       if (_ctx->top_at_mark_start(r) != r->top()) {
 602         _ctx->capture_top_at_mark_start(r);
 603       }
 604     } else {
 605       assert(_ctx->top_at_mark_start(r) == r->top(),
 606              "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
 607     }
 608   }
 609 
 610   bool is_thread_safe() { return true; }
 611 };
 612 
 613 void ShenandoahConcurrentGC::start_mark() {
 614   _mark.start_mark();
 615 }
 616 
 617 void ShenandoahConcurrentGC::op_init_mark() {
 618   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 619   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
 620   assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
 621 
 622   assert(_generation->is_bitmap_clear(), "need clear marking bitmap");
 623   assert(!_generation->is_mark_complete(), "should not be complete");
 624   assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
 625 
 626 
 627   if (heap->mode()->is_generational()) {
 628     if (_generation->generation_mode() == YOUNG || (_generation->generation_mode() == GLOBAL && ShenandoahVerify)) {
 629       // The current implementation of swap_remembered_set() copies the write-card-table
 630       // to the read-card-table. The remembered sets are also swapped for GLOBAL collections
 631       // so that the verifier works with the correct copy of the card table when verifying.
 632       ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_swap_rset);
 633       _generation->swap_remembered_set();
 634     }
 635 
 636     if (_generation->generation_mode() == GLOBAL) {
 637       heap->cancel_old_gc();
 638     } else if (heap->is_concurrent_old_mark_in_progress()) {
 639       // Purge the SATB buffers, transferring any valid, old pointers to the
 640       // old generation mark queue. Any pointers in a young region will be
 641       // abandoned.
 642       ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_transfer_satb);
 643       heap->transfer_old_pointers_from_satb();
 644     }
 645   }
 646 
 647   if (ShenandoahVerify) {
 648     heap->verifier()->verify_before_concmark();
 649   }
 650 
 651   if (VerifyBeforeGC) {
 652     Universe::verify();
 653   }
 654 
 655   _generation->set_concurrent_mark_in_progress(true);
 656 
 657   start_mark();
 658 
 659   if (_do_old_gc_bootstrap) {
 660     // Update region state for both young and old regions
 661     // TODO: We should be able to pull this out of the safepoint for the bootstrap
 662     // cycle. The top of an old region will only move when a GC cycle evacuates
 663     // objects into it. When we start an old cycle, we know that nothing can touch
 664     // the top of old regions.
 665     ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
 666     ShenandoahInitMarkUpdateRegionStateClosure cl;
 667     heap->parallel_heap_region_iterate(&cl);
 668   } else {
 669     // Update region state for only young regions
 670     ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
 671     ShenandoahInitMarkUpdateRegionStateClosure cl;
 672     _generation->parallel_heap_region_iterate(&cl);
 673   }
 674 
 675   // Weak reference processing
 676   ShenandoahReferenceProcessor* rp = _generation->ref_processor();
 677   rp->reset_thread_locals();
 678   rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());
 679 
 680   // Make above changes visible to worker threads
 681   OrderAccess::fence();
 682 
 683   // Arm nmethods for concurrent marking. When an nmethod is about to be executed,
 684   // we need to make sure that all its metadata are marked. The alternative is to re-mark
 685   // thread roots at the final mark pause, but that can be a potential latency killer.
 686   if (heap->unload_classes()) {
 687     ShenandoahCodeRoots::arm_nmethods();
 688   }
 689 
 690   ShenandoahStackWatermark::change_epoch_id();
 691   if (ShenandoahPacing) {
 692     heap->pacer()->setup_for_mark();
 693   }
 694 }
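(Self-contained sketch of the double card table idea described in the swap_remembered_set() comment above: mutator barriers dirty a write table while remembered set scanning reads a stable read table, and at init mark the write table's contents are published to the read table. Simplified types only, not the HotSpot data structures.)

#include <cstring>

struct SimpleCardTables {
  static constexpr int num_cards = 1024;
  unsigned char storage_a[num_cards] = {};
  unsigned char storage_b[num_cards] = {};
  unsigned char* write_table = storage_a;  // barrier code dirties cards here
  unsigned char* read_table  = storage_b;  // concurrent RS scan reads this stable snapshot

  void publish_for_scan() {
    // At init mark: publish the dirty cards accumulated since the last cycle.
    // The comment above notes the current implementation copies (rather than merely
    // swapping pointers), so the write table keeps collecting new dirty cards.
    std::memcpy(read_table, write_table, num_cards);
  }
};
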
 695 
 696 void ShenandoahConcurrentGC::op_mark_roots() {
 697   _mark.mark_concurrent_roots();
 698 }
 699 
 700 void ShenandoahConcurrentGC::op_mark() {
 701   _mark.concurrent_mark();
 702 }
 703 
 704 void ShenandoahConcurrentGC::op_final_mark() {
 705   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 706   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
 707   assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
 708 
 709   if (ShenandoahVerify) {
 710     heap->verifier()->verify_roots_no_forwarded();
 711   }
 712 
 713   if (!heap->cancelled_gc()) {
 714     _mark.finish_mark();
 715     assert(!heap->cancelled_gc(), "STW mark cannot OOM");
 716 
 717     // Notify JVMTI that the tagmap table will need cleaning.
 718     JvmtiTagMap::set_needs_cleaning();
 719 
 720     // The collection set is chosen by prepare_regions_and_collection_set().
 721     //
 722     // TODO: Under severe memory overload conditions that can be checked here, we may want to limit
 723     // the inclusion of old-gen candidates within the collection set.  This would allow us to prioritize efforts on
 724     // evacuating young-gen.  This remediation is most appropriate when old-gen availability is very high (so there
 725     // are negligible negative impacts from delaying completion of old-gen evacuation) and when young-gen collections
 726     // are "under duress" (as signalled by very low availability of memory within young-gen, indicating that young-gen
 727     // collections are not triggering frequently enough).
 728     _generation->prepare_regions_and_collection_set(true /*concurrent*/);
 729 
 730     // Upon return from prepare_regions_and_collection_set(), certain parameters have been established to govern the
 731     // evacuation efforts that are about to begin.  In particular:
 732     //
 733     // heap->get_promoted_reserve() represents the amount of memory within old-gen's available memory that has
 734     //   been set aside to hold objects promoted from young-gen memory.  This represents an estimated percentage
 735     //   of the live young-gen memory within the collection set.  If there is more data ready to be promoted than
 736     //   can fit within this reserve, the promotion of some objects will be deferred until a subsequent evacuation
 737     //   pass.
 738     //
 739     // heap->get_old_evac_reserve() represents the amount of memory within old-gen's available memory that has been
 740     //  set aside to hold objects evacuated from the old-gen collection set.
 741     //
 742     // heap->get_young_evac_reserve() represents the amount of memory within young-gen's available memory that has
 743     //  been set aside to hold objects evacuated from the young-gen collection set.  Conservatively, this value
 744     //  equals the entire amount of live young-gen memory within the collection set, even though some of this memory
 745     //  will likely be promoted.
 746     //
 747     // heap->get_alloc_supplement_reserve() represents the amount of old-gen memory that can be allocated during evacuation
 748     // and update-refs phases of gc.  The young evacuation reserve has already been removed from this quantity.
 749 
 750     // Has to be done after cset selection
 751     heap->prepare_concurrent_roots();
 752 
 753     if (!heap->collection_set()->is_empty()) {
 754       if (ShenandoahVerify) {
 755         heap->verifier()->verify_before_evacuation();
 756       }
 757 
 758       heap->set_evacuation_in_progress(true);
 759       // From here on, we need to update references.
 760       heap->set_has_forwarded_objects(true);
 761 
 762       // Verify before arming for concurrent processing.
 763       // Otherwise, verification can trigger stack processing.
 764       if (ShenandoahVerify) {
 765         heap->verifier()->verify_during_evacuation();
 766       }
 767 
 768       // Arm nmethods/stack for concurrent processing
 769       ShenandoahCodeRoots::arm_nmethods();
 770       ShenandoahStackWatermark::change_epoch_id();
 771 
 772       // Notify JVMTI that oops are changed.
 773       JvmtiTagMap::set_needs_rehashing();
 774 
 775       if (heap->mode()->is_generational()) {
 776         // Calculate the temporary evacuation allowance supplement to young-gen memory capacity (for allocations
 777         // and young-gen evacuations).
 778         size_t young_available = heap->young_generation()->adjust_available(heap->get_alloc_supplement_reserve());
 779         // old_available is memory that can hold promotions and evacuations.  Subtract out the memory that is being
 780         // loaned for young-gen allocations or evacuations.
 781         size_t old_available = heap->old_generation()->adjust_available(-heap->get_alloc_supplement_reserve());
 782 
 783         log_info(gc, ergo)("After generational memory budget adjustments, old available: " SIZE_FORMAT
 784                            "%s, young_available: " SIZE_FORMAT "%s",
 785                            byte_size_in_proper_unit(old_available), proper_unit_for_byte_size(old_available),
 786                            byte_size_in_proper_unit(young_available), proper_unit_for_byte_size(young_available));
 787       }
 788 
 789       if (ShenandoahPacing) {
 790         heap->pacer()->setup_for_evac();
 791       }
 792     } else {
 793       if (ShenandoahVerify) {
 794         heap->verifier()->verify_after_concmark();
 795       }
 796 
 797       if (VerifyAfterGC) {
 798         Universe::verify();
 799       }
 800     }
 801   }
 802 }
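(Simplified numeric model of the allocation-supplement loan made in the generational branch above via adjust_available(), and undone by unadjust_available() at the end of collect(). Plain integers only; the real figures come from the heap.)

static void alloc_supplement_loan_example() {
  long young_available  = 256;  // MB available to young-gen before adjustment
  long old_available    = 512;  // MB available to old-gen before adjustment
  long alloc_supplement = 64;   // MB old-gen lends young-gen for evacuation/update-refs

  // Mirrors young->adjust_available(+supplement) and old->adjust_available(-supplement).
  young_available += alloc_supplement;   // 320 MB usable for allocations and young evacuations
  old_available   -= alloc_supplement;   // 448 MB left for promotions and old evacuations

  // At the end of the cycle both generations are "unadjusted" back to the un-loaned figures.
  young_available -= alloc_supplement;   // back to 256 MB
  old_available   += alloc_supplement;   // back to 512 MB
  (void)young_available;
  (void)old_available;
}
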
 803 
 804 class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
 805 private:
 806   OopClosure* const _oops;
 807 
 808 public:

 837     _java_threads.threads_do(&thr_cl, worker_id);
 838   }
 839 };
 840 
 841 void ShenandoahConcurrentGC::op_thread_roots() {
 842   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 843   assert(heap->is_evacuation_in_progress(), "Checked by caller");
 844   ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
 845   ShenandoahConcurrentEvacUpdateThreadTask task(heap->workers()->active_workers());
 846   heap->workers()->run_task(&task);
 847 }
 848 
 849 void ShenandoahConcurrentGC::op_weak_refs() {
 850   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 851   assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
 852   // Concurrent weak refs processing
 853   ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
 854   if (heap->gc_cause() == GCCause::_wb_breakpoint) {
 855     ShenandoahBreakpoint::at_after_reference_processing_started();
 856   }
 857   _generation->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
 858 }
 859 
 860 class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
 861 private:
 862   ShenandoahHeap* const _heap;
 863   ShenandoahMarkingContext* const _mark_context;
 864   bool  _evac_in_progress;
 865   Thread* const _thread;
 866 
 867 public:
 868   ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
 869   void do_oop(oop* p);
 870   void do_oop(narrowOop* p);
 871 };
 872 
 873 ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
 874   _heap(ShenandoahHeap::heap()),
 875   _mark_context(ShenandoahHeap::heap()->marking_context()),
 876   _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
 877   _thread(Thread::current()) {
 878 }
 879 
 880 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
 881   const oop obj = RawAccess<>::oop_load(p);
 882   if (!CompressedOops::is_null(obj)) {
 883     if (!_mark_context->is_marked(obj)) {
 884       if (_heap->is_in_active_generation(obj)) {
 885         // TODO: This worries me. Here we are asserting that an unmarked from-space object is 'correct'.
 886         // Normally, I would call this a bogus assert, but there seems to be a legitimate use-case for
 887         // accessing from-space objects during class unloading. However, the from-space object may have
 888         // been "filled". We've made no effort to prevent old generation classes from being unloaded by
 889         // young gen (and vice-versa).
 890         shenandoah_assert_correct(p, obj);
 891         ShenandoahHeap::atomic_clear_oop(p, obj);
 892       }
 893     } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
 894       oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 895       if (resolved == obj) {
 896         resolved = _heap->evacuate_object(obj, _thread);
 897       }
 898       ShenandoahHeap::atomic_update_oop(resolved, p, obj);
 899       assert(_heap->cancelled_gc() ||
 900              (_mark_context->is_marked(resolved) && !_heap->in_collection_set(resolved)),
 901              "Sanity");
 902     }
 903   }
 904 }
 905 
 906 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
 907   ShouldNotReachHere();
 908 }
 909 
 910 class ShenandoahIsCLDAliveClosure : public CLDClosure {
 911 public:
 912   void do_cld(ClassLoaderData* cld) {

1097   assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
1098   ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
1099   heap->workers()->run_task(&task);
1100   heap->set_concurrent_strong_root_in_progress(false);
1101 }
1102 
1103 void ShenandoahConcurrentGC::op_cleanup_early() {
1104   ShenandoahHeap::heap()->free_set()->recycle_trash();
1105 }
1106 
1107 void ShenandoahConcurrentGC::op_evacuate() {
1108   ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
1109 }
1110 
1111 void ShenandoahConcurrentGC::op_init_updaterefs() {
1112   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1113   heap->set_evacuation_in_progress(false);
1114   heap->set_concurrent_weak_root_in_progress(false);
1115   heap->prepare_update_heap_references(true /*concurrent*/);
1116   heap->set_update_refs_in_progress(true);
1117   if (ShenandoahVerify) {
1118     heap->verifier()->verify_before_updaterefs();
1119   }
1120   if (ShenandoahPacing) {
1121     heap->pacer()->setup_for_updaterefs();
1122   }
1123 }
1124 
1125 void ShenandoahConcurrentGC::op_updaterefs() {
1126   ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
1127 }
1128 
1129 class ShenandoahUpdateThreadClosure : public HandshakeClosure {
1130 private:
1131   ShenandoahUpdateRefsClosure _cl;
1132 public:
1133   ShenandoahUpdateThreadClosure();
1134   void do_thread(Thread* thread);
1135 };
1136 
1137 ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
1138   HandshakeClosure("Shenandoah Update Thread Roots") {
1139 }

1144     ResourceMark rm;
1145     jt->oops_do(&_cl, NULL);
1146   }
1147 }
1148 
1149 void ShenandoahConcurrentGC::op_update_thread_roots() {
1150   ShenandoahUpdateThreadClosure cl;
1151   Handshake::execute(&cl);
1152 }
1153 
1154 void ShenandoahConcurrentGC::op_final_updaterefs() {
1155   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1156   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
1157   assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");
1158 
1159   heap->finish_concurrent_roots();
1160 
1161   // Clear cancelled GC, if set. On cancellation path, the block before would handle
1162   // everything.
1163   if (heap->cancelled_gc()) {
1164     heap->clear_cancelled_gc(true /* clear oom handler */);
1165   }
1166 
1167   // Has to be done before the cset is cleared
1168   if (ShenandoahVerify) {
1169     heap->verifier()->verify_roots_in_to_space();
1170   }
1171 
1172   if (heap->mode()->is_generational() && heap->is_concurrent_old_mark_in_progress()) {
1173     // When the SATB barrier is left on to support concurrent old gen mark, it may pick up writes to
1174     // objects in the collection set. After those objects are evacuated, the pointers in the
1175     // SATB are no longer safe. Once we have finished update references, we are guaranteed that
1176     // no more writes to the collection set are possible.
1177     //
1178     // This will transfer any old pointers in _active_ regions from the SATB to the old gen
1179     // mark queues. All other pointers will be discarded. This would also discard any pointers
1180     // in old regions that were included in a mixed evacuation. We aren't using the SATB filter
1181     // methods here because we cannot control when they execute. If the SATB filter runs _after_
1182     // a region has been recycled, we will not be able to detect the bad pointer.
1183     //
1184     // We are not concerned about skipping this step in abbreviated cycles because regions
1185     // with no live objects cannot have been written to and so cannot have entries in the SATB
1186     // buffers.
1187     heap->transfer_old_pointers_from_satb();
1188   }
1189 
1190   heap->update_heap_region_states(true /*concurrent*/);
1191 
1192   heap->set_update_refs_in_progress(false);
1193   heap->set_has_forwarded_objects(false);
1194 
1195   // The aging cycle is only relevant during an evacuation cycle (for individual objects) and during
1196   // final mark (for entire regions).  Both of these operations occur before final update refs.
1197   heap->set_aging_cycle(false);
1198 
1199   if (ShenandoahVerify) {
1200     heap->verifier()->verify_after_updaterefs();
1201   }
1202 
1203   if (VerifyAfterGC) {
1204     Universe::verify();
1205   }
1206 
1207   heap->rebuild_free_set(true /*concurrent*/);
1208 }
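// Illustrative sketch, not the actual implementation: conceptually, the SATB transfer performed in
// op_final_updaterefs() above walks the SATB buffers and keeps only entries that still point into
// *active* old regions, pushing those onto the old-gen mark queues; every other entry (young
// pointers, pointers into old regions that were part of a mixed evacuation) is discarded, since the
// referent may have been evacuated or its region recycled. The names is_in_active_old_region() and
// old_mark_queue_push() below are hypothetical stand-ins for the real region/queue APIs.
#ifdef SHENANDOAH_SATB_TRANSFER_SKETCH   // hypothetical guard, never defined in a real build
static void sketch_transfer_old_pointers(oop* buffer, size_t count,
                                         bool (*is_in_active_old_region)(oop),
                                         void (*old_mark_queue_push)(oop)) {
  for (size_t i = 0; i < count; i++) {
    oop obj = buffer[i];
    if (!CompressedOops::is_null(obj) && is_in_active_old_region(obj)) {
      old_mark_queue_push(obj);   // keep: still needed by concurrent old-gen marking
    }
    // else: drop the entry -- it is either a young pointer or points into an old
    // region that was evacuated, so it can no longer be trusted.
  }
}
#endif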
1209 
1210 void ShenandoahConcurrentGC::op_final_roots() {
1211   ShenandoahHeap::heap()->set_concurrent_weak_root_in_progress(false);
1212 }
1213 
1214 void ShenandoahConcurrentGC::op_cleanup_complete() {
1215   ShenandoahHeap::heap()->free_set()->recycle_trash();
1216 }
1217 
1218 void ShenandoahConcurrentGC::op_global_coalesce_and_fill() {
1219   ShenandoahHeap::heap()->coalesce_and_fill_old_regions();
1220 }
1221 
1222 bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
1223   if (ShenandoahHeap::heap()->cancelled_gc()) {
1224     _degen_point = point;
1225     return true;
1226   }
1227   return false;
1228 }
1229 
1230 void ShenandoahConcurrentGC::init_mark_event_message(char* buf, size_t len) const {
1231   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1232   assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1233   if (heap->unload_classes()) {
1234     jio_snprintf(buf, len, "Pause Init Mark (%s) (unload classes)", _generation->name());
1235   } else {
1236     jio_snprintf(buf, len, "Pause Init Mark (%s)", _generation->name());
1237   }
1238 }
1239 
1240 void ShenandoahConcurrentGC::final_mark_event_message(char* buf, size_t len) const {
1241   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1242   assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
1243          "Should not have forwarded objects during final mark (unless old gen concurrent mark is running)");
1244   if (heap->unload_classes()) {
1245     jio_snprintf(buf, len, "Pause Final Mark (%s) (unload classes)", _generation->name());
1246   } else {
1247     jio_snprintf(buf, len, "Pause Final Mark (%s)", _generation->name());
1248   }
1249 }
1250 
1251 void ShenandoahConcurrentGC::conc_mark_event_message(char* buf, size_t len) const {
1252   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1253   assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
1254          "Should not have forwarded objects during concurrent mark (unless old gen concurrent mark is running)");
1255   if (heap->unload_classes()) {
1256     jio_snprintf(buf, len, "Concurrent marking (%s) (unload classes)", _generation->name());
1257   } else {
1258     jio_snprintf(buf, len, "Concurrent marking (%s)", _generation->name());
1259   }
1260 }
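// Illustrative usage sketch, not the actual call sites: each *_event_message() helper above formats
// into a caller-owned, fixed-size buffer via jio_snprintf(), which truncates rather than overflows,
// so a modest stack buffer is sufficient. The caller below is hypothetical (and assumes the helper
// is reachable from where it is called); the real call sites are in the mark entry paths earlier in
// this file.
#ifdef SHENANDOAH_EVENT_MESSAGE_SKETCH   // hypothetical guard, never defined in a real build
static void sketch_log_conc_mark_message(ShenandoahConcurrentGC* gc) {
  char msg[128];                                  // jio_snprintf truncates if the message is longer
  gc->conc_mark_event_message(msg, sizeof(msg));  // e.g. "Concurrent marking (Young) (unload classes)"
  log_info(gc)("%s", msg);
}
#endif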