
src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp


  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 
  27 #include "gc/shared/barrierSetNMethod.hpp"
  28 #include "gc/shared/collectorCounters.hpp"
  29 #include "gc/shenandoah/shenandoahBreakpoint.hpp"
  30 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  31 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
  32 #include "gc/shenandoah/shenandoahFreeSet.hpp"



  33 #include "gc/shenandoah/shenandoahLock.hpp"
  34 #include "gc/shenandoah/shenandoahMark.inline.hpp"
  35 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  36 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  37 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  38 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  39 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  40 #include "gc/shenandoah/shenandoahStackWatermark.hpp"
  41 #include "gc/shenandoah/shenandoahUtils.hpp"
  42 #include "gc/shenandoah/shenandoahVerifier.hpp"
  43 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  44 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  45 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  46 #include "memory/allocation.hpp"
  47 #include "prims/jvmtiTagMap.hpp"
  48 #include "runtime/vmThread.hpp"
  49 #include "utilities/events.hpp"
  50 
  51 // Breakpoint support
  52 class ShenandoahBreakpointGCScope : public StackObj {

  67   }
  68 };
  69 
  70 class ShenandoahBreakpointMarkScope : public StackObj {
  71 private:
  72   const GCCause::Cause _cause;
  73 public:
  74   ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
  75     if (_cause == GCCause::_wb_breakpoint) {
  76       ShenandoahBreakpoint::at_after_marking_started();
  77     }
  78   }
  79 
  80   ~ShenandoahBreakpointMarkScope() {
  81     if (_cause == GCCause::_wb_breakpoint) {
  82       ShenandoahBreakpoint::at_before_marking_completed();
  83     }
  84   }
  85 };
  86 
  87 ShenandoahConcurrentGC::ShenandoahConcurrentGC() :
  88   _mark(),
  89   _degen_point(ShenandoahDegenPoint::_degenerated_unset) {



  90 }
  91 
  92 ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
  93   return _degen_point;
  94 }
  95 
  96 void ShenandoahConcurrentGC::cancel() {
  97   ShenandoahConcurrentMark::cancel();
  98 }
  99 
 100 bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
 101   ShenandoahHeap* const heap = ShenandoahHeap::heap();


 102   ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);
 103 
 104   // Reset for upcoming marking
 105   entry_reset();
 106 
 107   // Start initial mark under STW
 108   vmop_entry_init_mark();
 109 
 110   {
 111     ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);












 112     // Concurrent mark roots
 113     entry_mark_roots();
 114     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_outside_cycle)) return false;
 115 
 116     // Continue concurrent mark
 117     entry_mark();
 118     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) return false;
 119   }
 120 
 121   // Complete marking under STW, and start evacuation
 122   vmop_entry_final_mark();
 123 













 124   // Concurrent stack processing
 125   if (heap->is_evacuation_in_progress()) {
 126     entry_thread_roots();
 127   }
 128 
 129   // Process weak roots that might still point to regions that would be broken by cleanup
 130   if (heap->is_concurrent_weak_root_in_progress()) {
 131     entry_weak_refs();
 132     entry_weak_roots();
 133   }
 134 
 135   // Final mark might have reclaimed some immediate garbage; kick cleanup to reclaim
 136   // the space. This would be the last action if there is nothing to evacuate.

 137   entry_cleanup_early();
 138 
 139   {
 140     ShenandoahHeapLocker locker(heap->lock());
 141     heap->free_set()->log_status();
 142   }
 143 
 144   // Perform concurrent class unloading
 145   if (heap->unload_classes() &&
 146       heap->is_concurrent_weak_root_in_progress()) {
 147     entry_class_unloading();
 148   }
 149 
 150   // Processing strong roots
 151   // This may be skipped if there is nothing to update/evacuate.
 152   // If so, strong_root_in_progress would be unset.
 153   if (heap->is_concurrent_strong_root_in_progress()) {
 154     entry_strong_roots();
 155   }
 156 






 157   // Continue the cycle with evacuation and optional update-refs.
 158   // This may be skipped if there is nothing to evacuate.
 159   // If so, evac_in_progress would be unset by collection set preparation code.
 160   if (heap->is_evacuation_in_progress()) {
 161     // Concurrently evacuate
 162     entry_evacuate();
 163     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) return false;
 164 
 165     // Perform update-refs phase.
 166     vmop_entry_init_updaterefs();
 167     entry_updaterefs();
 168     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;
 169 
 170     // Concurrent update thread roots
 171     entry_update_thread_roots();
 172     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;
 173 
 174     vmop_entry_final_updaterefs();
 175 
 176     // Updating references freed up the collection set; kick cleanup to reclaim the space.
 177     entry_cleanup_complete();
 178   } else {
 179     vmop_entry_final_roots();


 180   }
 181 




























 182   return true;
 183 }
 184 
 185 void ShenandoahConcurrentGC::vmop_entry_init_mark() {
 186   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 187   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 188   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);
 189 
 190   heap->try_inject_alloc_failure();
 191   VM_ShenandoahInitMark op(this);
 192   VMThread::execute(&op); // jump to entry_init_mark() under safepoint
 193 }
 194 
 195 void ShenandoahConcurrentGC::vmop_entry_final_mark() {
 196   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 197   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 198   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);
 199 
 200   heap->try_inject_alloc_failure();
 201   VM_ShenandoahFinalMarkStartEvac op(this);
 202   VMThread::execute(&op); // jump to entry_final_mark under safepoint
 203 }
 204 
 205 void ShenandoahConcurrentGC::vmop_entry_init_updaterefs() {
 206   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 207   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 208   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_update_refs_gross);
 209 
 210   heap->try_inject_alloc_failure();
 211   VM_ShenandoahInitUpdateRefs op(this);
 212   VMThread::execute(&op);
 213 }
 214 
 215 void ShenandoahConcurrentGC::vmop_entry_final_updaterefs() {
 216   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 217   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 218   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_gross);
 219 
 220   heap->try_inject_alloc_failure();
 221   VM_ShenandoahFinalUpdateRefs op(this);
 222   VMThread::execute(&op);
 223 }
 224 
 225 void ShenandoahConcurrentGC::vmop_entry_final_roots() {
 226   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 227   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 228   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_roots_gross);
 229 
 230   // This phase does not use workers, no need for setup
 231   heap->try_inject_alloc_failure();
 232   VM_ShenandoahFinalRoots op(this);
 233   VMThread::execute(&op);
 234 }
 235 
 236 void ShenandoahConcurrentGC::entry_init_mark() {
 237   const char* msg = init_mark_event_message();

 238   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_mark);
 239   EventMark em("%s", msg);
 240 
 241   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
 242                               ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
 243                               "init marking");
 244 
 245   op_init_mark();
 246 }
 247 
 248 void ShenandoahConcurrentGC::entry_final_mark() {
 249   const char* msg = final_mark_event_message();

 250   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark);
 251   EventMark em("%s", msg);
 252 
 253   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
 254                               ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
 255                               "final marking");
 256 
 257   op_final_mark();
 258 }
 259 
 260 void ShenandoahConcurrentGC::entry_init_updaterefs() {
 261   static const char* msg = "Pause Init Update Refs";
 262   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs);
 263   EventMark em("%s", msg);
 264 
 265   // No workers used in this phase, no setup required
 266   op_init_updaterefs();
 267 }
 268 
 269 void ShenandoahConcurrentGC::entry_final_updaterefs() {

 300   heap->try_inject_alloc_failure();
 301   op_reset();
 302 }
 303 
 304 void ShenandoahConcurrentGC::entry_mark_roots() {
 305   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 306   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 307   const char* msg = "Concurrent marking roots";
 308   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
 309   EventMark em("%s", msg);
 310 
 311   ShenandoahWorkerScope scope(heap->workers(),
 312                               ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
 313                               "concurrent marking roots");
 314 
 315   heap->try_inject_alloc_failure();
 316   op_mark_roots();
 317 }
 318 
 319 void ShenandoahConcurrentGC::entry_mark() {

 320   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 321   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 322   const char* msg = conc_mark_event_message();
 323   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark);
 324   EventMark em("%s", msg);
 325 
 326   ShenandoahWorkerScope scope(heap->workers(),
 327                               ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
 328                               "concurrent marking");
 329 
 330   heap->try_inject_alloc_failure();
 331   op_mark();
 332 }
 333 
 334 void ShenandoahConcurrentGC::entry_thread_roots() {
 335   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 336   static const char* msg = "Concurrent thread roots";
 337   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_thread_roots);
 338   EventMark em("%s", msg);
 339 
 340   ShenandoahWorkerScope scope(heap->workers(),
 341                               ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
 342                               msg);

 457   ShenandoahWorkerScope scope(heap->workers(),
 458                               ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
 459                               "concurrent reference update");
 460 
 461   heap->try_inject_alloc_failure();
 462   op_updaterefs();
 463 }
 464 
 465 void ShenandoahConcurrentGC::entry_cleanup_complete() {
 466   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 467   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 468   static const char* msg = "Concurrent cleanup";
 469   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
 470   EventMark em("%s", msg);
 471 
 472   // This phase does not use workers, no need for setup
 473   heap->try_inject_alloc_failure();
 474   op_cleanup_complete();
 475 }
 476 















 477 void ShenandoahConcurrentGC::op_reset() {
 478   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 479   if (ShenandoahPacing) {
 480     heap->pacer()->setup_for_reset();
 481   }
 482 
 483   heap->prepare_gc();
 484 }
 485 
 486 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
 487 private:
 488   ShenandoahMarkingContext* const _ctx;
 489 public:
 490   ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
 491 
 492   void heap_region_do(ShenandoahHeapRegion* r) {
 493     assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
 494     if (r->is_active()) {
 495       // Check if region needs updating its TAMS. We have updated it already during concurrent
 496       // reset, so it is very likely we don't need to do another write here.

 497       if (_ctx->top_at_mark_start(r) != r->top()) {
 498         _ctx->capture_top_at_mark_start(r);
 499       }
 500     } else {
 501       assert(_ctx->top_at_mark_start(r) == r->top(),
 502              "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
 503     }
 504   }
 505 
 506   bool is_thread_safe() { return true; }
 507 };
 508 
 509 void ShenandoahConcurrentGC::op_init_mark() {
 510   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 511   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
 512   assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
 513 
 514   assert(heap->marking_context()->is_bitmap_clear(), "need clear marking bitmap");
 515   assert(!heap->marking_context()->is_complete(), "should not be complete");
 516   assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
 517 





















 518   if (ShenandoahVerify) {
 519     heap->verifier()->verify_before_concmark();
 520   }
 521 
 522   if (VerifyBeforeGC) {
 523     Universe::verify();
 524   }
 525 
 526   heap->set_concurrent_mark_in_progress(true);
 527 
 528   {

 529     ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
 530     ShenandoahInitMarkUpdateRegionStateClosure cl;
 531     heap->parallel_heap_region_iterate(&cl);






 532   }
 533 
 534   // Weak reference processing
 535   ShenandoahReferenceProcessor* rp = heap->ref_processor();
 536   rp->reset_thread_locals();
 537   rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());
 538 
 539   // Make above changes visible to worker threads
 540   OrderAccess::fence();

 541   // Arm nmethods for concurrent marking. When an nmethod is about to be executed,
 542   // we need to make sure that all its metadata are marked. The alternative is to remark
 543   // thread roots at the final mark pause, but that can be a potential latency killer.
 544   if (heap->unload_classes()) {
 545     ShenandoahCodeRoots::arm_nmethods();
 546   }
 547 
 548   ShenandoahStackWatermark::change_epoch_id();
 549   if (ShenandoahPacing) {
 550     heap->pacer()->setup_for_mark();
 551   }
 552 }
 553 
 554 void ShenandoahConcurrentGC::op_mark_roots() {
 555   _mark.mark_concurrent_roots();
 556 }
 557 
 558 void ShenandoahConcurrentGC::op_mark() {
 559   _mark.concurrent_mark();
 560 }
 561 
 562 void ShenandoahConcurrentGC::op_final_mark() {
 563   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 564   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
 565   assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
 566 
 567   if (ShenandoahVerify) {
 568     heap->verifier()->verify_roots_no_forwarded();
 569   }
 570 
 571   if (!heap->cancelled_gc()) {
 572     _mark.finish_mark();
 573     assert(!heap->cancelled_gc(), "STW mark cannot OOM");
 574 
 575     // Notify JVMTI that the tagmap table will need cleaning.
 576     JvmtiTagMap::set_needs_cleaning();
 577 
 578     heap->prepare_regions_and_collection_set(true /*concurrent*/);




























 579 
 580     // Has to be done after cset selection
 581     heap->prepare_concurrent_roots();
 582 
 583     if (!heap->collection_set()->is_empty()) {
 584       if (ShenandoahVerify) {
 585         heap->verifier()->verify_before_evacuation();
 586       }
 587 
 588       heap->set_evacuation_in_progress(true);
 589       // From here on, we need to update references.
 590       heap->set_has_forwarded_objects(true);
 591 
 592       // Verify before arming for concurrent processing.
 593       // Otherwise, verification can trigger stack processing.
 594       if (ShenandoahVerify) {
 595         heap->verifier()->verify_during_evacuation();
 596       }
 597 
 598       // Arm nmethods/stack for concurrent processing
 599       ShenandoahCodeRoots::arm_nmethods();
 600       ShenandoahStackWatermark::change_epoch_id();
 601 
 602       // Notify JVMTI that oops are changed.
 603       JvmtiTagMap::set_needs_rehashing();
 604 














 605       if (ShenandoahPacing) {
 606         heap->pacer()->setup_for_evac();
 607       }
 608     } else {
 609       if (ShenandoahVerify) {
 610         heap->verifier()->verify_after_concmark();
 611       }
 612 
 613       if (VerifyAfterGC) {
 614         Universe::verify();
 615       }
 616     }
 617   }
 618 }
 619 
 620 class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
 621 private:
 622   OopClosure* const _oops;
 623 
 624 public:

 653     _java_threads.threads_do(&thr_cl, worker_id);
 654   }
 655 };
 656 
 657 void ShenandoahConcurrentGC::op_thread_roots() {
 658   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 659   assert(heap->is_evacuation_in_progress(), "Checked by caller");
 660   ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
 661   ShenandoahConcurrentEvacUpdateThreadTask task(heap->workers()->active_workers());
 662   heap->workers()->run_task(&task);
 663 }
 664 
 665 void ShenandoahConcurrentGC::op_weak_refs() {
 666   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 667   assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
 668   // Concurrent weak refs processing
 669   ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
 670   if (heap->gc_cause() == GCCause::_wb_breakpoint) {
 671     ShenandoahBreakpoint::at_after_reference_processing_started();
 672   }
 673   heap->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
 674 }
 675 
 676 class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
 677 private:
 678   ShenandoahHeap* const _heap;
 679   ShenandoahMarkingContext* const _mark_context;
 680   bool  _evac_in_progress;
 681   Thread* const _thread;
 682 
 683 public:
 684   ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
 685   void do_oop(oop* p);
 686   void do_oop(narrowOop* p);
 687 };
 688 
 689 ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
 690   _heap(ShenandoahHeap::heap()),
 691   _mark_context(ShenandoahHeap::heap()->marking_context()),
 692   _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
 693   _thread(Thread::current()) {
 694 }
 695 
 696 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
 697   const oop obj = RawAccess<>::oop_load(p);
 698   if (!CompressedOops::is_null(obj)) {
 699     if (!_mark_context->is_marked(obj)) {
 700       shenandoah_assert_correct(p, obj);
 701       ShenandoahHeap::atomic_clear_oop(p, obj);







 702     } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
 703       oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 704       if (resolved == obj) {
 705         resolved = _heap->evacuate_object(obj, _thread);
 706       }
 707       ShenandoahHeap::atomic_update_oop(resolved, p, obj);
 708       assert(_heap->cancelled_gc() ||
 709              _mark_context->is_marked(resolved) && !_heap->in_collection_set(resolved),
 710              "Sanity");
 711     }
 712   }
 713 }
 714 
 715 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
 716   ShouldNotReachHere();
 717 }
 718 
 719 class ShenandoahIsCLDAliveClosure : public CLDClosure {
 720 public:
 721   void do_cld(ClassLoaderData* cld) {

 906   assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
 907   ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
 908   heap->workers()->run_task(&task);
 909   heap->set_concurrent_strong_root_in_progress(false);
 910 }
 911 
 912 void ShenandoahConcurrentGC::op_cleanup_early() {
 913   ShenandoahHeap::heap()->free_set()->recycle_trash();
 914 }
 915 
 916 void ShenandoahConcurrentGC::op_evacuate() {
 917   ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
 918 }
 919 
 920 void ShenandoahConcurrentGC::op_init_updaterefs() {
 921   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 922   heap->set_evacuation_in_progress(false);
 923   heap->set_concurrent_weak_root_in_progress(false);
 924   heap->prepare_update_heap_references(true /*concurrent*/);
 925   heap->set_update_refs_in_progress(true);
 926 


 927   if (ShenandoahPacing) {
 928     heap->pacer()->setup_for_updaterefs();
 929   }
 930 }
 931 
 932 void ShenandoahConcurrentGC::op_updaterefs() {
 933   ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
 934 }
 935 
 936 class ShenandoahUpdateThreadClosure : public HandshakeClosure {
 937 private:
 938   ShenandoahUpdateRefsClosure _cl;
 939 public:
 940   ShenandoahUpdateThreadClosure();
 941   void do_thread(Thread* thread);
 942 };
 943 
 944 ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
 945   HandshakeClosure("Shenandoah Update Thread Roots") {
 946 }

 951     ResourceMark rm;
 952     jt->oops_do(&_cl, NULL);
 953   }
 954 }
 955 
 956 void ShenandoahConcurrentGC::op_update_thread_roots() {
 957   ShenandoahUpdateThreadClosure cl;
 958   Handshake::execute(&cl);
 959 }
 960 
 961 void ShenandoahConcurrentGC::op_final_updaterefs() {
 962   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 963   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
 964   assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");
 965 
 966   heap->finish_concurrent_roots();
 967 
 968   // Clear cancelled GC, if set. On cancellation path, the block before would handle
 969   // everything.
 970   if (heap->cancelled_gc()) {
 971     heap->clear_cancelled_gc();
 972   }
 973 
 974   // Has to be done before cset is clear
 975   if (ShenandoahVerify) {
 976     heap->verifier()->verify_roots_in_to_space();
 977   }
 978 


















 979   heap->update_heap_region_states(true /*concurrent*/);
 980 
 981   heap->set_update_refs_in_progress(false);
 982   heap->set_has_forwarded_objects(false);
 983 




 984   if (ShenandoahVerify) {
 985     heap->verifier()->verify_after_updaterefs();
 986   }
 987 
 988   if (VerifyAfterGC) {
 989     Universe::verify();
 990   }
 991 
 992   heap->rebuild_free_set(true /*concurrent*/);
 993 }
 994 
 995 void ShenandoahConcurrentGC::op_final_roots() {
 996   ShenandoahHeap::heap()->set_concurrent_weak_root_in_progress(false);
 997 }
 998 
 999 void ShenandoahConcurrentGC::op_cleanup_complete() {
1000   ShenandoahHeap::heap()->free_set()->recycle_trash();
1001 }
1002 




1003 bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
1004   if (ShenandoahHeap::heap()->cancelled_gc()) {
1005     _degen_point = point;
1006     return true;
1007   }
1008   return false;
1009 }
1010 
1011 const char* ShenandoahConcurrentGC::init_mark_event_message() const {
1012   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1013   assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1014   if (heap->unload_classes()) {
1015     return "Pause Init Mark (unload classes)";
1016   } else {
1017     return "Pause Init Mark";
1018   }
1019 }
1020 
1021 const char* ShenandoahConcurrentGC::final_mark_event_message() const {
1022   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1023   assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");

1024   if (heap->unload_classes()) {
1025     return "Pause Final Mark (unload classes)";
1026   } else {
1027     return "Pause Final Mark";
1028   }
1029 }
1030 
1031 const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
1032   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1033   assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");

1034   if (heap->unload_classes()) {
1035     return "Concurrent marking (unload classes)";
1036   } else {
1037     return "Concurrent marking";
1038   }
1039 }

  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 
  27 #include "gc/shared/barrierSetNMethod.hpp"
  28 #include "gc/shared/collectorCounters.hpp"
  29 #include "gc/shenandoah/shenandoahBreakpoint.hpp"
  30 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  31 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
  32 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  33 #include "gc/shenandoah/shenandoahGeneration.hpp"
  34 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  35 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  36 #include "gc/shenandoah/shenandoahLock.hpp"
  37 #include "gc/shenandoah/shenandoahMark.inline.hpp"
  38 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  39 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  40 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  41 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  42 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  43 #include "gc/shenandoah/shenandoahStackWatermark.hpp"
  44 #include "gc/shenandoah/shenandoahUtils.hpp"
  45 #include "gc/shenandoah/shenandoahVerifier.hpp"
  46 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  47 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  48 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  49 #include "memory/allocation.hpp"
  50 #include "prims/jvmtiTagMap.hpp"
  51 #include "runtime/vmThread.hpp"
  52 #include "utilities/events.hpp"
  53 
  54 // Breakpoint support
  55 class ShenandoahBreakpointGCScope : public StackObj {

  70   }
  71 };
  72 
  73 class ShenandoahBreakpointMarkScope : public StackObj {
  74 private:
  75   const GCCause::Cause _cause;
  76 public:
  77   ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
  78     if (_cause == GCCause::_wb_breakpoint) {
  79       ShenandoahBreakpoint::at_after_marking_started();
  80     }
  81   }
  82 
  83   ~ShenandoahBreakpointMarkScope() {
  84     if (_cause == GCCause::_wb_breakpoint) {
  85       ShenandoahBreakpoint::at_before_marking_completed();
  86     }
  87   }
  88 };
  89 
  90 ShenandoahConcurrentGC::ShenandoahConcurrentGC(ShenandoahGeneration* generation, bool do_old_gc_bootstrap) :
  91   _mark(generation),
  92   _degen_point(ShenandoahDegenPoint::_degenerated_unset),
  93   _abbreviated(false),
  94   _do_old_gc_bootstrap(do_old_gc_bootstrap),
  95   _generation(generation) {
  96 }
  97 
  98 ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
  99   return _degen_point;
 100 }
 101 




 102 bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
 103   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 104   heap->start_conc_gc();
 105 
 106   ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);
 107 
 108   // Reset for upcoming marking
 109   entry_reset();
 110 
 111   // Start initial mark under STW
 112   vmop_entry_init_mark();
 113 
 114   {
 115     ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);
 116 
 117     // Reset task queue stats here, rather than in mark_concurrent_roots
 118     // because remembered set scan will `push` oops into the queues and
 119     // resetting after this happens will lose those counts.
 120     TASKQUEUE_STATS_ONLY(_mark.task_queues()->reset_taskqueue_stats());
 121 
 122     // Concurrent remembered set scanning
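         // (The remembered set records old-to-young pointers; scanning it supplies the marking
         // roots that old-gen objects hold into the young generation.)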
 123     if (_generation->generation_mode() == YOUNG) {
 124       ShenandoahConcurrentPhase gc_phase("Concurrent remembered set scanning", ShenandoahPhaseTimings::init_scan_rset);
 125       _generation->scan_remembered_set();
 126     }
 127 
 128     // Concurrent mark roots
 129     entry_mark_roots();
 130     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_roots)) return false;
 131 
 132     // Continue concurrent mark
 133     entry_mark();
 134     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) return false;
 135   }
 136 
 137   // Complete marking under STW, and start evacuation
 138   vmop_entry_final_mark();
 139 
 140   // If GC was cancelled before final mark, then the safepoint operation will do nothing
 141   // and the concurrent mark will still be in progress. In this case it is safe to resume
 142   // the degenerated cycle from the marking phase. On the other hand, if the GC is cancelled
 143   // after final mark (but before this check), then the final mark safepoint operation
 144   // will have finished the mark (setting concurrent mark in progress to false). Final mark
 145   // will also have setup state (in concurrent stack processing) that will not be safe to
 146   // resume from the marking phase in the degenerated cycle. That is, if the cancellation
 147   // occurred after final mark, we must resume the degenerated cycle after the marking phase.
 148   if (_generation->is_concurrent_mark_in_progress() && check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) {
 149     assert(!heap->is_concurrent_weak_root_in_progress(), "Weak roots should not be in progress when concurrent mark is in progress");
 150     return false;
 151   }
 152 
 153   // Concurrent stack processing
 154   if (heap->is_evacuation_in_progress()) {
 155     entry_thread_roots();
 156   }
 157 
 158   // Process weak roots that might still point to regions that would be broken by cleanup
 159   if (heap->is_concurrent_weak_root_in_progress()) {
 160     entry_weak_refs();
 161     entry_weak_roots();
 162   }
 163 
 164   // Final mark might have reclaimed some immediate garbage; kick cleanup to reclaim
 165   // the space. This would be the last action if there is nothing to evacuate.  Note that
 166   // we will not age young-gen objects in the case that we skip evacuation.
 167   entry_cleanup_early();
 168 
 169   {
 170     ShenandoahHeapLocker locker(heap->lock());
 171     heap->free_set()->log_status();
 172   }
 173 
 174   // Perform concurrent class unloading
 175   if (heap->unload_classes() &&
 176       heap->is_concurrent_weak_root_in_progress()) {
 177     entry_class_unloading();
 178   }
 179 
 180   // Processing strong roots
 181   // This may be skipped if there is nothing to update/evacuate.
 182   // If so, strong_root_in_progress would be unset.
 183   if (heap->is_concurrent_strong_root_in_progress()) {
 184     entry_strong_roots();
 185   }
 186 
 187   // Global marking has completed. We need to fill in any unmarked objects in the old generation
 188   // so that subsequent remembered set scans will not walk pointers into reclaimed memory.
 189   if (!heap->cancelled_gc() && heap->mode()->is_generational() && _generation->generation_mode() == GLOBAL) {
 190     entry_global_coalesce_and_fill();
 191   }
 192 
 193   // Continue the cycle with evacuation and optional update-refs.
 194   // This may be skipped if there is nothing to evacuate.
 195   // If so, evac_in_progress would be unset by collection set preparation code.
 196   if (heap->is_evacuation_in_progress()) {
 197     // Concurrently evacuate
 198     entry_evacuate();
 199     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) return false;
 200 
 201     // Perform update-refs phase.
 202     vmop_entry_init_updaterefs();
 203     entry_updaterefs();
 204     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;
 205 
 206     // Concurrent update thread roots
 207     entry_update_thread_roots();
 208     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;
 209 
 210     vmop_entry_final_updaterefs();
 211 
 212     // Updating references freed up the collection set; kick cleanup to reclaim the space.
 213     entry_cleanup_complete();
 214   } else {
 215     // We chose not to evacuate because we found sufficient immediate garbage.
 216     vmop_entry_final_roots(heap->is_aging_cycle());
 217     _abbreviated = true;
 218   }
 219 
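       // End-of-cycle budget accounting (summary; the precise semantics are defined by the heap
       // and generation methods below): capture how much memory was promoted into old-gen during
       // this cycle, undo the temporary inter-generation adjustments made for evacuation, and
       // zero the per-cycle evacuation and promotion reserves.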
 220   size_t old_available, young_available;
 221   {
 222     ShenandoahYoungGeneration* young_gen = heap->young_generation();
 223     ShenandoahGeneration* old_gen = heap->old_generation();
 224     ShenandoahHeapLocker locker(heap->lock());
 225 
 226     size_t old_usage_before_evac = heap->capture_old_usage(0);
 227     size_t old_usage_now = old_gen->used();
 228     size_t promoted_bytes = old_usage_now - old_usage_before_evac;
 229     heap->set_previous_promotion(promoted_bytes);
 230 
 231     young_gen->unadjust_available();
 232     old_gen->unadjust_available();
 233     // No need to call old_gen->increase_used(); that was done when PLABs were allocated, accounting for both old evacs and promotions.
 234 
 235     young_available = young_gen->adjusted_available();
 236     old_available = old_gen->adjusted_available();
 237 
 238     heap->set_alloc_supplement_reserve(0);
 239     heap->set_young_evac_reserve(0);
 240     heap->set_old_evac_reserve(0);
 241     heap->reset_old_evac_expended();
 242     heap->set_promotion_reserve(0);
 243   }
 244   log_info(gc, ergo)("At end of concurrent GC, old_available: " SIZE_FORMAT "%s, young_available: " SIZE_FORMAT "%s",
 245                      byte_size_in_proper_unit(old_available), proper_unit_for_byte_size(old_available),
 246                      byte_size_in_proper_unit(young_available), proper_unit_for_byte_size(young_available));
 247 
 248   return true;
 249 }
 250 
 251 void ShenandoahConcurrentGC::vmop_entry_init_mark() {
 252   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 253   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 254   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);
 255 
 256   heap->try_inject_alloc_failure();
 257   VM_ShenandoahInitMark op(this, _do_old_gc_bootstrap);
 258   VMThread::execute(&op); // jump to entry_init_mark() under safepoint
 259 }
 260 
 261 void ShenandoahConcurrentGC::vmop_entry_final_mark() {
 262   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 263   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 264   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);
 265 
 266   heap->try_inject_alloc_failure();
 267   VM_ShenandoahFinalMarkStartEvac op(this);
 268   VMThread::execute(&op); // jump to entry_final_mark under safepoint
 269 }
 270 
 271 void ShenandoahConcurrentGC::vmop_entry_init_updaterefs() {
 272   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 273   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 274   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_update_refs_gross);
 275 
 276   heap->try_inject_alloc_failure();
 277   VM_ShenandoahInitUpdateRefs op(this);
 278   VMThread::execute(&op);
 279 }
 280 
 281 void ShenandoahConcurrentGC::vmop_entry_final_updaterefs() {
 282   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 283   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 284   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_gross);
 285 
 286   heap->try_inject_alloc_failure();
 287   VM_ShenandoahFinalUpdateRefs op(this);
 288   VMThread::execute(&op);
 289 }
 290 
 291 void ShenandoahConcurrentGC::vmop_entry_final_roots(bool increment_region_ages) {
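       // increment_region_ages is forwarded to the safepoint op; the caller passes
       // heap->is_aging_cycle() so that region ages can still advance when evacuation is
       // skipped (the abbreviated-cycle path in collect()).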
 292   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 293   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 294   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_roots_gross);
 295 
 296   // This phase does not use workers, no need for setup
 297   heap->try_inject_alloc_failure();
 298   VM_ShenandoahFinalRoots op(this, increment_region_ages);
 299   VMThread::execute(&op);
 300 }
 301 
 302 void ShenandoahConcurrentGC::entry_init_mark() {
 303   char msg[1024];
 304   init_mark_event_message(msg, sizeof(msg));
 305   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_mark);
 306   EventMark em("%s", msg);
 307 
 308   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
 309                               ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
 310                               "init marking");
 311 
 312   op_init_mark();
 313 }
 314 
 315 void ShenandoahConcurrentGC::entry_final_mark() {
 316   char msg[1024];
 317   final_mark_event_message(msg, sizeof(msg));
 318   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark);
 319   EventMark em("%s", msg);
 320 
 321   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
 322                               ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
 323                               "final marking");
 324 
 325   op_final_mark();
 326 }
 327 
 328 void ShenandoahConcurrentGC::entry_init_updaterefs() {
 329   static const char* msg = "Pause Init Update Refs";
 330   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs);
 331   EventMark em("%s", msg);
 332 
 333   // No workers used in this phase, no setup required
 334   op_init_updaterefs();
 335 }
 336 
 337 void ShenandoahConcurrentGC::entry_final_updaterefs() {

 368   heap->try_inject_alloc_failure();
 369   op_reset();
 370 }
 371 
 372 void ShenandoahConcurrentGC::entry_mark_roots() {
 373   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 374   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 375   const char* msg = "Concurrent marking roots";
 376   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
 377   EventMark em("%s", msg);
 378 
 379   ShenandoahWorkerScope scope(heap->workers(),
 380                               ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
 381                               "concurrent marking roots");
 382 
 383   heap->try_inject_alloc_failure();
 384   op_mark_roots();
 385 }
 386 
 387 void ShenandoahConcurrentGC::entry_mark() {
 388   char msg[1024];
 389   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 390   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 391   conc_mark_event_message(msg, sizeof(msg));
 392   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark);
 393   EventMark em("%s", msg);
 394 
 395   ShenandoahWorkerScope scope(heap->workers(),
 396                               ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
 397                               "concurrent marking");
 398 
 399   heap->try_inject_alloc_failure();
 400   op_mark();
 401 }
 402 
 403 void ShenandoahConcurrentGC::entry_thread_roots() {
 404   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 405   static const char* msg = "Concurrent thread roots";
 406   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_thread_roots);
 407   EventMark em("%s", msg);
 408 
 409   ShenandoahWorkerScope scope(heap->workers(),
 410                               ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
 411                               msg);

 526   ShenandoahWorkerScope scope(heap->workers(),
 527                               ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
 528                               "concurrent reference update");
 529 
 530   heap->try_inject_alloc_failure();
 531   op_updaterefs();
 532 }
 533 
 534 void ShenandoahConcurrentGC::entry_cleanup_complete() {
 535   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 536   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 537   static const char* msg = "Concurrent cleanup";
 538   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
 539   EventMark em("%s", msg);
 540 
 541   // This phase does not use workers, no need for setup
 542   heap->try_inject_alloc_failure();
 543   op_cleanup_complete();
 544 }
 545 
 546 void ShenandoahConcurrentGC::entry_global_coalesce_and_fill() {
 547   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 548 
 549   const char* msg = "Coalescing and filling old regions in global collect";
 550   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::coalesce_and_fill);
 551 
 552   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 553   EventMark em("%s", msg);
 554   ShenandoahWorkerScope scope(heap->workers(),
 555                               ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
 556                               "concurrent coalesce and fill");
 557 
 558   op_global_coalesce_and_fill();
 559 }
 560 
 561 void ShenandoahConcurrentGC::op_reset() {
 562   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 563   if (ShenandoahPacing) {
 564     heap->pacer()->setup_for_reset();
 565   }
 566   _generation->prepare_gc(_do_old_gc_bootstrap);
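       // Note: reset is generation-scoped. When _do_old_gc_bootstrap is set, prepare_gc() is
       // presumably expected to prepare old-gen marking state as well (an assumption based on
       // the flag's name; the implementation lives in the generation classes).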

 567 }
 568 
 569 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
 570 private:
 571   ShenandoahMarkingContext* const _ctx;
 572 public:
 573   ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
 574 
 575   void heap_region_do(ShenandoahHeapRegion* r) {
 576     assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
 577     if (r->is_active()) {
 578       // Check if region needs updating its TAMS. We have updated it already during concurrent
 579       // reset, so it is very likely we don't need to do another write here.  Since most regions
 580       // are not "active", this path is relatively rare.
 581       if (_ctx->top_at_mark_start(r) != r->top()) {
 582         _ctx->capture_top_at_mark_start(r);
 583       }
 584     } else {
 585       assert(_ctx->top_at_mark_start(r) == r->top(),
 586              "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
 587     }
 588   }
 589 
 590   bool is_thread_safe() { return true; }
 591 };
 592 
 593 void ShenandoahConcurrentGC::op_init_mark() {
 594   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 595   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
 596   assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
 597 
 598   assert(_generation->is_bitmap_clear(), "need clear marking bitmap");
 599   assert(!_generation->is_mark_complete(), "should not be complete");
 600   assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
 601 
 602 
 603   if (heap->mode()->is_generational()) {
 604     if (_generation->generation_mode() == YOUNG || (_generation->generation_mode() == GLOBAL && ShenandoahVerify)) {
 605       // The current implementation of swap_remembered_set() copies the write-card-table
 606       // to the read-card-table. The remembered sets are also swapped for GLOBAL collections
 607       // so that the verifier works with the correct copy of the card table when verifying.
 608       ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_swap_rset);
 609       _generation->swap_remembered_set();
 610     }
 611 
 612     if (_generation->generation_mode() == GLOBAL) {
 613       heap->cancel_old_gc();
 614     } else if (heap->is_concurrent_old_mark_in_progress()) {
 615       // Purge the SATB buffers, transferring any valid, old pointers to the
 616       // old generation mark queue. Any pointers in a young region will be
 617       // abandoned.
 618       ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_transfer_satb);
 619       heap->transfer_old_pointers_from_satb();
 620     }
 621   }
 622 
 623   if (ShenandoahVerify) {
 624     heap->verifier()->verify_before_concmark();
 625   }
 626 
 627   if (VerifyBeforeGC) {
 628     Universe::verify();
 629   }
 630 
 631   _generation->set_concurrent_mark_in_progress(true);
 632 
 633   if (_do_old_gc_bootstrap) {
 634     // Update region state for both young and old regions
 635     ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
 636     ShenandoahInitMarkUpdateRegionStateClosure cl;
 637     heap->parallel_heap_region_iterate(&cl);
 638     heap->old_generation()->parallel_heap_region_iterate(&cl);
 639   } else {
 640     // Update region state for only young regions
 641     ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
 642     ShenandoahInitMarkUpdateRegionStateClosure cl;
 643     _generation->parallel_heap_region_iterate(&cl);
 644   }
 645 
 646   // Weak reference processing
 647   ShenandoahReferenceProcessor* rp = _generation->ref_processor();
 648   rp->reset_thread_locals();
 649   rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());
 650 
 651   // Make above changes visible to worker threads
 652   OrderAccess::fence();
 653 
 654   // Arm nmethods for concurrent marking. When an nmethod is about to be executed,
 655   // we need to make sure that all its metadata are marked. The alternative is to remark
 656   // thread roots at the final mark pause, but that can be a potential latency killer.
 657   if (heap->unload_classes()) {
 658     ShenandoahCodeRoots::arm_nmethods();
 659   }
 660 
 661   ShenandoahStackWatermark::change_epoch_id();
 662   if (ShenandoahPacing) {
 663     heap->pacer()->setup_for_mark();
 664   }
 665 }
 666 
 667 void ShenandoahConcurrentGC::op_mark_roots() {
 668   _mark.mark_concurrent_roots();
 669 }
 670 
 671 void ShenandoahConcurrentGC::op_mark() {
 672   _mark.concurrent_mark();
 673 }
 674 
 675 void ShenandoahConcurrentGC::op_final_mark() {
 676   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 677   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
 678   assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
 679 
 680   if (ShenandoahVerify) {
 681     heap->verifier()->verify_roots_no_forwarded();
 682   }
 683 
 684   if (!heap->cancelled_gc()) {
 685     _mark.finish_mark();
 686     assert(!heap->cancelled_gc(), "STW mark cannot OOM");
 687 
 688     // Notify JVMTI that the tagmap table will need cleaning.
 689     JvmtiTagMap::set_needs_cleaning();
 690 
 691     // The collection set is chosen by prepare_regions_and_collection_set().
 692     //
 693     // TODO: Under severe memory overload conditions that can be checked here, we may want to limit
 694     // the inclusion of old-gen candidates within the collection set.  This would allow us to prioritize efforts on
 695     // evacuating young-gen.  This remediation is most appropriate when old-gen availability is very high (so there
 696     // are negligible negative impacts from delaying completion of old-gen evacuation) and when young-gen collections
 697     // are "under duress" (as signalled by very low availability of memory within young-gen, indicating that young-gen
 698     // collections are not triggering frequently enough).
 699     _generation->prepare_regions_and_collection_set(true /*concurrent*/);
 700 
 701     // Upon return from prepare_regions_and_collection_set(), certain parameters have been established to govern the
 702     // evacuation efforts that are about to begin.  In particular:
 703     //
 704     // heap->get_promotion_reserve() represents the amount of memory within old-gen's available memory that has
 705     //   been set aside to hold objects promoted from young-gen memory.  This represents an estimated percentage
 706     //   of the live young-gen memory within the collection set.  If there is more data ready to be promoted than
 707     //   can fit within this reserve, the promotion of some objects will be deferred until a subsequent evacuation
 708     //   pass.
 709     //
 710     // heap->get_old_evac_reserve() represents the amount of memory within old-gen's available memory that has been
 711     //  set aside to hold objects evacuated from the old-gen collection set.
 712     //
 713     // heap->get_young_evac_reserve() represents the amount of memory within young-gen's available memory that has
 714     //  been set aside to hold objects evacuated from the young-gen collection set.  Conservatively, this value
 715     //  equals the entire amount of live young-gen memory within the collection set, even though some of this memory
 716     //  will likely be promoted.
 717     //
 718     // heap->get_alloc_supplement_reserve() represents the amount of old-gen memory that can be allocated during evacuation
 719     // and update-refs phases of gc.  The young evacuation reserve has already been removed from this quantity.
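         //
         // A purely illustrative example (hypothetical numbers, not computed by this code): if the
         // collection set holds 100 MB of live young-gen memory and 20% of it is expected to be
         // promoted, roughly 20 MB of old-gen available memory would sit in the promotion reserve,
         // while the young evacuation reserve would conservatively be the full 100 MB. All of these
         // budgets are zeroed again at the end of collect().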
 720 
 721     // Has to be done after cset selection
 722     heap->prepare_concurrent_roots();
 723 
 724     if (!heap->collection_set()->is_empty()) {
 725       if (ShenandoahVerify) {
 726         heap->verifier()->verify_before_evacuation();
 727       }
 728 
 729       heap->set_evacuation_in_progress(true);
 730       // From here on, we need to update references.
 731       heap->set_has_forwarded_objects(true);
 732 
 733       // Verify before arming for concurrent processing.
 734       // Otherwise, verification can trigger stack processing.
 735       if (ShenandoahVerify) {
 736         heap->verifier()->verify_during_evacuation();
 737       }
 738 
 739       // Arm nmethods/stack for concurrent processing
 740       ShenandoahCodeRoots::arm_nmethods();
 741       ShenandoahStackWatermark::change_epoch_id();
 742 
 743       // Notify JVMTI that oops are changed.
 744       JvmtiTagMap::set_needs_rehashing();
 745 
 746       if (heap->mode()->is_generational()) {
 747         // Calculate the temporary evacuation allowance supplement to young-gen memory capacity (for allocations
 748         // and young-gen evacuations).
 749         size_t young_available = heap->young_generation()->adjust_available(heap->get_alloc_supplement_reserve());
 750         // old_available is memory that can hold promotions and evacuations.  Subtract out the memory that is being
 751         // loaned for young-gen allocations or evacuations.
 752         size_t old_available = heap->old_generation()->adjust_available(-heap->get_alloc_supplement_reserve());
 753 
 754         log_info(gc, ergo)("After generational memory budget adjustments, old_available: " SIZE_FORMAT
 755                            "%s, young_available: " SIZE_FORMAT "%s",
 756                            byte_size_in_proper_unit(old_available), proper_unit_for_byte_size(old_available),
 757                            byte_size_in_proper_unit(young_available), proper_unit_for_byte_size(young_available));
 758       }
 759 
 760       if (ShenandoahPacing) {
 761         heap->pacer()->setup_for_evac();
 762       }
 763     } else {
 764       if (ShenandoahVerify) {
 765         heap->verifier()->verify_after_concmark();
 766       }
 767 
 768       if (VerifyAfterGC) {
 769         Universe::verify();
 770       }
 771     }
 772   }
 773 }
 774 
 775 class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
 776 private:
 777   OopClosure* const _oops;
 778 
 779 public:

 808     _java_threads.threads_do(&thr_cl, worker_id);
 809   }
 810 };
 811 
 812 void ShenandoahConcurrentGC::op_thread_roots() {
 813   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 814   assert(heap->is_evacuation_in_progress(), "Checked by caller");
 815   ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
 816   ShenandoahConcurrentEvacUpdateThreadTask task(heap->workers()->active_workers());
 817   heap->workers()->run_task(&task);
 818 }
 819 
 820 void ShenandoahConcurrentGC::op_weak_refs() {
 821   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 822   assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
 823   // Concurrent weak refs processing
 824   ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
 825   if (heap->gc_cause() == GCCause::_wb_breakpoint) {
 826     ShenandoahBreakpoint::at_after_reference_processing_started();
 827   }
 828   _generation->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
 829 }
 830 
 831 class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
 832 private:
 833   ShenandoahHeap* const _heap;
 834   ShenandoahMarkingContext* const _mark_context;
 835   bool  _evac_in_progress;
 836   Thread* const _thread;
 837 
 838 public:
 839   ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
 840   void do_oop(oop* p);
 841   void do_oop(narrowOop* p);
 842 };
 843 
 844 ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
 845   _heap(ShenandoahHeap::heap()),
 846   _mark_context(ShenandoahHeap::heap()->marking_context()),
 847   _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
 848   _thread(Thread::current()) {
 849 }
 850 
 851 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
 852   const oop obj = RawAccess<>::oop_load(p);
 853   if (!CompressedOops::is_null(obj)) {
 854     if (!_mark_context->is_marked(obj)) {
 855       if (_heap->is_in_active_generation(obj)) {
 856         // TODO: This worries me. Here we are asserting that an unmarked from-space object is 'correct'.
 857         // Normally, I would call this a bogus assert, but there seems to be a legitimate use-case for
 858         // accessing from-space objects during class unloading. However, the from-space object may have
 859         // been "filled". We've made no effort to prevent old generation classes being unloaded by young
 860         // gen (and vice-versa).
 861         shenandoah_assert_correct(p, obj);
 862         ShenandoahHeap::atomic_clear_oop(p, obj);
 863       }
 864     } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
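           // The object is marked but still in the collection set: evacuate it ourselves if no one
           // has yet, then atomically swing the root slot to the to-space copy (mutators may race).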
 865       oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 866       if (resolved == obj) {
 867         resolved = _heap->evacuate_object(obj, _thread);
 868       }
 869       ShenandoahHeap::atomic_update_oop(resolved, p, obj);
 870       assert(_heap->cancelled_gc() ||
 871              _mark_context->is_marked(resolved) && !_heap->in_collection_set(resolved),
 872              "Sanity");
 873     }
 874   }
 875 }
 876 
 877 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
 878   ShouldNotReachHere();
 879 }
 880 
 881 class ShenandoahIsCLDAliveClosure : public CLDClosure {
 882 public:
 883   void do_cld(ClassLoaderData* cld) {

1068   assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
1069   ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
1070   heap->workers()->run_task(&task);
1071   heap->set_concurrent_strong_root_in_progress(false);
1072 }
1073 
1074 void ShenandoahConcurrentGC::op_cleanup_early() {
1075   ShenandoahHeap::heap()->free_set()->recycle_trash();
1076 }
1077 
1078 void ShenandoahConcurrentGC::op_evacuate() {
1079   ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
1080 }
1081 
1082 void ShenandoahConcurrentGC::op_init_updaterefs() {
1083   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1084   heap->set_evacuation_in_progress(false);
1085   heap->set_concurrent_weak_root_in_progress(false);
1086   heap->prepare_update_heap_references(true /*concurrent*/);
1087   heap->set_update_refs_in_progress(true);
1088   if (ShenandoahVerify) {
1089     heap->verifier()->verify_before_updaterefs();
1090   }
1091   if (ShenandoahPacing) {
1092     heap->pacer()->setup_for_updaterefs();
1093   }
1094 }
1095 
1096 void ShenandoahConcurrentGC::op_updaterefs() {
1097   ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
1098 }
1099 
1100 class ShenandoahUpdateThreadClosure : public HandshakeClosure {
1101 private:
1102   ShenandoahUpdateRefsClosure _cl;
1103 public:
1104   ShenandoahUpdateThreadClosure();
1105   void do_thread(Thread* thread);
1106 };
1107 
1108 ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
1109   HandshakeClosure("Shenandoah Update Thread Roots") {
1110 }

1115     ResourceMark rm;
1116     jt->oops_do(&_cl, NULL);
1117   }
1118 }
1119 
1120 void ShenandoahConcurrentGC::op_update_thread_roots() {
1121   ShenandoahUpdateThreadClosure cl;
1122   Handshake::execute(&cl);
1123 }
1124 
1125 void ShenandoahConcurrentGC::op_final_updaterefs() {
1126   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1127   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
1128   assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");
1129 
1130   heap->finish_concurrent_roots();
1131 
1132   // Clear cancelled GC, if set. On cancellation path, the block before would handle
1133   // everything.
1134   if (heap->cancelled_gc()) {
1135     heap->clear_cancelled_gc(true /* clear oom handler */);
1136   }
1137 
1138   // Has to be done before cset is clear
1139   if (ShenandoahVerify) {
1140     heap->verifier()->verify_roots_in_to_space();
1141   }
1142 
1143   if (heap->mode()->is_generational() && heap->is_concurrent_old_mark_in_progress()) {
1144     // When the SATB barrier is left on to support concurrent old gen mark, it may pick up writes to
1145     // objects in the collection set. After those objects are evacuated, the pointers in the
1146     // SATB are no longer safe. Once we have finished update references, we are guaranteed that
1147     // no more writes to the collection set are possible.
1148     //
1149     // This will transfer any old pointers in _active_ regions from the SATB to the old gen
1150     // mark queues. All other pointers will be discarded. This would also discard any pointers
1151     // in old regions that were included in a mixed evacuation. We aren't using the SATB filter
1152     // methods here because we cannot control when they execute. If the SATB filter runs _after_
1153     // a region has been recycled, we will not be able to detect the bad pointer.
1154     //
1155     // We are not concerned about skipping this step in abbreviated cycles because regions
1156     // with no live objects cannot have been written to and so cannot have entries in the SATB
1157     // buffers.
1158     heap->transfer_old_pointers_from_satb();
1159   }
1160 
1161   heap->update_heap_region_states(true /*concurrent*/);
1162 
1163   heap->set_update_refs_in_progress(false);
1164   heap->set_has_forwarded_objects(false);
1165 
1166   // The aging cycle is only relevant during the evacuation phase (for individual objects) and
1167   // during final mark (for entire regions).  Both of these operations occur before final update refs.
1168   heap->set_aging_cycle(false);
1169 
1170   if (ShenandoahVerify) {
1171     heap->verifier()->verify_after_updaterefs();
1172   }
1173 
1174   if (VerifyAfterGC) {
1175     Universe::verify();
1176   }
1177 
1178   heap->rebuild_free_set(true /*concurrent*/);
1179 }
1180 
1181 void ShenandoahConcurrentGC::op_final_roots() {
1182   ShenandoahHeap::heap()->set_concurrent_weak_root_in_progress(false);
1183 }
1184 
1185 void ShenandoahConcurrentGC::op_cleanup_complete() {
1186   ShenandoahHeap::heap()->free_set()->recycle_trash();
1187 }
1188 
1189 void ShenandoahConcurrentGC::op_global_coalesce_and_fill() {
1190   ShenandoahHeap::heap()->coalesce_and_fill_old_regions();
1191 }
1192 
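     // On cancellation, record the phase at which the concurrent cycle stopped; the caller uses
     // degen_point() to hand the collection off to a degenerated cycle that resumes from there.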
1193 bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
1194   if (ShenandoahHeap::heap()->cancelled_gc()) {
1195     _degen_point = point;
1196     return true;
1197   }
1198   return false;
1199 }
1200 
1201 void ShenandoahConcurrentGC::init_mark_event_message(char* buf, size_t len) const {
1202   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1203   assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1204   if (heap->unload_classes()) {
1205     jio_snprintf(buf, len, "Pause Init Mark (%s) (unload classes)", _generation->name());
1206   } else {
1207     jio_snprintf(buf, len, "Pause Init Mark (%s)", _generation->name());
1208   }
1209 }
1210 
1211 void ShenandoahConcurrentGC::final_mark_event_message(char* buf, size_t len) const {
1212   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1213   assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
1214          "Should not have forwarded objects during final mark (unless old gen concurrent mark is running)");
1215   if (heap->unload_classes()) {
1216     jio_snprintf(buf, len, "Pause Final Mark (%s) (unload classes)", _generation->name());
1217   } else {
1218     jio_snprintf(buf, len, "Pause Final Mark (%s)", _generation->name());
1219   }
1220 }
1221 
1222 void ShenandoahConcurrentGC::conc_mark_event_message(char* buf, size_t len) const {
1223   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1224   assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
1225          "Should not have forwarded objects during concurrent mark (unless old gen concurrent mark is running)");
1226   if (heap->unload_classes()) {
1227     jio_snprintf(buf, len, "Concurrent marking (%s) (unload classes)", _generation->name());
1228   } else {
1229     jio_snprintf(buf, len, "Concurrent marking (%s)", _generation->name());
1230   }
1231 }