src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp

   1 /*
   2  * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.

   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 
  27 #include "gc/shared/barrierSetNMethod.hpp"
  28 #include "gc/shared/collectorCounters.hpp"
  29 #include "gc/shared/continuationGCSupport.inline.hpp"
  30 #include "gc/shenandoah/shenandoahBreakpoint.hpp"
  31 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  32 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
  33 #include "gc/shenandoah/shenandoahFreeSet.hpp"




  34 #include "gc/shenandoah/shenandoahLock.hpp"
  35 #include "gc/shenandoah/shenandoahMark.inline.hpp"
  36 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  37 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  38 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  39 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  40 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  41 #include "gc/shenandoah/shenandoahStackWatermark.hpp"
  42 #include "gc/shenandoah/shenandoahUtils.hpp"
  43 #include "gc/shenandoah/shenandoahVerifier.hpp"
  44 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  45 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  46 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  47 #include "memory/allocation.hpp"
  48 #include "prims/jvmtiTagMap.hpp"
  49 #include "runtime/vmThread.hpp"
  50 #include "utilities/events.hpp"
  51 
  52 // Breakpoint support
  53 class ShenandoahBreakpointGCScope : public StackObj {

  68   }
  69 };
  70 
  71 class ShenandoahBreakpointMarkScope : public StackObj {
  72 private:
  73   const GCCause::Cause _cause;
  74 public:
  75   ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
  76     if (_cause == GCCause::_wb_breakpoint) {
  77       ShenandoahBreakpoint::at_after_marking_started();
  78     }
  79   }
  80 
  81   ~ShenandoahBreakpointMarkScope() {
  82     if (_cause == GCCause::_wb_breakpoint) {
  83       ShenandoahBreakpoint::at_before_marking_completed();
  84     }
  85   }
  86 };
  87 
  88 ShenandoahConcurrentGC::ShenandoahConcurrentGC() :
  89   _mark(),
  90   _degen_point(ShenandoahDegenPoint::_degenerated_unset),
  91   _abbreviated(false) {


  92 }
  93 
  94 ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
  95   return _degen_point;
  96 }
  97 
  98 void ShenandoahConcurrentGC::cancel() {
  99   ShenandoahConcurrentMark::cancel();
 100 }
 101 
 102 bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
 103   ShenandoahHeap* const heap = ShenandoahHeap::heap();

 104   ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);
 105 
 106   // Reset for upcoming marking
 107   entry_reset();
 108 
 109   // Start initial mark under STW
 110   vmop_entry_init_mark();
 111 
 112   {
 113     ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);










 114     // Concurrent mark roots
 115     entry_mark_roots();
 116     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_outside_cycle)) {
 117       return false;
 118     }
 119 
 120     // Continue concurrent mark
 121     entry_mark();
 122     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) {
 123       return false;
 124     }
 125   }
 126 
 127   // Complete marking under STW, and start evacuation
 128   vmop_entry_final_mark();
 129 
 130   // If the GC was cancelled before final mark, nothing happens on the safepoint. We are still
 131   // in the marking phase and must resume the degenerated cycle from there. If the GC was cancelled
 132   // after final mark, then we've entered the evacuation phase and must resume the degenerated cycle
 133   // from that phase.
 134   if (heap->is_concurrent_mark_in_progress()) {
 135     bool cancelled = check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark);
 136     assert(cancelled, "GC must have been cancelled between concurrent and final mark");
 137     return false;
 138   }
 139 
 140   // Concurrent stack processing
 141   if (heap->is_evacuation_in_progress()) {
 142     entry_thread_roots();
 143   }
 144 
 145   // Process weak roots that might still point to regions that would be broken by cleanup
 146   if (heap->is_concurrent_weak_root_in_progress()) {
 147     entry_weak_refs();
 148     entry_weak_roots();
 149   }
 150 
 151   // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
 152   // the space. This would be the last action if there is nothing to evacuate.

 153   entry_cleanup_early();
 154 
 155   {


 156     ShenandoahHeapLocker locker(heap->lock());
 157     heap->free_set()->log_status();
 158   }
 159 
 160   // Perform concurrent class unloading
 161   if (heap->unload_classes() &&
 162       heap->is_concurrent_weak_root_in_progress()) {
 163     entry_class_unloading();
 164   }
 165 
 166   // Processing strong roots
 167   // This may be skipped if there is nothing to update/evacuate.
 168   // If so, strong_root_in_progress would be unset.
 169   if (heap->is_concurrent_strong_root_in_progress()) {
 170     entry_strong_roots();
 171   }
 172 
 173   // Continue the cycle with evacuation and optional update-refs.
 174   // This may be skipped if there is nothing to evacuate.
 175   // If so, evac_in_progress would be unset by collection set preparation code.
 176   if (heap->is_evacuation_in_progress()) {
 177     // Concurrently evacuate
 178     entry_evacuate();
 179     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) {
 180       return false;
 181     }

 182 

 183     // Perform update-refs phase.
 184     vmop_entry_init_updaterefs();
 185     entry_updaterefs();
 186     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) {
 187       return false;
 188     }
 189 
 190     // Concurrent update thread roots
 191     entry_update_thread_roots();
 192     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) {
 193       return false;
 194     }
 195 
 196     vmop_entry_final_updaterefs();
 197 
 198     // Update references freed up collection set, kick the cleanup to reclaim the space.
 199     entry_cleanup_complete();
 200   } else {




 201     vmop_entry_final_roots();
 202     _abbreviated = true;
 203   }
 204 





 205   return true;
 206 }
 207 
 208 void ShenandoahConcurrentGC::vmop_entry_init_mark() {
 209   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 210   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 211   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);
 212 
 213   heap->try_inject_alloc_failure();
 214   VM_ShenandoahInitMark op(this);
 215   VMThread::execute(&op); // jump to entry_init_mark() under safepoint
 216 }
 217 
 218 void ShenandoahConcurrentGC::vmop_entry_final_mark() {
 219   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 220   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 221   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);
 222 
 223   heap->try_inject_alloc_failure();
 224   VM_ShenandoahFinalMarkStartEvac op(this);

 294   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
 295   EventMark em("%s", msg);
 296 
 297   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
 298                               ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
 299                               "final reference update");
 300 
 301   op_final_updaterefs();
 302 }
 303 
 304 void ShenandoahConcurrentGC::entry_final_roots() {
 305   static const char* msg = "Pause Final Roots";
 306   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots);
 307   EventMark em("%s", msg);
 308 
 309   op_final_roots();
 310 }
 311 
 312 void ShenandoahConcurrentGC::entry_reset() {
 313   ShenandoahHeap* const heap = ShenandoahHeap::heap();


 314   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 315   static const char* msg = "Concurrent reset";
 316   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
 317   EventMark em("%s", msg);







 318 
 319   ShenandoahWorkerScope scope(heap->workers(),
 320                               ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
 321                               "concurrent reset");




 322 
 323   heap->try_inject_alloc_failure();
 324   op_reset();

















 325 }
 326 
 327 void ShenandoahConcurrentGC::entry_mark_roots() {
 328   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 329   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 330   const char* msg = "Concurrent marking roots";
 331   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
 332   EventMark em("%s", msg);
 333 
 334   ShenandoahWorkerScope scope(heap->workers(),
 335                               ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
 336                               "concurrent marking roots");
 337 
 338   heap->try_inject_alloc_failure();
 339   op_mark_roots();
 340 }
 341 
 342 void ShenandoahConcurrentGC::entry_mark() {
 343   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 344   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

 485   op_updaterefs();
 486 }
 487 
 488 void ShenandoahConcurrentGC::entry_cleanup_complete() {
 489   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 490   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 491   static const char* msg = "Concurrent cleanup";
 492   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
 493   EventMark em("%s", msg);
 494 
 495   // This phase does not use workers, no need for setup
 496   heap->try_inject_alloc_failure();
 497   op_cleanup_complete();
 498 }
 499 
 500 void ShenandoahConcurrentGC::op_reset() {
 501   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 502   if (ShenandoahPacing) {
 503     heap->pacer()->setup_for_reset();
 504   }
 505 
 506   heap->prepare_gc();
 507 }
 508 
 509 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
 510 private:
 511   ShenandoahMarkingContext* const _ctx;
 512 public:
 513   ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
 514 
 515   void heap_region_do(ShenandoahHeapRegion* r) {
 516     assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
 517     if (r->is_active()) {
 518       // Check if region needs updating its TAMS. We have updated it already during concurrent
 519       // reset, so it is very likely we don't need to do another write here.

 520       if (_ctx->top_at_mark_start(r) != r->top()) {
 521         _ctx->capture_top_at_mark_start(r);
 522       }
 523     } else {
 524       assert(_ctx->top_at_mark_start(r) == r->top(),
 525              "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
 526     }
 527   }
 528 
 529   bool is_thread_safe() { return true; }
 530 };
 531 
 532 void ShenandoahConcurrentGC::start_mark() {
 533   _mark.start_mark();
 534 }
 535 
 536 void ShenandoahConcurrentGC::op_init_mark() {
 537   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 538   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
 539   assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
 540 
 541   assert(heap->marking_context()->is_bitmap_clear(), "need clear marking bitmap");
 542   assert(!heap->marking_context()->is_complete(), "should not be complete");
 543   assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
 544 






















 545   if (ShenandoahVerify) {
 546     heap->verifier()->verify_before_concmark();
 547   }
 548 
 549   if (VerifyBeforeGC) {
 550     Universe::verify();
 551   }
 552 
 553   heap->set_concurrent_mark_in_progress(true);
 554 
 555   start_mark();
 556 
 557   {






 558     ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
 559     ShenandoahInitMarkUpdateRegionStateClosure cl;
 560     heap->parallel_heap_region_iterate(&cl);






 561   }
 562 
 563   // Weak reference processing
 564   ShenandoahReferenceProcessor* rp = heap->ref_processor();
 565   rp->reset_thread_locals();
 566   rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());
 567 
 568   // Make above changes visible to worker threads
 569   OrderAccess::fence();
 570 
 571   // Arm nmethods for concurrent mark
 572   ShenandoahCodeRoots::arm_nmethods_for_mark();
 573 
 574   ShenandoahStackWatermark::change_epoch_id();
 575   if (ShenandoahPacing) {
 576     heap->pacer()->setup_for_mark();
 577   }
 578 }
 579 
 580 void ShenandoahConcurrentGC::op_mark_roots() {
 581   _mark.mark_concurrent_roots();
 582 }
 583 
 584 void ShenandoahConcurrentGC::op_mark() {
 585   _mark.concurrent_mark();
 586 }
 587 
 588 void ShenandoahConcurrentGC::op_final_mark() {
 589   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 590   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
 591   assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
 592 
 593   if (ShenandoahVerify) {
 594     heap->verifier()->verify_roots_no_forwarded();
 595   }
 596 
 597   if (!heap->cancelled_gc()) {
 598     _mark.finish_mark();
 599     assert(!heap->cancelled_gc(), "STW mark cannot OOM");
 600 
 601     // Notify JVMTI that the tagmap table will need cleaning.
 602     JvmtiTagMap::set_needs_cleaning();
 603 
 604     heap->prepare_regions_and_collection_set(true /*concurrent*/);

























 605 
 606     // Has to be done after cset selection
 607     heap->prepare_concurrent_roots();
 608 
 609     if (!heap->collection_set()->is_empty()) {










 610       if (ShenandoahVerify) {
 611         heap->verifier()->verify_before_evacuation();
 612       }
 613 

 614       heap->set_evacuation_in_progress(true);
 615       // From here on, we need to update references.
 616       heap->set_has_forwarded_objects(true);
 617 
 618       // Verify before arming for concurrent processing.
 619       // Otherwise, verification can trigger stack processing.
 620       if (ShenandoahVerify) {
 621         heap->verifier()->verify_during_evacuation();
 622       }
 623 





 624       // Arm nmethods/stack for concurrent processing
 625       ShenandoahCodeRoots::arm_nmethods_for_evac();
 626       ShenandoahStackWatermark::change_epoch_id();





 627 
 628       if (ShenandoahPacing) {
 629         heap->pacer()->setup_for_evac();
 630       }
 631     } else {
 632       if (ShenandoahVerify) {
 633         heap->verifier()->verify_after_concmark();
 634       }
 635 
 636       if (VerifyAfterGC) {
 637         Universe::verify();
 638       }
 639     }
 640   }
 641 }
 642 





 643 class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
 644 private:
 645   OopClosure* const _oops;
 646 
 647 public:
 648   ShenandoahConcurrentEvacThreadClosure(OopClosure* oops);
 649   void do_thread(Thread* thread);
 650 };
 651 
 652 ShenandoahConcurrentEvacThreadClosure::ShenandoahConcurrentEvacThreadClosure(OopClosure* oops) :
 653   _oops(oops) {
 654 }
 655 
 656 void ShenandoahConcurrentEvacThreadClosure::do_thread(Thread* thread) {
 657   JavaThread* const jt = JavaThread::cast(thread);
 658   StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc);
 659 }
 660 

 661 class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask {
 662 private:
 663   ShenandoahJavaThreadsIterator _java_threads;
 664 
 665 public:
 666   ShenandoahConcurrentEvacUpdateThreadTask(uint n_workers) :
 667     WorkerTask("Shenandoah Evacuate/Update Concurrent Thread Roots"),
 668     _java_threads(ShenandoahPhaseTimings::conc_thread_roots, n_workers) {
 669   }
 670 
 671   void work(uint worker_id) {





 672     // ShenandoahEvacOOMScope has to be setup by ShenandoahContextEvacuateUpdateRootsClosure.
 673     // Otherwise, may deadlock with watermark lock
 674     ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
 675     ShenandoahConcurrentEvacThreadClosure thr_cl(&oops_cl);
 676     _java_threads.threads_do(&thr_cl, worker_id);
 677   }
 678 };
 679 
 680 void ShenandoahConcurrentGC::op_thread_roots() {
 681   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 682   assert(heap->is_evacuation_in_progress(), "Checked by caller");
 683   ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
 684   ShenandoahConcurrentEvacUpdateThreadTask task(heap->workers()->active_workers());
 685   heap->workers()->run_task(&task);





 686 }
 687 
 688 void ShenandoahConcurrentGC::op_weak_refs() {
 689   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 690   assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
 691   // Concurrent weak refs processing
 692   ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
 693   if (heap->gc_cause() == GCCause::_wb_breakpoint) {
 694     ShenandoahBreakpoint::at_after_reference_processing_started();
 695   }
 696   heap->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
 697 }
 698 
 699 class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
 700 private:
 701   ShenandoahHeap* const _heap;
 702   ShenandoahMarkingContext* const _mark_context;
 703   bool  _evac_in_progress;
 704   Thread* const _thread;
 705 
 706 public:
 707   ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
 708   void do_oop(oop* p);
 709   void do_oop(narrowOop* p);
 710 };
 711 
 712 ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
 713   _heap(ShenandoahHeap::heap()),
 714   _mark_context(ShenandoahHeap::heap()->marking_context()),
 715   _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
 716   _thread(Thread::current()) {
 717 }
 718 
 719 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
 720   const oop obj = RawAccess<>::oop_load(p);
 721   if (!CompressedOops::is_null(obj)) {
 722     if (!_mark_context->is_marked(obj)) {
 723       shenandoah_assert_correct(p, obj);
 724       ShenandoahHeap::atomic_clear_oop(p, obj);







 725     } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
 726       oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 727       if (resolved == obj) {
 728         resolved = _heap->evacuate_object(obj, _thread);
 729       }
 730       shenandoah_assert_not_in_cset_except(p, resolved, _heap->cancelled_gc());
 731       ShenandoahHeap::atomic_update_oop(resolved, p, obj);
 732     }
 733   }
 734 }
 735 
 736 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
 737   ShouldNotReachHere();
 738 }
 739 
 740 class ShenandoahIsCLDAliveClosure : public CLDClosure {
 741 public:
 742   void do_cld(ClassLoaderData* cld) {
 743     cld->is_alive();
 744   }

 928   assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
 929   ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
 930   heap->workers()->run_task(&task);
 931   heap->set_concurrent_strong_root_in_progress(false);
 932 }
 933 
 934 void ShenandoahConcurrentGC::op_cleanup_early() {
 935   ShenandoahHeap::heap()->free_set()->recycle_trash();
 936 }
 937 
 938 void ShenandoahConcurrentGC::op_evacuate() {
 939   ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
 940 }
 941 
 942 void ShenandoahConcurrentGC::op_init_updaterefs() {
 943   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 944   heap->set_evacuation_in_progress(false);
 945   heap->set_concurrent_weak_root_in_progress(false);
 946   heap->prepare_update_heap_references(true /*concurrent*/);
 947   heap->set_update_refs_in_progress(true);
 948 


 949   if (ShenandoahPacing) {
 950     heap->pacer()->setup_for_updaterefs();
 951   }
 952 }
 953 
 954 void ShenandoahConcurrentGC::op_updaterefs() {
 955   ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
 956 }
 957 
 958 class ShenandoahUpdateThreadClosure : public HandshakeClosure {
 959 private:
 960   ShenandoahUpdateRefsClosure _cl;
 961 public:
 962   ShenandoahUpdateThreadClosure();
 963   void do_thread(Thread* thread);
 964 };
 965 
 966 ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
 967   HandshakeClosure("Shenandoah Update Thread Roots") {
 968 }

 973     ResourceMark rm;
 974     jt->oops_do(&_cl, nullptr);
 975   }
 976 }
 977 
 978 void ShenandoahConcurrentGC::op_update_thread_roots() {
 979   ShenandoahUpdateThreadClosure cl;
 980   Handshake::execute(&cl);
 981 }
 982 
 983 void ShenandoahConcurrentGC::op_final_updaterefs() {
 984   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 985   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
 986   assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");
 987 
 988   heap->finish_concurrent_roots();
 989 
 990   // Clear cancelled GC, if set. On cancellation path, the block before would handle
 991   // everything.
 992   if (heap->cancelled_gc()) {
 993     heap->clear_cancelled_gc();
 994   }
 995 
 996   // Has to be done before cset is clear
 997   if (ShenandoahVerify) {
 998     heap->verifier()->verify_roots_in_to_space();
 999   }
1000 


1001   heap->update_heap_region_states(true /*concurrent*/);
1002 
1003   heap->set_update_refs_in_progress(false);
1004   heap->set_has_forwarded_objects(false);
1005 






















1006   if (ShenandoahVerify) {
1007     heap->verifier()->verify_after_updaterefs();
1008   }
1009 
1010   if (VerifyAfterGC) {
1011     Universe::verify();
1012   }
1013 
1014   heap->rebuild_free_set(true /*concurrent*/);
1015 }
1016 
1017 void ShenandoahConcurrentGC::op_final_roots() {
1018   ShenandoahHeap::heap()->set_concurrent_weak_root_in_progress(false);
















1019 }
1020 
1021 void ShenandoahConcurrentGC::op_cleanup_complete() {
1022   ShenandoahHeap::heap()->free_set()->recycle_trash();
1023 }
1024 
1025 bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
1026   if (ShenandoahHeap::heap()->cancelled_gc()) {
1027     _degen_point = point;
1028     return true;
1029   }
1030   return false;
1031 }
1032 
1033 const char* ShenandoahConcurrentGC::init_mark_event_message() const {
1034   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1035   assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1036   if (heap->unload_classes()) {
1037     return "Pause Init Mark (unload classes)";
1038   } else {
1039     return "Pause Init Mark";
1040   }
1041 }
1042 
1043 const char* ShenandoahConcurrentGC::final_mark_event_message() const {
1044   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1045   assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");


1046   if (heap->unload_classes()) {
1047     return "Pause Final Mark (unload classes)";
1048   } else {
1049     return "Pause Final Mark";
1050   }
1051 }
1052 
1053 const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
1054   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1055   assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");

1056   if (heap->unload_classes()) {
1057     return "Concurrent marking (unload classes)";
1058   } else {
1059     return "Concurrent marking";
1060   }
1061 }

   1 /*
   2  * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
   3  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 
  28 #include "gc/shared/barrierSetNMethod.hpp"
  29 #include "gc/shared/collectorCounters.hpp"
  30 #include "gc/shared/continuationGCSupport.inline.hpp"
  31 #include "gc/shenandoah/shenandoahBreakpoint.hpp"
  32 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  33 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
  34 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  35 #include "gc/shenandoah/shenandoahGeneration.hpp"
  36 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
  37 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  38 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  39 #include "gc/shenandoah/shenandoahLock.hpp"
  40 #include "gc/shenandoah/shenandoahMark.inline.hpp"
  41 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  42 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  43 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  44 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  45 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  46 #include "gc/shenandoah/shenandoahStackWatermark.hpp"
  47 #include "gc/shenandoah/shenandoahUtils.hpp"
  48 #include "gc/shenandoah/shenandoahVerifier.hpp"
  49 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  50 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  51 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  52 #include "memory/allocation.hpp"
  53 #include "prims/jvmtiTagMap.hpp"
  54 #include "runtime/vmThread.hpp"
  55 #include "utilities/events.hpp"
  56 
  57 // Breakpoint support
  58 class ShenandoahBreakpointGCScope : public StackObj {

  73   }
  74 };
  75 
  76 class ShenandoahBreakpointMarkScope : public StackObj {
  77 private:
  78   const GCCause::Cause _cause;
  79 public:
  80   ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
  81     if (_cause == GCCause::_wb_breakpoint) {
  82       ShenandoahBreakpoint::at_after_marking_started();
  83     }
  84   }
  85 
  86   ~ShenandoahBreakpointMarkScope() {
  87     if (_cause == GCCause::_wb_breakpoint) {
  88       ShenandoahBreakpoint::at_before_marking_completed();
  89     }
  90   }
  91 };
  92 
  93 ShenandoahConcurrentGC::ShenandoahConcurrentGC(ShenandoahGeneration* generation, bool do_old_gc_bootstrap) :
  94   _mark(generation),
  95   _degen_point(ShenandoahDegenPoint::_degenerated_unset),
  96   _abbreviated(false),
  97   _do_old_gc_bootstrap(do_old_gc_bootstrap),
  98   _generation(generation) {
  99 }
 100 
 101 ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
 102   return _degen_point;
 103 }
 104 




 105 bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
 106   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 107 
 108   ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);
 109 
 110   // Reset for upcoming marking
 111   entry_reset();
 112 
 113   // Start initial mark under STW
 114   vmop_entry_init_mark();
 115 
 116   {
 117     ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);
 118 
 119     // Reset task queue stats here, rather than in mark_concurrent_roots,
 120     // because remembered set scan will `push` oops into the queues and
 121     // resetting after this happens will lose those counts.
 122     TASKQUEUE_STATS_ONLY(_mark.task_queues()->reset_taskqueue_stats());
 123 
 124     // Concurrent remembered set scanning
 125     entry_scan_remembered_set();
 126     // TODO: When RS scanning yields, we will need a check_cancellation_and_abort() degeneration point here.
 127 
 128     // Concurrent mark roots
 129     entry_mark_roots();
 130     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_roots)) {
 131       return false;
 132     }
 133 
 134     // Continue concurrent mark
 135     entry_mark();
 136     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) {
 137       return false;
 138     }
 139   }
 140 
 141   // Complete marking under STW, and start evacuation
 142   vmop_entry_final_mark();
 143 
 144   // If the GC was cancelled before final mark, nothing happens on the safepoint. We are still
 145   // in the marking phase and must resume the degenerated cycle from there. If the GC was cancelled
 146   // after final mark, then we've entered the evacuation phase and must resume the degenerated cycle
 147   // from that phase.
 148   if (_generation->is_concurrent_mark_in_progress()) {
 149     bool cancelled = check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark);
 150     assert(cancelled, "GC must have been cancelled between concurrent and final mark");
 151     return false;
 152   }
 153 
 154   // Concurrent stack processing
 155   if (heap->is_evacuation_in_progress()) {
 156     entry_thread_roots();
 157   }
 158 
 159   // Process weak roots that might still point to regions that would be broken by cleanup
 160   if (heap->is_concurrent_weak_root_in_progress()) {
 161     entry_weak_refs();
 162     entry_weak_roots();
 163   }
 164 
 165   // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
 166   // the space. This would be the last action if there is nothing to evacuate.  Note that
 167   // we will not age young-gen objects in the case that we skip evacuation.
 168   entry_cleanup_early();
 169 
 170   {
 171     // TODO: Not sure there is value in logging free-set status right here.  Note that whenever the free set is rebuilt,
 172     // it logs the newly rebuilt status.
 173     ShenandoahHeapLocker locker(heap->lock());
 174     heap->free_set()->log_status();
 175   }
 176 
 177   // Perform concurrent class unloading
 178   if (heap->unload_classes() &&
 179       heap->is_concurrent_weak_root_in_progress()) {
 180     entry_class_unloading();
 181   }
 182 
 183   // Processing strong roots
 184   // This may be skipped if there is nothing to update/evacuate.
 185   // If so, strong_root_in_progress would be unset.
 186   if (heap->is_concurrent_strong_root_in_progress()) {
 187     entry_strong_roots();
 188   }
 189 
 190   // Continue the cycle with evacuation and optional update-refs.
 191   // This may be skipped if there is nothing to evacuate.
 192   // If so, evac_in_progress would be unset by collection set preparation code.
 193   if (heap->is_evacuation_in_progress()) {
 194     // Concurrently evacuate
 195     entry_evacuate();
 196     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) {
 197       return false;
 198     }
 199   }
 200 
 201   if (heap->has_forwarded_objects()) {
 202     // Perform update-refs phase.
 203     vmop_entry_init_updaterefs();
 204     entry_updaterefs();
 205     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) {
 206       return false;
 207     }
 208 
 209     // Concurrent update thread roots
 210     entry_update_thread_roots();
 211     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) {
 212       return false;
 213     }
 214 
 215     vmop_entry_final_updaterefs();
 216 
 217     // Update references freed up collection set, kick the cleanup to reclaim the space.
 218     entry_cleanup_complete();
 219   } else {
 220     // We chose not to evacuate because we found sufficient immediate garbage. Note that we
 221     // do not check for cancellation here because, at this point, the cycle is effectively
 222     // complete. If the cycle has been cancelled here, the control thread will detect it
 223     // on its next iteration and run a degenerated young cycle.
 224     vmop_entry_final_roots();
 225     _abbreviated = true;
 226   }
 227 
 228   // We defer generation resizing actions until after cset regions have been recycled.  We do this even following an
 229   // abbreviated cycle.
 230   if (heap->mode()->is_generational()) {
 231     ShenandoahGenerationalHeap::heap()->complete_concurrent_cycle();
 232   }
 233   return true;
 234 }
 235 
 236 void ShenandoahConcurrentGC::vmop_entry_init_mark() {
 237   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 238   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 239   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);
 240 
 241   heap->try_inject_alloc_failure();
 242   VM_ShenandoahInitMark op(this);
 243   VMThread::execute(&op); // jump to entry_init_mark() under safepoint
 244 }
 245 
 246 void ShenandoahConcurrentGC::vmop_entry_final_mark() {
 247   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 248   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 249   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);
 250 
 251   heap->try_inject_alloc_failure();
 252   VM_ShenandoahFinalMarkStartEvac op(this);

 322   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
 323   EventMark em("%s", msg);
 324 
 325   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
 326                               ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
 327                               "final reference update");
 328 
 329   op_final_updaterefs();
 330 }
 331 
 332 void ShenandoahConcurrentGC::entry_final_roots() {
 333   static const char* msg = "Pause Final Roots";
 334   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots);
 335   EventMark em("%s", msg);
 336 
 337   op_final_roots();
 338 }
 339 
 340 void ShenandoahConcurrentGC::entry_reset() {
 341   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 342   heap->try_inject_alloc_failure();
 343 
 344   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 345   {
 346     static const char* msg = "Concurrent reset";
 347     ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
 348     EventMark em("%s", msg);
 349 
 350     ShenandoahWorkerScope scope(heap->workers(),
 351                                 ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
 352                                 msg);
 353     op_reset();
 354   }
 355 
 356   if (_do_old_gc_bootstrap) {
 357     static const char* msg = "Concurrent reset (OLD)";
 358     ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset_old);
 359     ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
 360                                 ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
 361                                 msg);
 362     EventMark em("%s", msg);
 363 
 364     heap->old_generation()->prepare_gc();
 365   }
 366 }
 367 
 368 void ShenandoahConcurrentGC::entry_scan_remembered_set() {
 369   if (_generation->is_young()) {
 370     ShenandoahHeap* const heap = ShenandoahHeap::heap();
 371     TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 372     const char* msg = "Concurrent remembered set scanning";
 373     ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::init_scan_rset);
 374     EventMark em("%s", msg);
 375 
 376     ShenandoahWorkerScope scope(heap->workers(),
 377                                 ShenandoahWorkerPolicy::calc_workers_for_rs_scanning(),
 378                                 msg);
 379 
 380     heap->try_inject_alloc_failure();
 381     _generation->scan_remembered_set(true /* is_concurrent */);
 382   }
 383 }
 384 
 385 void ShenandoahConcurrentGC::entry_mark_roots() {
 386   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 387   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 388   const char* msg = "Concurrent marking roots";
 389   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
 390   EventMark em("%s", msg);
 391 
 392   ShenandoahWorkerScope scope(heap->workers(),
 393                               ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
 394                               "concurrent marking roots");
 395 
 396   heap->try_inject_alloc_failure();
 397   op_mark_roots();
 398 }
 399 
 400 void ShenandoahConcurrentGC::entry_mark() {
 401   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 402   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

 543   op_updaterefs();
 544 }
 545 
 546 void ShenandoahConcurrentGC::entry_cleanup_complete() {
 547   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 548   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 549   static const char* msg = "Concurrent cleanup";
 550   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
 551   EventMark em("%s", msg);
 552 
 553   // This phase does not use workers, no need for setup
 554   heap->try_inject_alloc_failure();
 555   op_cleanup_complete();
 556 }
 557 
 558 void ShenandoahConcurrentGC::op_reset() {
 559   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 560   if (ShenandoahPacing) {
 561     heap->pacer()->setup_for_reset();
 562   }
 563   _generation->prepare_gc();

 564 }
 565 
 566 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
 567 private:
 568   ShenandoahMarkingContext* const _ctx;
 569 public:
 570   ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
 571 
 572   void heap_region_do(ShenandoahHeapRegion* r) {
 573     assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
 574     if (r->is_active()) {
 575       // Check if region needs updating its TAMS. We have updated it already during concurrent
 576       // reset, so it is very likely we don't need to do another write here.  Since most regions
 577       // are not "active", this path is relatively rare.
 578       if (_ctx->top_at_mark_start(r) != r->top()) {
 579         _ctx->capture_top_at_mark_start(r);
 580       }
 581     } else {
 582       assert(_ctx->top_at_mark_start(r) == r->top(),
 583              "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
 584     }
 585   }
 586 
 587   bool is_thread_safe() { return true; }
 588 };
 589 
 590 void ShenandoahConcurrentGC::start_mark() {
 591   _mark.start_mark();
 592 }
 593 
 594 void ShenandoahConcurrentGC::op_init_mark() {
 595   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 596   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
 597   assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
 598 
 599   assert(_generation->is_bitmap_clear(), "need clear marking bitmap");
 600   assert(!_generation->is_mark_complete(), "should not be complete");
 601   assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
 602 
 603 
 604   if (heap->mode()->is_generational()) {
 605     if (_generation->is_young() || (_generation->is_global() && ShenandoahVerify)) {
 606       // The current implementation of swap_remembered_set() copies the write-card-table
 607       // to the read-card-table. The remembered sets are also swapped for GLOBAL collections
 608       // so that the verifier works with the correct copy of the card table when verifying.
 609       // TODO: This path should not really depend on ShenandoahVerify.
 610       ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_swap_rset);
 611       _generation->swap_remembered_set();
 612     }
 613 
 614     if (_generation->is_global()) {
 615       heap->old_generation()->cancel_gc();
 616     } else if (heap->is_concurrent_old_mark_in_progress()) {
 617       // Purge the SATB buffers, transferring any valid, old pointers to the
 618       // old generation mark queue. Any pointers in a young region will be
 619       // abandoned.
 620       ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_transfer_satb);
 621       heap->old_generation()->transfer_pointers_from_satb();
 622     }
 623   }
 624 
 625   if (ShenandoahVerify) {
 626     heap->verifier()->verify_before_concmark();
 627   }
 628 
 629   if (VerifyBeforeGC) {
 630     Universe::verify();
 631   }
 632 
 633   _generation->set_concurrent_mark_in_progress(true);
 634 
 635   start_mark();
 636 
 637   if (_do_old_gc_bootstrap) {
 638     shenandoah_assert_generational();
 639     // Update region state for both young and old regions
 640     // TODO: We should be able to pull this out of the safepoint for the bootstrap
 641     // cycle. The top of an old region will only move when a GC cycle evacuates
 642     // objects into it. When we start an old cycle, we know that nothing can touch
 643     // the top of old regions.
 644     ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
 645     ShenandoahInitMarkUpdateRegionStateClosure cl;
 646     heap->parallel_heap_region_iterate(&cl);
 647     heap->old_generation()->ref_processor()->reset_thread_locals();
 648   } else {
 649     // Update region state for only young regions
 650     ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
 651     ShenandoahInitMarkUpdateRegionStateClosure cl;
 652     _generation->parallel_heap_region_iterate(&cl);
 653   }
 654 
 655   // Weak reference processing
 656   ShenandoahReferenceProcessor* rp = _generation->ref_processor();
 657   rp->reset_thread_locals();
 658   rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());
 659 
 660   // Make above changes visible to worker threads
 661   OrderAccess::fence();
 662 
 663   // Arm nmethods for concurrent mark
 664   ShenandoahCodeRoots::arm_nmethods_for_mark();
 665 
 666   ShenandoahStackWatermark::change_epoch_id();
 667   if (ShenandoahPacing) {
 668     heap->pacer()->setup_for_mark();
 669   }
 670 }
 671 
 672 void ShenandoahConcurrentGC::op_mark_roots() {
 673   _mark.mark_concurrent_roots();
 674 }
 675 
 676 void ShenandoahConcurrentGC::op_mark() {
 677   _mark.concurrent_mark();
 678 }
 679 
 680 void ShenandoahConcurrentGC::op_final_mark() {
 681   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 682   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
 683   assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
 684 
 685   if (ShenandoahVerify) {
 686     heap->verifier()->verify_roots_no_forwarded();
 687   }
 688 
 689   if (!heap->cancelled_gc()) {
 690     _mark.finish_mark();
 691     assert(!heap->cancelled_gc(), "STW mark cannot OOM");
 692 
 693     // Notify JVMTI that the tagmap table will need cleaning.
 694     JvmtiTagMap::set_needs_cleaning();
 695 
 696     // The collection set is chosen by prepare_regions_and_collection_set().
 697     //
 698     // TODO: Under severe memory overload conditions that can be checked here, we may want to limit
 699     // the inclusion of old-gen candidates within the collection set.  This would allow us to prioritize efforts on
 700     // evacuating young-gen.  This remediation is most appropriate when old-gen availability is very high (so there
 701     // are negligible negative impacts from delaying completion of old-gen evacuation) and when young-gen collections
 702     // are "under duress" (as signalled by very low availability of memory within young-gen, indicating that young-gen
 703     // collections are not triggering frequently enough).
 704     _generation->prepare_regions_and_collection_set(true /*concurrent*/);
 705 
 706     // Upon return from prepare_regions_and_collection_set(), certain parameters have been established to govern the
 707     // evacuation efforts that are about to begin.  In particular:
 708     //
 709     // heap->get_promoted_reserve() represents the amount of memory within old-gen's available memory that has
 710     //   been set aside to hold objects promoted from young-gen memory.  This represents an estimated percentage
 711     //   of the live young-gen memory within the collection set.  If there is more data ready to be promoted than
 712     //   can fit within this reserve, the promotion of some objects will be deferred until a subsequent evacuation
 713     //   pass.
 714     //
 715     // heap->get_old_evac_reserve() represents the amount of memory within old-gen's available memory that has been
 716     //  set aside to hold objects evacuated from the old-gen collection set.
 717     //
 718     // heap->get_young_evac_reserve() represents the amount of memory within young-gen's available memory that has
 719     //  been set aside to hold objects evacuated from the young-gen collection set.  Conservatively, this value
 720     //  equals the entire amount of live young-gen memory within the collection set, even though some of this memory
 721     //  will likely be promoted.
 722 
 723     // Has to be done after cset selection
 724     heap->prepare_concurrent_roots();
 725 
 726     if (!heap->collection_set()->is_empty() || has_in_place_promotions(heap)) {
 727       // Even if the collection set is empty, we need to do evacuation if there are regions to be promoted in place.
 728       // Concurrent evacuation takes responsibility for registering objects and setting the remembered set cards to dirty.
 729 
 730       LogTarget(Debug, gc, cset) lt;
 731       if (lt.is_enabled()) {
 732         ResourceMark rm;
 733         LogStream ls(lt);
 734         heap->collection_set()->print_on(&ls);
 735       }
 736 
 737       if (ShenandoahVerify) {
 738         heap->verifier()->verify_before_evacuation();
 739       }
 740 
 741       // TODO: Do we need to set this if we are only promoting regions in place? We don't need the barriers on for that.
 742       heap->set_evacuation_in_progress(true);


 743 
 744       // Verify before arming for concurrent processing.
 745       // Otherwise, verification can trigger stack processing.
 746       if (ShenandoahVerify) {
 747         heap->verifier()->verify_during_evacuation();
 748       }
 749 
 750       // Generational mode may promote objects in place during the evacuation phase.
 751       // If that is the only reason we are evacuating, we don't need to update references
 752       // and there will be no forwarded objects on the heap.
 753       heap->set_has_forwarded_objects(!heap->collection_set()->is_empty());
 754 
 755       // Arm nmethods/stack for concurrent processing
 756       if (!heap->collection_set()->is_empty()) {
 757         // Iff objects will be evacuated, arm the nmethod barriers. These will be disarmed
 758         // under the same condition (established in prepare_concurrent_roots) after strong
 759         // root evacuation has completed (see op_strong_roots).
 760         ShenandoahCodeRoots::arm_nmethods_for_evac();
 761         ShenandoahStackWatermark::change_epoch_id();
 762       }
 763 
 764       if (ShenandoahPacing) {
 765         heap->pacer()->setup_for_evac();
 766       }
 767     } else {
 768       if (ShenandoahVerify) {
 769         heap->verifier()->verify_after_concmark();
 770       }
 771 
 772       if (VerifyAfterGC) {
 773         Universe::verify();
 774       }
 775     }
 776   }
 777 }
 778 
 779 bool ShenandoahConcurrentGC::has_in_place_promotions(ShenandoahHeap* heap) {
 780   return heap->mode()->is_generational() && heap->old_generation()->has_in_place_promotions();
 781 }
 782 
 783 template<bool GENERATIONAL>
 784 class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
 785 private:
 786   OopClosure* const _oops;

 787 public:
 788   explicit ShenandoahConcurrentEvacThreadClosure(OopClosure* oops) : _oops(oops) {}


 789 
 790   void do_thread(Thread* thread) override {
 791     JavaThread* const jt = JavaThread::cast(thread);
 792     StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc);
 793     if (GENERATIONAL) {
 794       ShenandoahThreadLocalData::enable_plab_promotions(thread);
 795     }
 796   }
 797 };
 798 
 799 template<bool GENERATIONAL>
 800 class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask {
 801 private:
 802   ShenandoahJavaThreadsIterator _java_threads;
 803 
 804 public:
 805   explicit ShenandoahConcurrentEvacUpdateThreadTask(uint n_workers) :
 806     WorkerTask("Shenandoah Evacuate/Update Concurrent Thread Roots"),
 807     _java_threads(ShenandoahPhaseTimings::conc_thread_roots, n_workers) {
 808   }
 809 
 810   void work(uint worker_id) override {
 811     if (GENERATIONAL) {
 812       Thread* worker_thread = Thread::current();
 813       ShenandoahThreadLocalData::enable_plab_promotions(worker_thread);
 814     }
 815 
 816     // ShenandoahEvacOOMScope has to be setup by ShenandoahContextEvacuateUpdateRootsClosure.
 817     // Otherwise, may deadlock with watermark lock
 818     ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
 819     ShenandoahConcurrentEvacThreadClosure<GENERATIONAL> thr_cl(&oops_cl);
 820     _java_threads.threads_do(&thr_cl, worker_id);
 821   }
 822 };
 823 
 824 void ShenandoahConcurrentGC::op_thread_roots() {
 825   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 826   assert(heap->is_evacuation_in_progress(), "Checked by caller");
 827   ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
 828   if (heap->mode()->is_generational()) {
 829     ShenandoahConcurrentEvacUpdateThreadTask<true> task(heap->workers()->active_workers());
 830     heap->workers()->run_task(&task);
 831   } else {
 832     ShenandoahConcurrentEvacUpdateThreadTask<false> task(heap->workers()->active_workers());
 833     heap->workers()->run_task(&task);
 834   }
 835 }
 836 
 837 void ShenandoahConcurrentGC::op_weak_refs() {
 838   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 839   assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
 840   // Concurrent weak refs processing
 841   ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
 842   if (heap->gc_cause() == GCCause::_wb_breakpoint) {
 843     ShenandoahBreakpoint::at_after_reference_processing_started();
 844   }
 845   _generation->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
 846 }
 847 
 848 class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
 849 private:
 850   ShenandoahHeap* const _heap;
 851   ShenandoahMarkingContext* const _mark_context;
 852   bool  _evac_in_progress;
 853   Thread* const _thread;
 854 
 855 public:
 856   ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
 857   void do_oop(oop* p);
 858   void do_oop(narrowOop* p);
 859 };
 860 
 861 ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
 862   _heap(ShenandoahHeap::heap()),
 863   _mark_context(ShenandoahHeap::heap()->marking_context()),
 864   _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
 865   _thread(Thread::current()) {
 866 }
 867 
 868 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
 869   const oop obj = RawAccess<>::oop_load(p);
 870   if (!CompressedOops::is_null(obj)) {
 871     if (!_mark_context->is_marked(obj)) {
 872       if (_heap->is_in_active_generation(obj)) {
 873         // TODO: This worries me. Here we are asserting that an unmarked from-space object is 'correct'.
 874         // Normally, I would call this a bogus assert, but there seems to be a legitimate use-case for
 875         // accessing from-space objects during class unloading. However, the from-space object may have
 876         // been "filled". We've made no effort to prevent old generation classes being unloaded by young
 877         // gen (and vice-versa).
 878         shenandoah_assert_correct(p, obj);
 879         ShenandoahHeap::atomic_clear_oop(p, obj);
 880       }
 881     } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
 882       oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 883       if (resolved == obj) {
 884         resolved = _heap->evacuate_object(obj, _thread);
 885       }
 886       shenandoah_assert_not_in_cset_except(p, resolved, _heap->cancelled_gc());
 887       ShenandoahHeap::atomic_update_oop(resolved, p, obj);
 888     }
 889   }
 890 }
 891 
 892 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
 893   ShouldNotReachHere();
 894 }
 895 
 896 class ShenandoahIsCLDAliveClosure : public CLDClosure {
 897 public:
 898   void do_cld(ClassLoaderData* cld) {
 899     cld->is_alive();
 900   }

1084   assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
1085   ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
1086   heap->workers()->run_task(&task);
1087   heap->set_concurrent_strong_root_in_progress(false);
1088 }
1089 
1090 void ShenandoahConcurrentGC::op_cleanup_early() {
1091   ShenandoahHeap::heap()->free_set()->recycle_trash();
1092 }
1093 
1094 void ShenandoahConcurrentGC::op_evacuate() {
1095   ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
1096 }
1097 
1098 void ShenandoahConcurrentGC::op_init_updaterefs() {
1099   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1100   heap->set_evacuation_in_progress(false);
1101   heap->set_concurrent_weak_root_in_progress(false);
1102   heap->prepare_update_heap_references(true /*concurrent*/);
1103   heap->set_update_refs_in_progress(true);
1104   if (ShenandoahVerify) {
1105     heap->verifier()->verify_before_updaterefs();
1106   }
1107   if (ShenandoahPacing) {
1108     heap->pacer()->setup_for_updaterefs();
1109   }
1110 }
1111 
1112 void ShenandoahConcurrentGC::op_updaterefs() {
1113   ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
1114 }
1115 
1116 class ShenandoahUpdateThreadClosure : public HandshakeClosure {
1117 private:
1118   ShenandoahUpdateRefsClosure _cl;
1119 public:
1120   ShenandoahUpdateThreadClosure();
1121   void do_thread(Thread* thread);
1122 };
1123 
1124 ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
1125   HandshakeClosure("Shenandoah Update Thread Roots") {
1126 }

1131     ResourceMark rm;
1132     jt->oops_do(&_cl, nullptr);
1133   }
1134 }
1135 
1136 void ShenandoahConcurrentGC::op_update_thread_roots() {
1137   ShenandoahUpdateThreadClosure cl;
1138   Handshake::execute(&cl);
1139 }
1140 
1141 void ShenandoahConcurrentGC::op_final_updaterefs() {
1142   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1143   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
1144   assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");
1145 
1146   heap->finish_concurrent_roots();
1147 
1148   // Clear cancelled GC, if set. On cancellation path, the block before would handle
1149   // everything.
1150   if (heap->cancelled_gc()) {
1151     heap->clear_cancelled_gc(true /* clear oom handler */);
1152   }
1153 
1154   // Has to be done before cset is clear
1155   if (ShenandoahVerify) {
1156     heap->verifier()->verify_roots_in_to_space();
1157   }
1158 
1159   // If we are running in generational mode and this is an aging cycle, this will also age active
1160   // regions that haven't been used for allocation.
1161   heap->update_heap_region_states(true /*concurrent*/);
1162 
1163   heap->set_update_refs_in_progress(false);
1164   heap->set_has_forwarded_objects(false);
1165 
1166   if (heap->mode()->is_generational() && heap->is_concurrent_old_mark_in_progress()) {
1167     // When the SATB barrier is left on to support concurrent old gen mark, it may pick up writes to
1168     // objects in the collection set. After those objects are evacuated, the pointers in the
1169     // SATB are no longer safe. Once we have finished update references, we are guaranteed that
1170     // no more writes to the collection set are possible.
1171     //
1172     // This will transfer any old pointers in _active_ regions from the SATB to the old gen
1173     // mark queues. All other pointers will be discarded. This would also discard any pointers
1174     // in old regions that were included in a mixed evacuation. We aren't using the SATB filter
1175     // methods here because we cannot control when they execute. If the SATB filter runs _after_
1176     // a region has been recycled, we will not be able to detect the bad pointer.
1177     //
1178     // We are not concerned about skipping this step in abbreviated cycles because regions
1179     // with no live objects cannot have been written to and so cannot have entries in the SATB
1180     // buffers.
1181     heap->old_generation()->transfer_pointers_from_satb();
1182 
1183     // The aging cycle is only relevant during the evacuation cycle for individual objects and during final mark
1184     // for entire regions.  Both of these operations occur before final update refs.
1185     ShenandoahGenerationalHeap::heap()->set_aging_cycle(false);
1186   }
1187 
1188   if (ShenandoahVerify) {
1189     heap->verifier()->verify_after_updaterefs();
1190   }
1191 
1192   if (VerifyAfterGC) {
1193     Universe::verify();
1194   }
1195 
1196   heap->rebuild_free_set(true /*concurrent*/);
1197 }
1198 
1199 void ShenandoahConcurrentGC::op_final_roots() {
1200 
1201   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1202   heap->set_concurrent_weak_root_in_progress(false);
1203   heap->set_evacuation_in_progress(false);
1204 
1205   if (heap->mode()->is_generational()) {
1206     // If the cycle was shortened for having enough immediate garbage, this could be
1207     // the last GC safepoint before concurrent marking of old resumes. We must be sure
1208     // that old mark threads don't see any pointers to garbage in the SATB buffers.
1209     if (heap->is_concurrent_old_mark_in_progress()) {
1210       heap->old_generation()->transfer_pointers_from_satb();
1211     }
1212 
1213     if (!_generation->is_old()) {
1214       ShenandoahGenerationalHeap::heap()->update_region_ages(_generation->complete_marking_context());
1215     }
1216   }
1217 }
1218 
1219 void ShenandoahConcurrentGC::op_cleanup_complete() {
1220   ShenandoahHeap::heap()->free_set()->recycle_trash();
1221 }
1222 
1223 bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
1224   if (ShenandoahHeap::heap()->cancelled_gc()) {
1225     _degen_point = point;
1226     return true;
1227   }
1228   return false;
1229 }
1230 
1231 const char* ShenandoahConcurrentGC::init_mark_event_message() const {
1232   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1233   assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1234   if (heap->unload_classes()) {
1235     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Init Mark", " (unload classes)");
1236   } else {
1237     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Init Mark", "");
1238   }
1239 }
1240 
1241 const char* ShenandoahConcurrentGC::final_mark_event_message() const {
1242   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1243   assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
1244          "Should not have forwarded objects during final mark, unless old gen concurrent mark is running");
1245 
1246   if (heap->unload_classes()) {
1247     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Mark", " (unload classes)");
1248   } else {
1249     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Mark", "");
1250   }
1251 }
1252 
1253 const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
1254   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1255   assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
1256          "Should not have forwarded objects concurrent mark, unless old gen concurrent mark is running");
1257   if (heap->unload_classes()) {
1258     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent marking", " (unload classes)");
1259   } else {
1260     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent marking", "");
1261   }
1262 }