src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp

   1 /*
   2  * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 
  27 #include "gc/shared/barrierSetNMethod.hpp"
  28 #include "gc/shared/collectorCounters.hpp"
  29 #include "gc/shared/continuationGCSupport.inline.hpp"
  30 #include "gc/shenandoah/shenandoahBreakpoint.hpp"
  31 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  32 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
  33 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  34 #include "gc/shenandoah/shenandoahLock.hpp"
  35 #include "gc/shenandoah/shenandoahMark.inline.hpp"
  36 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  37 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  38 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  39 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  40 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  41 #include "gc/shenandoah/shenandoahStackWatermark.hpp"
  42 #include "gc/shenandoah/shenandoahUtils.hpp"
  43 #include "gc/shenandoah/shenandoahVerifier.hpp"
  44 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  45 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  46 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  47 #include "memory/allocation.hpp"
  48 #include "prims/jvmtiTagMap.hpp"
  49 #include "runtime/vmThread.hpp"
  50 #include "utilities/events.hpp"
  51 
  52 // Breakpoint support
  53 class ShenandoahBreakpointGCScope : public StackObj {

  68   }
  69 };
  70 
  71 class ShenandoahBreakpointMarkScope : public StackObj {
  72 private:
  73   const GCCause::Cause _cause;
  74 public:
  75   ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
  76     if (_cause == GCCause::_wb_breakpoint) {
  77       ShenandoahBreakpoint::at_after_marking_started();
  78     }
  79   }
  80 
  81   ~ShenandoahBreakpointMarkScope() {
  82     if (_cause == GCCause::_wb_breakpoint) {
  83       ShenandoahBreakpoint::at_before_marking_completed();
  84     }
  85   }
  86 };
  87 
  88 ShenandoahConcurrentGC::ShenandoahConcurrentGC() :
  89   _mark(),
  90   _degen_point(ShenandoahDegenPoint::_degenerated_unset),
  91   _abbreviated(false) {
  92 }
  93 
  94 ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
  95   return _degen_point;
  96 }
  97 
  98 void ShenandoahConcurrentGC::cancel() {
  99   ShenandoahConcurrentMark::cancel();
 100 }
 101 
 102 bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
 103   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 104   ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);
 105 
 106   // Reset for upcoming marking
 107   entry_reset();
 108 
 109   // Start initial mark under STW
 110   vmop_entry_init_mark();
 111 
 112   {
 113     ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);
 114     // Concurrent mark roots
 115     entry_mark_roots();
 116     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_outside_cycle)) {
 117       return false;
 118     }
 119 
 120     // Continue concurrent mark
 121     entry_mark();
 122     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) {
 123       return false;
 124     }
 125   }
 126 
 127   // Complete marking under STW, and start evacuation
 128   vmop_entry_final_mark();
 129 
 130   // Concurrent stack processing
 131   if (heap->is_evacuation_in_progress()) {
 132     entry_thread_roots();
 133   }
 134 
 135   // Process weak roots that might still point to regions that would be broken by cleanup
 136   if (heap->is_concurrent_weak_root_in_progress()) {
 137     entry_weak_refs();
 138     entry_weak_roots();
 139   }
 140 
  141   // Final mark might have reclaimed some immediate garbage; kick cleanup to reclaim
 142   // the space. This would be the last action if there is nothing to evacuate.
 143   entry_cleanup_early();
 144 
 145   {
 146     ShenandoahHeapLocker locker(heap->lock());
 147     heap->free_set()->log_status();
 148   }
 149 
 150   // Perform concurrent class unloading
 151   if (heap->unload_classes() &&
 152       heap->is_concurrent_weak_root_in_progress()) {
 153     entry_class_unloading();
 154   }
 155 
 156   // Processing strong roots
 157   // This may be skipped if there is nothing to update/evacuate.
 158   // If so, strong_root_in_progress would be unset.
 159   if (heap->is_concurrent_strong_root_in_progress()) {
 160     entry_strong_roots();
 161   }
 162 
 163   // Continue the cycle with evacuation and optional update-refs.
 164   // This may be skipped if there is nothing to evacuate.
 165   // If so, evac_in_progress would be unset by collection set preparation code.
 166   if (heap->is_evacuation_in_progress()) {
 167     // Concurrently evacuate
 168     entry_evacuate();
 169     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) {
 170       return false;
 171     }
  172 
 173     // Perform update-refs phase.
 174     vmop_entry_init_updaterefs();
 175     entry_updaterefs();
 176     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) {
 177       return false;
 178     }
 179 
 180     // Concurrent update thread roots
 181     entry_update_thread_roots();
 182     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) {
 183       return false;
 184     }
 185 
 186     vmop_entry_final_updaterefs();
 187 
  188     // Update-refs has freed up the collection set; kick the cleanup to reclaim the space.
 189     entry_cleanup_complete();
 190   } else {
 191     vmop_entry_final_roots();
 192     _abbreviated = true;
 193   }
 194 
 195   return true;
 196 }
 197 
 198 void ShenandoahConcurrentGC::vmop_entry_init_mark() {
 199   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 200   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 201   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);
 202 
 203   heap->try_inject_alloc_failure();
 204   VM_ShenandoahInitMark op(this);
 205   VMThread::execute(&op); // jump to entry_init_mark() under safepoint
 206 }
 207 
 208 void ShenandoahConcurrentGC::vmop_entry_final_mark() {
 209   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 210   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 211   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);
 212 
 213   heap->try_inject_alloc_failure();
 214   VM_ShenandoahFinalMarkStartEvac op(this);

 284   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
 285   EventMark em("%s", msg);
 286 
 287   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
 288                               ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
 289                               "final reference update");
 290 
 291   op_final_updaterefs();
 292 }
 293 
 294 void ShenandoahConcurrentGC::entry_final_roots() {
 295   static const char* msg = "Pause Final Roots";
 296   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots);
 297   EventMark em("%s", msg);
 298 
 299   op_final_roots();
 300 }
 301 
 302 void ShenandoahConcurrentGC::entry_reset() {
 303   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 304   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 305   static const char* msg = "Concurrent reset";
 306   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
 307   EventMark em("%s", msg);
 308 
 309   ShenandoahWorkerScope scope(heap->workers(),
 310                               ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
 311                               "concurrent reset");
 312 
 313   heap->try_inject_alloc_failure();
 314   op_reset();
 315 }
 316 
 317 void ShenandoahConcurrentGC::entry_mark_roots() {
 318   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 319   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 320   const char* msg = "Concurrent marking roots";
 321   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
 322   EventMark em("%s", msg);
 323 
 324   ShenandoahWorkerScope scope(heap->workers(),
 325                               ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
 326                               "concurrent marking roots");
 327 
 328   heap->try_inject_alloc_failure();
 329   op_mark_roots();
 330 }
 331 
 332 void ShenandoahConcurrentGC::entry_mark() {
 333   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 334   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

 475   op_updaterefs();
 476 }
 477 
 478 void ShenandoahConcurrentGC::entry_cleanup_complete() {
 479   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 480   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 481   static const char* msg = "Concurrent cleanup";
 482   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
 483   EventMark em("%s", msg);
 484 
 485   // This phase does not use workers, no need for setup
 486   heap->try_inject_alloc_failure();
 487   op_cleanup_complete();
 488 }
 489 
 490 void ShenandoahConcurrentGC::op_reset() {
 491   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 492   if (ShenandoahPacing) {
 493     heap->pacer()->setup_for_reset();
 494   }
 495 
 496   heap->prepare_gc();
 497 }
 498 
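      // Note: TAMS (top-at-mark-start) separates objects allocated before marking began,
      // which must be explicitly marked to be considered live, from objects allocated
      // during marking, which are implicitly live.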
 499 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
 500 private:
 501   ShenandoahMarkingContext* const _ctx;
 502 public:
 503   ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
 504 
 505   void heap_region_do(ShenandoahHeapRegion* r) {
 506     assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
 507     if (r->is_active()) {
  508       // Check if the region's TAMS needs updating. We have updated it already during concurrent
  509       // reset, so it is very likely we don't need to do another write here.
 510       if (_ctx->top_at_mark_start(r) != r->top()) {
 511         _ctx->capture_top_at_mark_start(r);
 512       }
 513     } else {
 514       assert(_ctx->top_at_mark_start(r) == r->top(),
 515              "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
 516     }
 517   }
 518 
 519   bool is_thread_safe() { return true; }
 520 };
 521 
 522 void ShenandoahConcurrentGC::start_mark() {
 523   _mark.start_mark();
 524 }
 525 
 526 void ShenandoahConcurrentGC::op_init_mark() {
 527   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 528   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
 529   assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
 530 
 531   assert(heap->marking_context()->is_bitmap_clear(), "need clear marking bitmap");
 532   assert(!heap->marking_context()->is_complete(), "should not be complete");
 533   assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
 534 
 535   if (ShenandoahVerify) {
 536     heap->verifier()->verify_before_concmark();
 537   }
 538 
 539   if (VerifyBeforeGC) {
 540     Universe::verify();
 541   }
 542 
 543   heap->set_concurrent_mark_in_progress(true);
 544 
 545   start_mark();
 546 
 547   {
 548     ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
 549     ShenandoahInitMarkUpdateRegionStateClosure cl;
 550     heap->parallel_heap_region_iterate(&cl);
 551   }
 552 
 553   // Weak reference processing
 554   ShenandoahReferenceProcessor* rp = heap->ref_processor();
 555   rp->reset_thread_locals();
 556   rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());
 557 
 558   // Make above changes visible to worker threads
 559   OrderAccess::fence();
 560 
 561   // Arm nmethods for concurrent mark
 562   ShenandoahCodeRoots::arm_nmethods_for_mark();
 563 
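        // Start a new stack watermark epoch: each Java thread must process its stack
        // under the new phase's closure before returning to watermarked frames.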
 564   ShenandoahStackWatermark::change_epoch_id();
 565   if (ShenandoahPacing) {
 566     heap->pacer()->setup_for_mark();
 567   }
 568 }
 569 
 570 void ShenandoahConcurrentGC::op_mark_roots() {
 571   _mark.mark_concurrent_roots();
 572 }
 573 
 574 void ShenandoahConcurrentGC::op_mark() {
 575   _mark.concurrent_mark();
 576 }
 577 
 578 void ShenandoahConcurrentGC::op_final_mark() {
 579   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 580   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
 581   assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
 582 
 583   if (ShenandoahVerify) {
 584     heap->verifier()->verify_roots_no_forwarded();
 585   }
 586 
 587   if (!heap->cancelled_gc()) {
 588     _mark.finish_mark();
 589     assert(!heap->cancelled_gc(), "STW mark cannot OOM");
 590 
 591     // Notify JVMTI that the tagmap table will need cleaning.
 592     JvmtiTagMap::set_needs_cleaning();
 593 
 594     heap->prepare_regions_and_collection_set(true /*concurrent*/);
 595 
 596     // Has to be done after cset selection
 597     heap->prepare_concurrent_roots();
 598 
 599     if (!heap->collection_set()->is_empty()) {
 600       if (ShenandoahVerify) {
 601         heap->verifier()->verify_before_evacuation();
 602       }
 603 
 604       heap->set_evacuation_in_progress(true);
 605       // From here on, we need to update references.
 606       heap->set_has_forwarded_objects(true);
 607 
 608       // Verify before arming for concurrent processing.
 609       // Otherwise, verification can trigger stack processing.
 610       if (ShenandoahVerify) {
 611         heap->verifier()->verify_during_evacuation();
 612       }
 613 
 614       // Arm nmethods/stack for concurrent processing
 615       ShenandoahCodeRoots::arm_nmethods_for_evac();
 616       ShenandoahStackWatermark::change_epoch_id();
 617 
 618       if (ShenandoahPacing) {
 619         heap->pacer()->setup_for_evac();
 620       }
 621     } else {
 622       if (ShenandoahVerify) {
 623         heap->verifier()->verify_after_concmark();
 624       }
 625 
 626       if (VerifyAfterGC) {
 627         Universe::verify();
 628       }
 629     }
 630   }
 631 }
 632 
 633 class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
 634 private:
 635   OopClosure* const _oops;
 636 
 637 public:
 638   ShenandoahConcurrentEvacThreadClosure(OopClosure* oops);
 639   void do_thread(Thread* thread);
 640 };
 641 
 642 ShenandoahConcurrentEvacThreadClosure::ShenandoahConcurrentEvacThreadClosure(OopClosure* oops) :
 643   _oops(oops) {
 644 }
 645 
 646 void ShenandoahConcurrentEvacThreadClosure::do_thread(Thread* thread) {
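        // Finish any outstanding lazy stack processing for this thread now, applying _oops
        // to fix up its stack roots, rather than waiting for the thread to reach its watermark.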
 647   JavaThread* const jt = JavaThread::cast(thread);
 648   StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc);
 649 }
 650 
 651 class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask {
 652 private:
 653   ShenandoahJavaThreadsIterator _java_threads;
 654 
 655 public:
 656   ShenandoahConcurrentEvacUpdateThreadTask(uint n_workers) :
 657     WorkerTask("Shenandoah Evacuate/Update Concurrent Thread Roots"),
 658     _java_threads(ShenandoahPhaseTimings::conc_thread_roots, n_workers) {
 659   }
 660 
 661   void work(uint worker_id) {
  662     // ShenandoahEvacOOMScope has to be set up by ShenandoahContextEvacuateUpdateRootsClosure.
  663     // Otherwise, it may deadlock with the watermark lock.
 664     ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
 665     ShenandoahConcurrentEvacThreadClosure thr_cl(&oops_cl);
 666     _java_threads.threads_do(&thr_cl, worker_id);
 667   }
 668 };
 669 
 670 void ShenandoahConcurrentGC::op_thread_roots() {
 671   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 672   assert(heap->is_evacuation_in_progress(), "Checked by caller");
 673   ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
 674   ShenandoahConcurrentEvacUpdateThreadTask task(heap->workers()->active_workers());
 675   heap->workers()->run_task(&task);
 676 }
 677 
 678 void ShenandoahConcurrentGC::op_weak_refs() {
 679   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 680   assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
 681   // Concurrent weak refs processing
 682   ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
 683   if (heap->gc_cause() == GCCause::_wb_breakpoint) {
 684     ShenandoahBreakpoint::at_after_reference_processing_started();
 685   }
 686   heap->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
 687 }
 688 
 689 class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
 690 private:
 691   ShenandoahHeap* const _heap;
 692   ShenandoahMarkingContext* const _mark_context;
 693   bool  _evac_in_progress;
 694   Thread* const _thread;
 695 
 696 public:
 697   ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
 698   void do_oop(oop* p);
 699   void do_oop(narrowOop* p);
 700 };
 701 
 702 ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
 703   _heap(ShenandoahHeap::heap()),
 704   _mark_context(ShenandoahHeap::heap()->marking_context()),
 705   _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
 706   _thread(Thread::current()) {
 707 }
 708 
 709 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
 710   const oop obj = RawAccess<>::oop_load(p);
 711   if (!CompressedOops::is_null(obj)) {
 712     if (!_mark_context->is_marked(obj)) {
 713       shenandoah_assert_correct(p, obj);
 714       ShenandoahHeap::atomic_clear_oop(p, obj);
 715     } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
 716       oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 717       if (resolved == obj) {
 718         resolved = _heap->evacuate_object(obj, _thread);
 719       }
 720       shenandoah_assert_not_in_cset_except(p, resolved, _heap->cancelled_gc());
 721       ShenandoahHeap::atomic_update_oop(resolved, p, obj);
 722     }
 723   }
 724 }
 725 
 726 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
 727   ShouldNotReachHere();
 728 }
 729 
 730 class ShenandoahIsCLDAliveClosure : public CLDClosure {
 731 public:
 732   void do_cld(ClassLoaderData* cld) {
 733     cld->is_alive();
 734   }

 918   assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
 919   ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
 920   heap->workers()->run_task(&task);
 921   heap->set_concurrent_strong_root_in_progress(false);
 922 }
 923 
 924 void ShenandoahConcurrentGC::op_cleanup_early() {
 925   ShenandoahHeap::heap()->free_set()->recycle_trash();
 926 }
 927 
 928 void ShenandoahConcurrentGC::op_evacuate() {
 929   ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
 930 }
 931 
 932 void ShenandoahConcurrentGC::op_init_updaterefs() {
 933   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 934   heap->set_evacuation_in_progress(false);
 935   heap->set_concurrent_weak_root_in_progress(false);
 936   heap->prepare_update_heap_references(true /*concurrent*/);
 937   heap->set_update_refs_in_progress(true);
 938 


 939   if (ShenandoahPacing) {
 940     heap->pacer()->setup_for_updaterefs();
 941   }
 942 }
 943 
 944 void ShenandoahConcurrentGC::op_updaterefs() {
 945   ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
 946 }
 947 
 948 class ShenandoahUpdateThreadClosure : public HandshakeClosure {
 949 private:
 950   ShenandoahUpdateRefsClosure _cl;
 951 public:
 952   ShenandoahUpdateThreadClosure();
 953   void do_thread(Thread* thread);
 954 };
 955 
 956 ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
 957   HandshakeClosure("Shenandoah Update Thread Roots") {
 958 }

 963     ResourceMark rm;
 964     jt->oops_do(&_cl, nullptr);
 965   }
 966 }
 967 
 968 void ShenandoahConcurrentGC::op_update_thread_roots() {
 969   ShenandoahUpdateThreadClosure cl;
 970   Handshake::execute(&cl);
 971 }
 972 
 973 void ShenandoahConcurrentGC::op_final_updaterefs() {
 974   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 975   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
 976   assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");
 977 
 978   heap->finish_concurrent_roots();
 979 
 980   // Clear cancelled GC, if set. On cancellation path, the block before would handle
 981   // everything.
 982   if (heap->cancelled_gc()) {
 983     heap->clear_cancelled_gc();
 984   }
 985 
  986   // Has to be done before the cset is cleared
 987   if (ShenandoahVerify) {
 988     heap->verifier()->verify_roots_in_to_space();
 989   }
 990 
 991   heap->update_heap_region_states(true /*concurrent*/);
 992 
 993   heap->set_update_refs_in_progress(false);
 994   heap->set_has_forwarded_objects(false);
 995 
 996   if (ShenandoahVerify) {
 997     heap->verifier()->verify_after_updaterefs();
 998   }
 999 
1000   if (VerifyAfterGC) {
1001     Universe::verify();
1002   }
1003 
1004   heap->rebuild_free_set(true /*concurrent*/);
1005 }
1006 
1007 void ShenandoahConcurrentGC::op_final_roots() {
1008   ShenandoahHeap::heap()->set_concurrent_weak_root_in_progress(false);
1009 }
1010 
1011 void ShenandoahConcurrentGC::op_cleanup_complete() {
1012   ShenandoahHeap::heap()->free_set()->recycle_trash();
1013 }
1014 
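      // If the GC has been cancelled, record the point at which this concurrent cycle was
      // abandoned so a degenerated GC can resume from that phase instead of starting over.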
1015 bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
1016   if (ShenandoahHeap::heap()->cancelled_gc()) {
1017     _degen_point = point;
1018     return true;
1019   }
1020   return false;
1021 }
1022 
1023 const char* ShenandoahConcurrentGC::init_mark_event_message() const {
1024   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1025   assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1026   if (heap->unload_classes()) {
1027     return "Pause Init Mark (unload classes)";
1028   } else {
1029     return "Pause Init Mark";
1030   }
1031 }
1032 
1033 const char* ShenandoahConcurrentGC::final_mark_event_message() const {
1034   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1035   assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1036   if (heap->unload_classes()) {
1037     return "Pause Final Mark (unload classes)";
1038   } else {
1039     return "Pause Final Mark";
1040   }
1041 }
1042 
1043 const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
1044   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1045   assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1046   if (heap->unload_classes()) {
1047     return "Concurrent marking (unload classes)";
1048   } else {
1049     return "Concurrent marking";
1050   }
1051 }

   1 /*
   2  * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
   3  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 
  28 #include "gc/shared/barrierSetNMethod.hpp"
  29 #include "gc/shared/collectorCounters.hpp"
  30 #include "gc/shared/continuationGCSupport.inline.hpp"
  31 #include "gc/shenandoah/shenandoahBreakpoint.hpp"
  32 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  33 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
  34 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  35 #include "gc/shenandoah/shenandoahGeneration.hpp"
  36 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
  37 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  38 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  39 #include "gc/shenandoah/shenandoahLock.hpp"
  40 #include "gc/shenandoah/shenandoahMark.inline.hpp"
  41 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  42 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  43 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  44 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
  45 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  46 #include "gc/shenandoah/shenandoahStackWatermark.hpp"
  47 #include "gc/shenandoah/shenandoahUtils.hpp"
  48 #include "gc/shenandoah/shenandoahVerifier.hpp"
  49 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  50 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  51 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  52 #include "memory/allocation.hpp"
  53 #include "prims/jvmtiTagMap.hpp"
  54 #include "runtime/vmThread.hpp"
  55 #include "utilities/events.hpp"
  56 
  57 // Breakpoint support
  58 class ShenandoahBreakpointGCScope : public StackObj {

  73   }
  74 };
  75 
  76 class ShenandoahBreakpointMarkScope : public StackObj {
  77 private:
  78   const GCCause::Cause _cause;
  79 public:
  80   ShenandoahBreakpointMarkScope(GCCause::Cause cause) : _cause(cause) {
  81     if (_cause == GCCause::_wb_breakpoint) {
  82       ShenandoahBreakpoint::at_after_marking_started();
  83     }
  84   }
  85 
  86   ~ShenandoahBreakpointMarkScope() {
  87     if (_cause == GCCause::_wb_breakpoint) {
  88       ShenandoahBreakpoint::at_before_marking_completed();
  89     }
  90   }
  91 };
  92 
  93 ShenandoahConcurrentGC::ShenandoahConcurrentGC(ShenandoahGeneration* generation, bool do_old_gc_bootstrap) :
  94   _mark(generation),
  95   _degen_point(ShenandoahDegenPoint::_degenerated_unset),
  96   _abbreviated(false),
  97   _do_old_gc_bootstrap(do_old_gc_bootstrap),
  98   _generation(generation) {
  99 }
 100 
 101 ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
 102   return _degen_point;
 103 }
 104 
 105 bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
 106   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 107 
 108   ShenandoahBreakpointGCScope breakpoint_gc_scope(cause);
 109 
 110   // Reset for upcoming marking
 111   entry_reset();
 112 
 113   // Start initial mark under STW
 114   vmop_entry_init_mark();
 115 
 116   {
 117     ShenandoahBreakpointMarkScope breakpoint_mark_scope(cause);
 118 
 119     // Reset task queue stats here, rather than in mark_concurrent_roots,
 120     // because remembered set scan will `push` oops into the queues and
 121     // resetting after this happens will lose those counts.
 122     TASKQUEUE_STATS_ONLY(_mark.task_queues()->reset_taskqueue_stats());
 123 
 124     // Concurrent remembered set scanning
 125     entry_scan_remembered_set();
 126     // TODO: When RS scanning yields, we will need a check_cancellation_and_abort() degeneration point here.
 127 
 128     // Concurrent mark roots
 129     entry_mark_roots();
 130     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_roots)) {
 131       return false;
 132     }
 133 
 134     // Continue concurrent mark
 135     entry_mark();
 136     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) {
 137       return false;
 138     }
 139   }
 140 
 141   // Complete marking under STW, and start evacuation
 142   vmop_entry_final_mark();
 143 
 144   // If GC was cancelled before final mark, then the safepoint operation will do nothing
 145   // and the concurrent mark will still be in progress. In this case it is safe to resume
 146   // the degenerated cycle from the marking phase. On the other hand, if the GC is cancelled
 147   // after final mark (but before this check), then the final mark safepoint operation
 148   // will have finished the mark (setting concurrent mark in progress to false). Final mark
  149   // will also have set up state (in concurrent stack processing) that will not be safe to
 150   // resume from the marking phase in the degenerated cycle. That is, if the cancellation
 151   // occurred after final mark, we must resume the degenerated cycle after the marking phase.
 152   if (_generation->is_concurrent_mark_in_progress() && check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) {
 153     assert(!heap->is_concurrent_weak_root_in_progress(), "Weak roots should not be in progress when concurrent mark is in progress");
 154     return false;
 155   }
 156 
 157   // Concurrent stack processing
 158   if (heap->is_evacuation_in_progress()) {
 159     entry_thread_roots();
 160   }
 161 
 162   // Process weak roots that might still point to regions that would be broken by cleanup
 163   if (heap->is_concurrent_weak_root_in_progress()) {
 164     entry_weak_refs();
 165     entry_weak_roots();
 166   }
 167 
  168   // Final mark might have reclaimed some immediate garbage; kick cleanup to reclaim
 169   // the space. This would be the last action if there is nothing to evacuate.  Note that
 170   // we will not age young-gen objects in the case that we skip evacuation.
 171   entry_cleanup_early();
 172 
 173   {
 174     // TODO: Not sure there is value in logging free-set status right here.  Note that whenever the free set is rebuilt,
 175     // it logs the newly rebuilt status.
 176     ShenandoahHeapLocker locker(heap->lock());
 177     heap->free_set()->log_status();
 178   }
 179 
 180   // Perform concurrent class unloading
 181   if (heap->unload_classes() &&
 182       heap->is_concurrent_weak_root_in_progress()) {
 183     entry_class_unloading();
 184   }
 185 
 186   // Processing strong roots
 187   // This may be skipped if there is nothing to update/evacuate.
 188   // If so, strong_root_in_progress would be unset.
 189   if (heap->is_concurrent_strong_root_in_progress()) {
 190     entry_strong_roots();
 191   }
 192 
 193   // Continue the cycle with evacuation and optional update-refs.
 194   // This may be skipped if there is nothing to evacuate.
 195   // If so, evac_in_progress would be unset by collection set preparation code.
 196   if (heap->is_evacuation_in_progress()) {
 197     // Concurrently evacuate
 198     entry_evacuate();
 199     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) {
 200       return false;
 201     }
 202   }
 203 
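        // Update-refs is keyed on the presence of forwarded objects rather than on the
        // evacuation flag: a generational cycle may evacuate only to promote regions in
        // place, which creates no forwardings and needs no update-refs pass (see op_final_mark).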
 204   if (heap->has_forwarded_objects()) {
 205     // Perform update-refs phase.
 206     vmop_entry_init_updaterefs();
 207     entry_updaterefs();
 208     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) {
 209       return false;
 210     }
 211 
 212     // Concurrent update thread roots
 213     entry_update_thread_roots();
 214     if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) {
 215       return false;
 216     }
 217 
 218     vmop_entry_final_updaterefs();
 219 
  220     // Update-refs has freed up the collection set; kick the cleanup to reclaim the space.
 221     entry_cleanup_complete();
 222   } else {
 223     // We chose not to evacuate because we found sufficient immediate garbage. Note that we
 224     // do not check for cancellation here because, at this point, the cycle is effectively
 225     // complete. If the cycle has been cancelled here, the control thread will detect it
 226     // on its next iteration and run a degenerated young cycle.
 227     vmop_entry_final_roots();
 228     _abbreviated = true;
 229   }
 230 
 231   // We defer generation resizing actions until after cset regions have been recycled.  We do this even following an
 232   // abbreviated cycle.
 233   if (heap->mode()->is_generational()) {
 234 
 235     ShenandoahGenerationalHeap::TransferResult result;
 236     {
 237       ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
 238       ShenandoahHeapLocker locker(gen_heap->lock());
 239 
 240       result = gen_heap->balance_generations();
 241       gen_heap->reset_generation_reserves();
 242     }
 243 
 244     LogTarget(Info, gc, ergo) lt;
 245     if (lt.is_enabled()) {
 246       LogStream ls(lt);
 247       result.print_on("Concurrent GC", &ls);
 248     }
 249   }
 250   return true;
 251 }
 252 
 253 void ShenandoahConcurrentGC::vmop_entry_init_mark() {
 254   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 255   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 256   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);
 257 
 258   heap->try_inject_alloc_failure();
 259   VM_ShenandoahInitMark op(this);
 260   VMThread::execute(&op); // jump to entry_init_mark() under safepoint
 261 }
 262 
 263 void ShenandoahConcurrentGC::vmop_entry_final_mark() {
 264   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 265   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 266   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);
 267 
 268   heap->try_inject_alloc_failure();
 269   VM_ShenandoahFinalMarkStartEvac op(this);

 339   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
 340   EventMark em("%s", msg);
 341 
 342   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
 343                               ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
 344                               "final reference update");
 345 
 346   op_final_updaterefs();
 347 }
 348 
 349 void ShenandoahConcurrentGC::entry_final_roots() {
 350   static const char* msg = "Pause Final Roots";
 351   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots);
 352   EventMark em("%s", msg);
 353 
 354   op_final_roots();
 355 }
 356 
 357 void ShenandoahConcurrentGC::entry_reset() {
 358   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 359   heap->try_inject_alloc_failure();
 360 
 361   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 362   {
 363     static const char* msg = "Concurrent reset";
 364     ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
 365     EventMark em("%s", msg);
 366 
 367     ShenandoahWorkerScope scope(heap->workers(),
 368                                 ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
 369                                 msg);
 370     op_reset();
 371   }
 372 
 373   if (_do_old_gc_bootstrap) {
 374     static const char* msg = "Concurrent reset (OLD)";
 375     ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset_old);
 376     ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
 377                                 ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
 378                                 msg);
 379     EventMark em("%s", msg);
 380 
 381     heap->old_generation()->prepare_gc();
 382   }
 383 }
 384 
 385 void ShenandoahConcurrentGC::entry_scan_remembered_set() {
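        // Only young collections scan the remembered set: it tracks old-to-young pointers,
        // which act as additional marking roots when old-gen is not itself being marked.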
 386   if (_generation->is_young()) {
 387     ShenandoahHeap* const heap = ShenandoahHeap::heap();
 388     TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 389     const char* msg = "Concurrent remembered set scanning";
 390     ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::init_scan_rset);
 391     EventMark em("%s", msg);
 392 
 393     ShenandoahWorkerScope scope(heap->workers(),
 394                                 ShenandoahWorkerPolicy::calc_workers_for_rs_scanning(),
 395                                 msg);
 396 
 397     heap->try_inject_alloc_failure();
 398     _generation->scan_remembered_set(true /* is_concurrent */);
 399   }
 400 }
 401 
 402 void ShenandoahConcurrentGC::entry_mark_roots() {
 403   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 404   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 405   const char* msg = "Concurrent marking roots";
 406   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
 407   EventMark em("%s", msg);
 408 
 409   ShenandoahWorkerScope scope(heap->workers(),
 410                               ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
 411                               "concurrent marking roots");
 412 
 413   heap->try_inject_alloc_failure();
 414   op_mark_roots();
 415 }
 416 
 417 void ShenandoahConcurrentGC::entry_mark() {
 418   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 419   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

 560   op_updaterefs();
 561 }
 562 
 563 void ShenandoahConcurrentGC::entry_cleanup_complete() {
 564   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 565   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 566   static const char* msg = "Concurrent cleanup";
 567   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
 568   EventMark em("%s", msg);
 569 
 570   // This phase does not use workers, no need for setup
 571   heap->try_inject_alloc_failure();
 572   op_cleanup_complete();
 573 }
 574 
 575 void ShenandoahConcurrentGC::op_reset() {
 576   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 577   if (ShenandoahPacing) {
 578     heap->pacer()->setup_for_reset();
 579   }
 580   _generation->prepare_gc();
 581 }
 582 
 583 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
 584 private:
 585   ShenandoahMarkingContext* const _ctx;
 586 public:
 587   ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
 588 
 589   void heap_region_do(ShenandoahHeapRegion* r) {
 590     assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
 591     if (r->is_active()) {
  592       // Check if the region's TAMS needs updating. We have updated it already during concurrent
 593       // reset, so it is very likely we don't need to do another write here.  Since most regions
 594       // are not "active", this path is relatively rare.
 595       if (_ctx->top_at_mark_start(r) != r->top()) {
 596         _ctx->capture_top_at_mark_start(r);
 597       }
 598     } else {
 599       assert(_ctx->top_at_mark_start(r) == r->top(),
 600              "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
 601     }
 602   }
 603 
 604   bool is_thread_safe() { return true; }
 605 };
 606 
 607 void ShenandoahConcurrentGC::start_mark() {
 608   _mark.start_mark();
 609 }
 610 
 611 void ShenandoahConcurrentGC::op_init_mark() {
 612   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 613   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
 614   assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
 615 
 616   assert(_generation->is_bitmap_clear(), "need clear marking bitmap");
 617   assert(!_generation->is_mark_complete(), "should not be complete");
 618   assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
 619 
 620 
 621   if (heap->mode()->is_generational()) {
 622     if (_generation->is_young() || (_generation->is_global() && ShenandoahVerify)) {
 623       // The current implementation of swap_remembered_set() copies the write-card-table
 624       // to the read-card-table. The remembered sets are also swapped for GLOBAL collections
 625       // so that the verifier works with the correct copy of the card table when verifying.
 626       // TODO: This path should not really depend on ShenandoahVerify.
 627       ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_swap_rset);
 628       _generation->swap_remembered_set();
 629     }
 630 
 631     if (_generation->is_global()) {
 632       heap->cancel_old_gc();
 633     } else if (heap->is_concurrent_old_mark_in_progress()) {
 634       // Purge the SATB buffers, transferring any valid, old pointers to the
 635       // old generation mark queue. Any pointers in a young region will be
 636       // abandoned.
 637       ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_transfer_satb);
 638       heap->transfer_old_pointers_from_satb();
 639     }
 640   }
 641 
 642   if (ShenandoahVerify) {
 643     heap->verifier()->verify_before_concmark();
 644   }
 645 
 646   if (VerifyBeforeGC) {
 647     Universe::verify();
 648   }
 649 
 650   _generation->set_concurrent_mark_in_progress(true);
 651 
 652   start_mark();
 653 
 654   if (_do_old_gc_bootstrap) {
 655     // Update region state for both young and old regions
 656     // TODO: We should be able to pull this out of the safepoint for the bootstrap
 657     // cycle. The top of an old region will only move when a GC cycle evacuates
 658     // objects into it. When we start an old cycle, we know that nothing can touch
 659     // the top of old regions.
 660     ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
 661     ShenandoahInitMarkUpdateRegionStateClosure cl;
 662     heap->parallel_heap_region_iterate(&cl);
 663     heap->old_generation()->ref_processor()->reset_thread_locals();
 664   } else {
 665     // Update region state for only young regions
 666     ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
 667     ShenandoahInitMarkUpdateRegionStateClosure cl;
 668     _generation->parallel_heap_region_iterate(&cl);
 669   }
 670 
 671   // Weak reference processing
 672   ShenandoahReferenceProcessor* rp = _generation->ref_processor();
 673   rp->reset_thread_locals();
 674   rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());
 675 
 676   // Make above changes visible to worker threads
 677   OrderAccess::fence();
 678 
 679   // Arm nmethods for concurrent mark
 680   ShenandoahCodeRoots::arm_nmethods_for_mark();
 681 
 682   ShenandoahStackWatermark::change_epoch_id();
 683   if (ShenandoahPacing) {
 684     heap->pacer()->setup_for_mark();
 685   }
 686 }
 687 
 688 void ShenandoahConcurrentGC::op_mark_roots() {
 689   _mark.mark_concurrent_roots();
 690 }
 691 
 692 void ShenandoahConcurrentGC::op_mark() {
 693   _mark.concurrent_mark();
 694 }
 695 
 696 void ShenandoahConcurrentGC::op_final_mark() {
 697   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 698   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
 699   assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
 700 
 701   if (ShenandoahVerify) {
 702     heap->verifier()->verify_roots_no_forwarded();
 703   }
 704 
 705   if (!heap->cancelled_gc()) {
 706     _mark.finish_mark();
 707     assert(!heap->cancelled_gc(), "STW mark cannot OOM");
 708 
 709     // Notify JVMTI that the tagmap table will need cleaning.
 710     JvmtiTagMap::set_needs_cleaning();
 711 
 712     // The collection set is chosen by prepare_regions_and_collection_set().
 713     //
 714     // TODO: Under severe memory overload conditions that can be checked here, we may want to limit
 715     // the inclusion of old-gen candidates within the collection set.  This would allow us to prioritize efforts on
 716     // evacuating young-gen,  This remediation is most appropriate when old-gen availability is very high (so there
  717     // evacuating young-gen.  This remediation is most appropriate when old-gen availability is very high (so there
 718     // are "under duress" (as signalled by very low availability of memory within young-gen, indicating that/ young-gen
  719     // are "under duress" (as signalled by very low availability of memory within young-gen, indicating that young-gen
 720     _generation->prepare_regions_and_collection_set(true /*concurrent*/);
 721 
 722     // Upon return from prepare_regions_and_collection_set(), certain parameters have been established to govern the
 723     // evacuation efforts that are about to begin.  In particular:
 724     //
 725     // heap->get_promoted_reserve() represents the amount of memory within old-gen's available memory that has
 726     //   been set aside to hold objects promoted from young-gen memory.  This represents an estimated percentage
 727     //   of the live young-gen memory within the collection set.  If there is more data ready to be promoted than
 728     //   can fit within this reserve, the promotion of some objects will be deferred until a subsequent evacuation
 729     //   pass.
 730     //
 731     // heap->get_old_evac_reserve() represents the amount of memory within old-gen's available memory that has been
 732     //  set aside to hold objects evacuated from the old-gen collection set.
 733     //
 734     // heap->get_young_evac_reserve() represents the amount of memory within young-gen's available memory that has
 735     //  been set aside to hold objects evacuated from the young-gen collection set.  Conservatively, this value
 736     //  equals the entire amount of live young-gen memory within the collection set, even though some of this memory
 737     //  will likely be promoted.
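          // A rough sketch of the relationship described above: with L bytes of live
          // young memory in the cset and an estimated promotion fraction p,
          //   promoted_reserve   ~= p * L   (old-gen space set aside for promotions)
          //   young_evac_reserve  = L       (conservative: counts even memory that promotes)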
 738 
 739     // Has to be done after cset selection
 740     heap->prepare_concurrent_roots();
 741 
 742     if (heap->mode()->is_generational()) {
 743       if (!heap->collection_set()->is_empty() || heap->old_generation()->has_in_place_promotions()) {
 744         // Even if the collection set is empty, we need to do evacuation if there are regions to be promoted in place.
 745         // Concurrent evacuation takes responsibility for registering objects and setting the remembered set cards to dirty.
 746 
 747         LogTarget(Debug, gc, cset) lt;
 748         if (lt.is_enabled()) {
 749           ResourceMark rm;
 750           LogStream ls(lt);
 751           heap->collection_set()->print_on(&ls);
 752         }
 753 
 754         if (ShenandoahVerify) {
 755           heap->verifier()->verify_before_evacuation();
 756         }
 757 
 758         heap->set_evacuation_in_progress(true);
 759 
 760         // Verify before arming for concurrent processing.
 761         // Otherwise, verification can trigger stack processing.
 762         if (ShenandoahVerify) {
 763           heap->verifier()->verify_during_evacuation();
 764         }
 765 
 766         // Generational mode may promote objects in place during the evacuation phase.
 767         // If that is the only reason we are evacuating, we don't need to update references
 768         // and there will be no forwarded objects on the heap.
 769         heap->set_has_forwarded_objects(!heap->collection_set()->is_empty());
 770 
 771         // Arm nmethods/stack for concurrent processing
 772         if (!heap->collection_set()->is_empty()) {
  773           // Iff objects will be evacuated, arm the nmethod barriers. These will be disarmed
 774           // under the same condition (established in prepare_concurrent_roots) after strong
 775           // root evacuation has completed (see op_strong_roots).
 776           ShenandoahCodeRoots::arm_nmethods_for_evac();
 777           ShenandoahStackWatermark::change_epoch_id();
 778         }
 779 
 780         if (ShenandoahPacing) {
 781           heap->pacer()->setup_for_evac();
 782         }
 783       } else {
 784         if (ShenandoahVerify) {
 785           heap->verifier()->verify_after_concmark();
 786         }
 787 
 788         if (VerifyAfterGC) {
 789           Universe::verify();
 790         }
 791       }
 792     } else {
 793       // Not is_generational()
 794       if (!heap->collection_set()->is_empty()) {
 795         LogTarget(Debug, gc, ergo) lt;
 796         if (lt.is_enabled()) {
 797           ResourceMark rm;
 798           LogStream ls(lt);
 799           heap->collection_set()->print_on(&ls);
 800         }
 801 
 802         if (ShenandoahVerify) {
 803           heap->verifier()->verify_before_evacuation();
 804         }
 805 
 806         heap->set_evacuation_in_progress(true);
 807 
 808         // Verify before arming for concurrent processing.
 809         // Otherwise, verification can trigger stack processing.
 810         if (ShenandoahVerify) {
 811           heap->verifier()->verify_during_evacuation();
 812         }
 813 
 814         // From here on, we need to update references.
 815         heap->set_has_forwarded_objects(true);
 816 
 817         // Arm nmethods/stack for concurrent processing
 818         ShenandoahCodeRoots::arm_nmethods_for_evac();
 819         ShenandoahStackWatermark::change_epoch_id();
 820 
 821         if (ShenandoahPacing) {
 822           heap->pacer()->setup_for_evac();
 823         }
 824       } else {
 825         if (ShenandoahVerify) {
 826           heap->verifier()->verify_after_concmark();
 827         }
 828 
 829         if (VerifyAfterGC) {
 830           Universe::verify();
 831         }
 832       }
 833     }
 834   }
 835 }
 836 
 837 class ShenandoahConcurrentEvacThreadClosure : public ThreadClosure {
 838 private:
 839   OopClosure* const _oops;
 840 
 841 public:
 842   ShenandoahConcurrentEvacThreadClosure(OopClosure* oops);
 843   void do_thread(Thread* thread);
 844 };
 845 
 846 ShenandoahConcurrentEvacThreadClosure::ShenandoahConcurrentEvacThreadClosure(OopClosure* oops) :
 847   _oops(oops) {
 848 }
 849 
 850 void ShenandoahConcurrentEvacThreadClosure::do_thread(Thread* thread) {
 851   JavaThread* const jt = JavaThread::cast(thread);
 852   StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc);
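        // Presumably this re-arms promotion via the thread's PLAB for the new cycle;
        // promotions may have been disabled earlier, e.g. when the promotion reserve ran out.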
 853   ShenandoahThreadLocalData::enable_plab_promotions(thread);
 854 }
 855 
 856 class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask {
 857 private:
 858   ShenandoahJavaThreadsIterator _java_threads;
 859 
 860 public:
 861   ShenandoahConcurrentEvacUpdateThreadTask(uint n_workers) :
 862     WorkerTask("Shenandoah Evacuate/Update Concurrent Thread Roots"),
 863     _java_threads(ShenandoahPhaseTimings::conc_thread_roots, n_workers) {
 864   }
 865 
 866   void work(uint worker_id) {
 867     Thread* worker_thread = Thread::current();
 868     ShenandoahThreadLocalData::enable_plab_promotions(worker_thread);
 869 
  870     // ShenandoahEvacOOMScope has to be set up by ShenandoahContextEvacuateUpdateRootsClosure.
  871     // Otherwise, it may deadlock with the watermark lock.
 872     ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
 873     ShenandoahConcurrentEvacThreadClosure thr_cl(&oops_cl);
 874     _java_threads.threads_do(&thr_cl, worker_id);
 875   }
 876 };
 877 
 878 void ShenandoahConcurrentGC::op_thread_roots() {
 879   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 880   assert(heap->is_evacuation_in_progress(), "Checked by caller");
 881   ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_thread_roots);
 882   ShenandoahConcurrentEvacUpdateThreadTask task(heap->workers()->active_workers());
 883   heap->workers()->run_task(&task);
 884 }
 885 
 886 void ShenandoahConcurrentGC::op_weak_refs() {
 887   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 888   assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
 889   // Concurrent weak refs processing
 890   ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
 891   if (heap->gc_cause() == GCCause::_wb_breakpoint) {
 892     ShenandoahBreakpoint::at_after_reference_processing_started();
 893   }
 894   _generation->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
 895 }
 896 
 897 class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
 898 private:
 899   ShenandoahHeap* const _heap;
 900   ShenandoahMarkingContext* const _mark_context;
 901   bool  _evac_in_progress;
 902   Thread* const _thread;
 903 
 904 public:
 905   ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
 906   void do_oop(oop* p);
 907   void do_oop(narrowOop* p);
 908 };
 909 
 910 ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
 911   _heap(ShenandoahHeap::heap()),
 912   _mark_context(ShenandoahHeap::heap()->marking_context()),
 913   _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
 914   _thread(Thread::current()) {
 915 }
 916 
 917 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
 918   const oop obj = RawAccess<>::oop_load(p);
 919   if (!CompressedOops::is_null(obj)) {
 920     if (!_mark_context->is_marked(obj)) {
 921       if (_heap->is_in_active_generation(obj)) {
 922         // TODO: This worries me. Here we are asserting that an unmarked from-space object is 'correct'.
 923         // Normally, I would call this a bogus assert, but there seems to be a legitimate use-case for
 924         // accessing from-space objects during class unloading. However, the from-space object may have
  925         // been "filled". We've made no effort to prevent old generation classes from being unloaded
  926         // by young gen (and vice-versa).
 927         shenandoah_assert_correct(p, obj);
 928         ShenandoahHeap::atomic_clear_oop(p, obj);
 929       }
 930     } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
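            // Live object in the collection set: ensure it has a to-space copy, then CAS the
            // root slot from the stale from-space reference to the forwarded copy (benign if
            // another thread already updated this slot).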
 931       oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 932       if (resolved == obj) {
 933         resolved = _heap->evacuate_object(obj, _thread);
 934       }
 935       shenandoah_assert_not_in_cset_except(p, resolved, _heap->cancelled_gc());
 936       ShenandoahHeap::atomic_update_oop(resolved, p, obj);
 937     }
 938   }
 939 }
 940 
 941 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
 942   ShouldNotReachHere();
 943 }
 944 
 945 class ShenandoahIsCLDAliveClosure : public CLDClosure {
 946 public:
 947   void do_cld(ClassLoaderData* cld) {
 948     cld->is_alive();
 949   }
1133   assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
1134   ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
1135   heap->workers()->run_task(&task);
1136   heap->set_concurrent_strong_root_in_progress(false);
1137 }
1138 
1139 void ShenandoahConcurrentGC::op_cleanup_early() {
1140   ShenandoahHeap::heap()->free_set()->recycle_trash();
1141 }
1142 
1143 void ShenandoahConcurrentGC::op_evacuate() {
1144   ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
1145 }
1146 
1147 void ShenandoahConcurrentGC::op_init_updaterefs() {
1148   ShenandoahHeap* const heap = ShenandoahHeap::heap();
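       // Evacuation is complete at this point: drop the evacuation and weak-root
       // flags, then prepare the concurrent update-references phase.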
1149   heap->set_evacuation_in_progress(false);
1150   heap->set_concurrent_weak_root_in_progress(false);
1151   heap->prepare_update_heap_references(true /*concurrent*/);
1152   heap->set_update_refs_in_progress(true);
1153   if (ShenandoahVerify) {
1154     heap->verifier()->verify_before_updaterefs();
1155   }
1156   if (ShenandoahPacing) {
1157     heap->pacer()->setup_for_updaterefs();
1158   }
1159 }
1160 
1161 void ShenandoahConcurrentGC::op_updaterefs() {
1162   ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
1163 }
1164 
1165 class ShenandoahUpdateThreadClosure : public HandshakeClosure {
1166 private:
1167   ShenandoahUpdateRefsClosure _cl;
1168 public:
1169   ShenandoahUpdateThreadClosure();
1170   void do_thread(Thread* thread);
1171 };
1172 
1173 ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
1174   HandshakeClosure("Shenandoah Update Thread Roots") {
1175 }
1176 
1177 void ShenandoahUpdateThreadClosure::do_thread(Thread* thread) {
1178   if (thread->is_Java_thread()) {
1179     JavaThread* const jt = JavaThread::cast(thread);
1180     ResourceMark rm;
1181     jt->oops_do(&_cl, nullptr);
1182   }
1183 }
1184 
1185 void ShenandoahConcurrentGC::op_update_thread_roots() {
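       // Update thread-root references with a handshake on each Java thread instead
       // of stopping the world.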
1186   ShenandoahUpdateThreadClosure cl;
1187   Handshake::execute(&cl);
1188 }
1189 
1190 void ShenandoahConcurrentGC::op_final_updaterefs() {
1191   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1192   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
1193   assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");
1194 
1195   heap->finish_concurrent_roots();
1196 
1197   // Clear the cancelled-GC flag, if set. On the cancellation path, the preceding
1198   // block has already handled everything.
1199   if (heap->cancelled_gc()) {
1200     heap->clear_cancelled_gc(true /* clear oom handler */);
1201   }
1202 
1203   // Has to be done before the cset is cleared
1204   if (ShenandoahVerify) {
1205     heap->verifier()->verify_roots_in_to_space();
1206   }
1207 
1208   if (heap->mode()->is_generational() && heap->is_concurrent_old_mark_in_progress()) {
1209     // When the SATB barrier is left on to support concurrent old gen mark, it may pick up writes to
1210     // objects in the collection set. After those objects are evacuated, the pointers in the
1211     // SATB are no longer safe. Once we have finished update references, we are guaranteed that
1212     // no more writes to the collection set are possible.
1213     //
1214     // This will transfer any old pointers in _active_ regions from the SATB to the old gen
1215     // mark queues. All other pointers will be discarded. This would also discard any pointers
1216     // in old regions that were included in a mixed evacuation. We aren't using the SATB filter
1217     // methods here because we cannot control when they execute. If the SATB filter runs _after_
1218     // a region has been recycled, we will not be able to detect the bad pointer.
1219     //
1220     // We are not concerned about skipping this step in abbreviated cycles because regions
1221     // with no live objects cannot have been written to and so cannot have entries in the SATB
1222     // buffers.
1223     heap->transfer_old_pointers_from_satb();
1224   }
1225 
1226   heap->update_heap_region_states(true /*concurrent*/);
1227 
1228   heap->set_update_refs_in_progress(false);
1229   heap->set_has_forwarded_objects(false);
1230 
1231   // The aging cycle is only relevant during the evacuation cycle for individual objects, and
1232   // during final mark for entire regions. Both of these operations occur before final update refs.
1233   heap->set_aging_cycle(false);
1234 
1235   if (ShenandoahVerify) {
1236     heap->verifier()->verify_after_updaterefs();
1237   }
1238 
1239   if (VerifyAfterGC) {
1240     Universe::verify();
1241   }
1242 
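       // References are fully updated at this point; rebuild the free set so the
       // allocator sees the regions reclaimed by this cycle.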
1243   heap->rebuild_free_set(true /*concurrent*/);
1244 }
1245 
1246 void ShenandoahConcurrentGC::op_final_roots() {
1248   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1249   heap->set_concurrent_weak_root_in_progress(false);
1250   heap->set_evacuation_in_progress(false);
1251 
1252   if (heap->mode()->is_generational()) {
1253     // If the cycle was shortened for having enough immediate garbage, this could be
1254     // the last GC safepoint before concurrent marking of old resumes. We must be sure
1255     // that old mark threads don't see any pointers to garbage in the SATB buffers.
1256     if (heap->is_concurrent_old_mark_in_progress()) {
1257       heap->transfer_old_pointers_from_satb();
1258     }
1259 
1260     ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
1261     for (size_t i = 0; i < heap->num_regions(); i++) {
1262       ShenandoahHeapRegion* const r = heap->get_region(i);
1263       if (r->is_active() && r->is_young()) {
1264         HeapWord* tams = ctx->top_at_mark_start(r);
1265         HeapWord* top = r->top();
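             // Allocations above TAMS mean the region took new objects during this
             // cycle, so its age restarts; otherwise it ages on aging cycles.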
1266         if (top > tams) {
1267           r->reset_age();
1268         } else if (heap->is_aging_cycle()) {
1269           r->increment_age();
1270         }
1271       }
1272     }
1273   }
1274 }
1275 
1276 void ShenandoahConcurrentGC::op_cleanup_complete() {
1277   ShenandoahHeap::heap()->free_set()->recycle_trash();
1278 }
1279 
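     // If the cycle has been cancelled, record the point at which it was abandoned
     // so that a degenerated GC can continue from there.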
1280 bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
1281   if (ShenandoahHeap::heap()->cancelled_gc()) {
1282     _degen_point = point;
1283     return true;
1284   }
1285   return false;
1286 }
1287 
1288 const char* ShenandoahConcurrentGC::init_mark_event_message() const {
1289   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1290   assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
1291   if (heap->unload_classes()) {
1292     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Init Mark", " (unload classes)");
1293   } else {
1294     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Init Mark", "");
1295   }
1296 }
1297 
1298 const char* ShenandoahConcurrentGC::final_mark_event_message() const {
1299   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1300   assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
1301          "Should not have forwarded objects during final mark, unless old gen concurrent mark is running");
1302 
1303   if (heap->unload_classes()) {
1304     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Mark", " (unload classes)");
1305   } else {
1306     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Mark", "");
1307   }
1308 }
1309 
1310 const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
1311   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1312   assert(!heap->has_forwarded_objects() || heap->is_concurrent_old_mark_in_progress(),
1313          "Should not have forwarded objects during concurrent mark, unless old gen concurrent mark is running");
1314   if (heap->unload_classes()) {
1315     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent marking", " (unload classes)");
1316   } else {
1317     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent marking", "");
1318   }
1319 }