< prev index next >

src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp

Print this page

 721   // Make above changes visible to worker threads
 722   OrderAccess::fence();
 723 
 724   // Arm nmethods for concurrent mark
 725   ShenandoahCodeRoots::arm_nmethods_for_mark();
 726 
 727   ShenandoahStackWatermark::change_epoch_id();
 728 
 729   {
 730     ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_propagate_gc_state);
 731     heap->propagate_gc_state_to_all_threads();
 732   }
 733 }
 734 
// Concurrent root-marking phase: delegates to the marking state machine
// held in _mark, which marks roots while mutators keep running.
void ShenandoahConcurrentGC::op_mark_roots() {
  _mark.mark_concurrent_roots();
}
 738 
// Main concurrent marking phase: traces the live object graph via the
// _mark member, concurrently with mutator threads.
void ShenandoahConcurrentGC::op_mark() {
  _mark.concurrent_mark();
}
 742 
 743 void ShenandoahConcurrentGC::op_final_mark() {
 744   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 745   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
 746   assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
 747 
 748   if (ShenandoahVerify) {
 749     heap->verifier()->verify_roots_no_forwarded(_generation);
 750   }
 751 
 752   if (!heap->cancelled_gc()) {
 753     _mark.finish_mark();
 754     assert(!heap->cancelled_gc(), "STW mark cannot OOM");
 755 
 756     // Notify JVMTI that the tagmap table will need cleaning.
 757     JvmtiTagMap::set_needs_cleaning();
 758 
 759     // The collection set is chosen by prepare_regions_and_collection_set(). Additionally, certain parameters have been
 760     // established to govern the evacuation efforts that are about to begin.  Refer to comments on reserve members in

1075   }
1076 };
1077 
1078 void ShenandoahConcurrentGC::op_strong_roots() {
1079   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1080   assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
1081   ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
1082   heap->workers()->run_task(&task);
1083   heap->set_concurrent_strong_root_in_progress(false);
1084 }
1085 
1086 void ShenandoahConcurrentGC::op_cleanup_early() {
1087   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
1088                               ShenandoahWorkerPolicy::calc_workers_for_conc_cleanup(),
1089                               "cleanup early.");
1090   ShenandoahHeap::heap()->recycle_trash();
1091 }
1092 
// Concurrent evacuation phase: copies live objects out of the collection
// set for the generation this GC cycle operates on.
void ShenandoahConcurrentGC::op_evacuate() {
  ShenandoahHeap::heap()->evacuate_collection_set(_generation, true /*concurrent*/);
}
1096 
1097 void ShenandoahConcurrentGC::op_init_update_refs() {
1098   if (ShenandoahVerify) {
1099     ShenandoahHeap* const heap = ShenandoahHeap::heap();
1100     ShenandoahTimingsTracker v(ShenandoahPhaseTimings::init_update_refs_verify);
1101     heap->verifier()->verify_before_update_refs(_generation);
1102   }
1103 }
1104 
// Concurrent update-references phase: walks the heap and updates
// references for the given generation to the new locations of
// evacuated objects.
void ShenandoahConcurrentGC::op_update_refs() {
  ShenandoahHeap::heap()->update_heap_references(_generation, true /*concurrent*/);
}
1108 
// Handshake closure that updates references held in a thread's own
// locations (e.g. its stack) while that thread is stopped for the
// handshake.
class ShenandoahUpdateThreadHandshakeClosure : public HandshakeClosure {
private:
  // This closure runs when thread is stopped for handshake, which means
  // we can use non-concurrent closure here, as long as it only updates
  // locations modified by the thread itself, i.e. stack locations.
  ShenandoahNonConcUpdateRefsClosure _cl;
public:
  ShenandoahUpdateThreadHandshakeClosure();
  void do_thread(Thread* thread) override;
};
1119 
// Constructor: labels the handshake operation so it can be identified
// (e.g. in logs/diagnostics that report handshake names).
ShenandoahUpdateThreadHandshakeClosure::ShenandoahUpdateThreadHandshakeClosure() :
  HandshakeClosure("Shenandoah Update Thread Roots") {
}
1123 
1124 void ShenandoahUpdateThreadHandshakeClosure::do_thread(Thread* thread) {
1125   if (thread->is_Java_thread()) {
1126     JavaThread* jt = JavaThread::cast(thread);

 721   // Make above changes visible to worker threads
 722   OrderAccess::fence();
 723 
 724   // Arm nmethods for concurrent mark
 725   ShenandoahCodeRoots::arm_nmethods_for_mark();
 726 
 727   ShenandoahStackWatermark::change_epoch_id();
 728 
 729   {
 730     ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_propagate_gc_state);
 731     heap->propagate_gc_state_to_all_threads();
 732   }
 733 }
 734 
// Concurrent root-marking phase: delegates to the marking state machine
// held in _mark, which marks roots while mutators keep running.
void ShenandoahConcurrentGC::op_mark_roots() {
  _mark.mark_concurrent_roots();
}
 738 
// Main concurrent marking phase: traces the live object graph via the
// _mark member, concurrently with mutator threads.
void ShenandoahConcurrentGC::op_mark() {
  _mark.concurrent_mark();
  // Optional delay hook: when ShenandoahDelayGC is set, stall the GC
  // thread to stretch this phase (presumably a testing/diagnostic knob;
  // units are whatever os::naked_sleep takes -- TODO confirm ms).
  if (ShenandoahDelayGC > 0) {
    os::naked_sleep(ShenandoahDelayGC);
  }
}
 745 
 746 void ShenandoahConcurrentGC::op_final_mark() {
 747   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 748   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
 749   assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
 750 
 751   if (ShenandoahVerify) {
 752     heap->verifier()->verify_roots_no_forwarded(_generation);
 753   }
 754 
 755   if (!heap->cancelled_gc()) {
 756     _mark.finish_mark();
 757     assert(!heap->cancelled_gc(), "STW mark cannot OOM");
 758 
 759     // Notify JVMTI that the tagmap table will need cleaning.
 760     JvmtiTagMap::set_needs_cleaning();
 761 
 762     // The collection set is chosen by prepare_regions_and_collection_set(). Additionally, certain parameters have been
 763     // established to govern the evacuation efforts that are about to begin.  Refer to comments on reserve members in

1078   }
1079 };
1080 
1081 void ShenandoahConcurrentGC::op_strong_roots() {
1082   ShenandoahHeap* const heap = ShenandoahHeap::heap();
1083   assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
1084   ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
1085   heap->workers()->run_task(&task);
1086   heap->set_concurrent_strong_root_in_progress(false);
1087 }
1088 
1089 void ShenandoahConcurrentGC::op_cleanup_early() {
1090   ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
1091                               ShenandoahWorkerPolicy::calc_workers_for_conc_cleanup(),
1092                               "cleanup early.");
1093   ShenandoahHeap::heap()->recycle_trash();
1094 }
1095 
// Concurrent evacuation phase: copies live objects out of the collection
// set for the generation this GC cycle operates on.
void ShenandoahConcurrentGC::op_evacuate() {
  ShenandoahHeap::heap()->evacuate_collection_set(_generation, true /*concurrent*/);
  // Optional delay hook to lengthen the evacuation window; no-op unless
  // ShenandoahDelayGC is set (presumably a testing/diagnostic knob).
  if (ShenandoahDelayGC > 0) {
    os::naked_sleep(ShenandoahDelayGC);
  }
}
1102 
1103 void ShenandoahConcurrentGC::op_init_update_refs() {
1104   if (ShenandoahVerify) {
1105     ShenandoahHeap* const heap = ShenandoahHeap::heap();
1106     ShenandoahTimingsTracker v(ShenandoahPhaseTimings::init_update_refs_verify);
1107     heap->verifier()->verify_before_update_refs(_generation);
1108   }
1109 }
1110 
// Concurrent update-references phase: walks the heap and updates
// references for the given generation to the new locations of
// evacuated objects.
void ShenandoahConcurrentGC::op_update_refs() {
  ShenandoahHeap::heap()->update_heap_references(_generation, true /*concurrent*/);
  // Optional delay hook to lengthen this phase; no-op unless
  // ShenandoahDelayGC is set (presumably a testing/diagnostic knob).
  if (ShenandoahDelayGC > 0) {
    os::naked_sleep(ShenandoahDelayGC);
  }
}
1117 
// Handshake closure that updates references held in a thread's own
// locations (e.g. its stack) while that thread is stopped for the
// handshake.
class ShenandoahUpdateThreadHandshakeClosure : public HandshakeClosure {
private:
  // This closure runs when thread is stopped for handshake, which means
  // we can use non-concurrent closure here, as long as it only updates
  // locations modified by the thread itself, i.e. stack locations.
  ShenandoahNonConcUpdateRefsClosure _cl;
public:
  ShenandoahUpdateThreadHandshakeClosure();
  void do_thread(Thread* thread) override;
};
1128 
// Constructor: labels the handshake operation so it can be identified
// (e.g. in logs/diagnostics that report handshake names).
ShenandoahUpdateThreadHandshakeClosure::ShenandoahUpdateThreadHandshakeClosure() :
  HandshakeClosure("Shenandoah Update Thread Roots") {
}
1132 
1133 void ShenandoahUpdateThreadHandshakeClosure::do_thread(Thread* thread) {
1134   if (thread->is_Java_thread()) {
1135     JavaThread* jt = JavaThread::cast(thread);
< prev index next >