< prev index next >

src/hotspot/share/gc/shenandoah/shenandoahStackWatermark.cpp

Print this page

 44   assert(result, "NMethod on-stack must be alive");
 45 }
 46 
// Accessor for the TLAB statistics accumulated by retire_tlab().
// The GC reads these after watermark processing to aggregate per-thread
// allocation data.
ThreadLocalAllocStats& ShenandoahStackWatermark::stats() {
  return _stats;
}
 50 
// Current epoch identifier for this watermark; the StackWatermark machinery
// uses it to tell whether a stack has been processed in the current GC cycle.
uint32_t ShenandoahStackWatermark::epoch_id() const {
  return _epoch_id;
}
 54 
// Advance the epoch, invalidating any prior stack-processing state.
// Must be called at a safepoint (asserted), so there are no concurrent readers.
void ShenandoahStackWatermark::change_epoch_id() {
  shenandoah_assert_safepoint();
  _epoch_id++;
}
 59 
// Construct a watermark of kind 'gc' for the given Java thread. All closures
// are default-constructed; the active one is selected per GC phase by
// closure_from_context().
// NOTE(review): _epoch_id is read in the base-class initializer before this
// object's members are initialized — presumably it is a static (or otherwise
// already-initialized) field declared in the header; confirm.
ShenandoahStackWatermark::ShenandoahStackWatermark(JavaThread* jt) :
  StackWatermark(jt, StackWatermarkKind::gc, _epoch_id),
  _heap(ShenandoahHeap::heap()),
  _stats(),
  _keep_alive_cl(),
  _evac_update_oop_cl(),
  _nm_cl() {}
 67 
// Select the oop closure to apply during stack processing.
// GC worker threads may pass their own closure via 'context'; mutator-driven
// processing (context == nullptr) picks the closure matching the current GC
// phase: evacuation/update during concurrent weak roots, keep-alive during
// concurrent mark.
OopClosure* ShenandoahStackWatermark::closure_from_context(void* context) {
  if (context != nullptr) {
    // A context is only ever supplied by worker threads, and only in these
    // two phases.
    assert(_heap->is_concurrent_weak_root_in_progress() ||
           _heap->is_concurrent_mark_in_progress(),
           "Only these two phases");
    assert(Thread::current()->is_Worker_thread(), "Unexpected thread passing in context: " PTR_FORMAT, p2i(context));
    return reinterpret_cast<OopClosure*>(context);
  } else {
    if (_heap->is_concurrent_weak_root_in_progress()) {
      // Weak-root processing only arms watermarks while evacuation is running.
      assert(_heap->is_evacuation_in_progress(), "Nothing to evacuate");
      return &_evac_update_oop_cl;
    } else if (_heap->is_concurrent_mark_in_progress()) {
      return &_keep_alive_cl;
    } else {
      // The watermark should never be armed outside the phases above.
      ShouldNotReachHere();
      return nullptr;
    }
  }
}
 87 
 88 void ShenandoahStackWatermark::start_processing_impl(void* context) {
 89   NoSafepointVerifier nsv;
 90   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 91 
 92   // Process the non-frame part of the thread
 93   if (heap->is_concurrent_weak_root_in_progress()) {
 94     assert(heap->is_evacuation_in_progress(), "Should not be armed");
 95     // Retire the TLABs, which will force threads to reacquire their TLABs.
 96     // This is needed for two reasons. Strong one: new allocations would be with new freeset,
 97     // which would be outside the collection set, so no cset writes would happen there.
 98     // Weaker one: new allocations would happen past update watermark, and so less work would
 99     // be needed for reference updates (would update the large filler instead).
100     retire_tlab();
101 
102     _jt->oops_do_no_frames(closure_from_context(context), &_nm_cl);
103   } else if (heap->is_concurrent_mark_in_progress()) {
104     // We need to reset all TLABs because they might be below the TAMS, and we need to mark
105     // the objects in them. Do not let mutators allocate any new objects in their current TLABs.
106     // It is also a good place to resize the TLAB sizes for future allocations.
107     retire_tlab();
108 
109     _jt->oops_do_no_frames(closure_from_context(context), &_nm_cl);
110   } else {
111     ShouldNotReachHere();
112   }
113 



114   // Publishes the processing start to concurrent threads
115   StackWatermark::start_processing_impl(context);
116 }
117 
118 void ShenandoahStackWatermark::retire_tlab() {
119   // Retire TLAB
120   if (UseTLAB) {
121     _stats.reset();
122     _jt->retire_tlab(&_stats);
123     if (ResizeTLAB) {
124       _jt->tlab().resize();
125     }
126   }
127 }
128 
// Process the oops of a single stack frame: apply the phase-appropriate oop
// closure (or the worker-supplied one from 'context') together with the
// nmethod closure.
void ShenandoahStackWatermark::process(const frame& fr, RegisterMap& register_map, void* context) {
  OopClosure* oops = closure_from_context(context);
  assert(oops != nullptr, "Should not get to here");
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert((heap->is_concurrent_weak_root_in_progress() && heap->is_evacuation_in_progress()) ||
         heap->is_concurrent_mark_in_progress(),
         "Only these two phases");
  // Derived pointers are handled directly rather than deferred.
  fr.oops_do(oops, &_nm_cl, &register_map, DerivedPointerIterationMode::_directly);
}

 44   assert(result, "NMethod on-stack must be alive");
 45 }
 46 
// Accessor for the TLAB statistics accumulated by retire_tlab().
// The GC reads these after watermark processing to aggregate per-thread
// allocation data.
ThreadLocalAllocStats& ShenandoahStackWatermark::stats() {
  return _stats;
}
 50 
// Current epoch identifier for this watermark; the StackWatermark machinery
// uses it to tell whether a stack has been processed in the current GC cycle.
uint32_t ShenandoahStackWatermark::epoch_id() const {
  return _epoch_id;
}
 54 
// Advance the epoch, invalidating any prior stack-processing state.
// Must be called at a safepoint (asserted), so there are no concurrent readers.
void ShenandoahStackWatermark::change_epoch_id() {
  shenandoah_assert_safepoint();
  _epoch_id++;
}
 59 
// Construct a watermark of kind 'gc' for the given Java thread. All closures
// (including the no-op closure used during GC-state hotpatching) are
// default-constructed; the active one is selected per GC phase by
// closure_from_context().
// NOTE(review): _epoch_id is read in the base-class initializer before this
// object's members are initialized — presumably it is a static (or otherwise
// already-initialized) field declared in the header; confirm.
ShenandoahStackWatermark::ShenandoahStackWatermark(JavaThread* jt) :
  StackWatermark(jt, StackWatermarkKind::gc, _epoch_id),
  _heap(ShenandoahHeap::heap()),
  _stats(),
  _no_op_cl(),
  _keep_alive_cl(),
  _evac_update_oop_cl(),
  _nm_cl() {}
 68 
// Select the oop closure to apply during stack processing.
// GC worker threads may pass their own closure via 'context'; mutator-driven
// processing (context == nullptr) picks the closure for the current GC phase:
// evacuation/update during concurrent weak roots + evacuation, keep-alive
// during concurrent mark, and a no-op closure when the watermark is armed
// only for GC-state hotpatching.
OopClosure* ShenandoahStackWatermark::closure_from_context(void* context) {
  if (context != nullptr) {
    // NOTE(review): this assert does not admit the ShenandoahGCStateCheckHotpatch
    // case handled below — presumably workers never pass a context while only
    // hotpatching is in progress; confirm.
    assert(_heap->is_concurrent_weak_root_in_progress() ||
           _heap->is_concurrent_mark_in_progress(),
           "Only these two phases");
    assert(Thread::current()->is_Worker_thread(), "Unexpected thread passing in context: " PTR_FORMAT, p2i(context));
    return reinterpret_cast<OopClosure*>(context);
  } else {
    if (_heap->is_concurrent_weak_root_in_progress() && _heap->is_evacuation_in_progress()) {
      return &_evac_update_oop_cl;
    } else if (_heap->is_concurrent_mark_in_progress()) {
      return &_keep_alive_cl;
    } else if (ShenandoahGCStateCheckHotpatch) {
      // Armed only to update thread-local GC state; no oops need visiting.
      return &_no_op_cl;
    } else {
      // The watermark should never be armed outside the cases above.
      ShouldNotReachHere();
      return nullptr;
    }
  }
}
 89 
 90 void ShenandoahStackWatermark::start_processing_impl(void* context) {
 91   NoSafepointVerifier nsv;

 92 
 93   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 94   if (_heap->is_concurrent_weak_root_in_progress() && heap->is_evacuation_in_progress()) {

 95     // Retire the TLABs, which will force threads to reacquire their TLABs.
 96     // This is needed for two reasons. Strong one: new allocations would be with new freeset,
 97     // which would be outside the collection set, so no cset writes would happen there.
 98     // Weaker one: new allocations would happen past update watermark, and so less work would
 99     // be needed for reference updates (would update the large filler instead).
100     retire_tlab();


101   } else if (heap->is_concurrent_mark_in_progress()) {
102     // We need to reset all TLABs because they might be below the TAMS, and we need to mark
103     // the objects in them. Do not let mutators allocate any new objects in their current TLABs.
104     // It is also a good place to resize the TLAB sizes for future allocations.
105     retire_tlab();
106   } else if (ShenandoahGCStateCheckHotpatch) {
107     // Can be here for updating barriers. No TLAB retirement is needed.
108   } else {
109     ShouldNotReachHere();
110   }
111 
112   // Process the non-frame part of the thread
113   _jt->oops_do_no_frames(closure_from_context(context), &_nm_cl);
114 
115   // Publishes the processing start to concurrent threads
116   StackWatermark::start_processing_impl(context);
117 }
118 
119 void ShenandoahStackWatermark::retire_tlab() {
120   // Retire TLAB
121   if (UseTLAB) {
122     _stats.reset();
123     _jt->retire_tlab(&_stats);
124     if (ResizeTLAB) {
125       _jt->tlab().resize();
126     }
127   }
128 }
129 
// Process the oops of a single stack frame: apply the phase-appropriate oop
// closure (or the worker-supplied one from 'context') together with the
// nmethod closure.
void ShenandoahStackWatermark::process(const frame& fr, RegisterMap& register_map, void* context) {
  OopClosure* oops = closure_from_context(context);
  assert(oops != nullptr, "Should not get to here");
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert((heap->is_concurrent_weak_root_in_progress() && heap->is_evacuation_in_progress()) ||
         heap->is_concurrent_mark_in_progress() || ShenandoahGCStateCheckHotpatch,
         "Only these phases");
  // Derived pointers are handled directly rather than deferred.
  fr.oops_do(oops, &_nm_cl, &register_map, DerivedPointerIterationMode::_directly);
}
< prev index next >