  shenandoah_assert_safepoint();
  _epoch_id++;
}

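// New watermarks are tagged with the current global epoch; change_epoch_id()
// above bumps that epoch at a safepoint so the next GC cycle can distinguish
// already-processed stacks from stale ones.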
ShenandoahStackWatermark::ShenandoahStackWatermark(JavaThread* jt) :
  StackWatermark(jt, StackWatermarkKind::gc, _epoch_id),
  _heap(ShenandoahHeap::heap()),
  _stats(),
  _keep_alive_cl(),
  _evac_update_oop_cl(),
  _cb_cl() {}

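// Selects the oop closure for processing this thread's stack. GC worker threads
// pass their own closure via the context argument; otherwise (context == NULL,
// e.g. the thread hitting the watermark barrier itself), the closure is derived
// from the current GC phase: keep-alive marking during concurrent mark,
// evacuate-and-update during concurrent weak root processing.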
OopClosure* ShenandoahStackWatermark::closure_from_context(void* context) {
  if (context != NULL) {
    assert(_heap->is_concurrent_weak_root_in_progress() ||
           _heap->is_concurrent_mark_in_progress(),
           "Only these two phases");
    assert(Thread::current()->is_Worker_thread(), "Unexpected thread passing in context: " PTR_FORMAT, p2i(context));
    return reinterpret_cast<OopClosure*>(context);
  } else {
    if (_heap->is_concurrent_mark_in_progress()) {
      return &_keep_alive_cl;
    } else if (_heap->is_concurrent_weak_root_in_progress()) {
      assert(_heap->is_evacuation_in_progress(), "Nothing to evacuate");
      return &_evac_update_oop_cl;
    } else {
      ShouldNotReachHere();
      return NULL;
    }
  }
}

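// Runs when stack processing starts for this thread in a cycle: retires the
// TLAB and scans the non-frame parts of the thread before any frames are
// processed, then publishes the processing start to concurrent threads.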
void ShenandoahStackWatermark::start_processing_impl(void* context) {
  NoSafepointVerifier nsv;
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  // Process the non-frame part of the thread
  if (heap->is_concurrent_mark_in_progress()) {
    // We need to reset all TLABs because they might be below the TAMS, and we need to mark
    // the objects in them. Do not let mutators allocate any new objects in their current TLABs.
    // It is also a good time to resize the TLABs for future allocations.
    retire_tlab();

    _jt->oops_do_no_frames(closure_from_context(context), &_cb_cl);
  } else if (heap->is_concurrent_weak_root_in_progress()) {
    assert(heap->is_evacuation_in_progress(), "Should not be armed");
    // Retire the TLABs, which forces threads to reacquire them. This is needed for two
    // reasons. The strong one: new allocations would come from the new freeset, outside
    // the collection set, so no cset writes would happen there. The weaker one: new
    // allocations would happen past the update watermark, so less work would be needed
    // for reference updates (they would update the large filler instead).
    retire_tlab();

    _jt->oops_do_no_frames(closure_from_context(context), &_cb_cl);
  } else {
    ShouldNotReachHere();
  }

  // Publishes the processing start to concurrent threads
  StackWatermark::start_processing_impl(context);
}

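// Retires this thread's TLAB, recording allocation stats; _stats is reset
// first so it covers only this retirement.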
void ShenandoahStackWatermark::retire_tlab() {
  if (UseTLAB) {
    _stats.reset();
    _jt->tlab().retire(&_stats);
    if (ResizeTLAB) {
      _jt->tlab().resize();
    }
  }
}