10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25
26 #include "precompiled.hpp"
27
28 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
29 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
30 #include "gc/shenandoah/shenandoahMark.inline.hpp"
31 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
32 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
33 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
34 #include "gc/shenandoah/shenandoahUtils.hpp"
35 #include "gc/shenandoah/shenandoahVerifier.hpp"
36
// Common setup for all marking ref closures: remember the worker-local scan
// queue, cache the heap's current marking context, and initialize to strong
// marking (_weak == false).
ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp) :
  MetadataVisitingOopIterateClosure(rp),
  _queue(q),
  _mark_context(ShenandoahHeap::heap()->marking_context()),
  _weak(false)
{ }
43
// A marking pass borrows the task queue set owned by the heap's current
// marking context; ShenandoahMark itself does not own the queues.
ShenandoahMark::ShenandoahMark() :
  _task_queues(ShenandoahHeap::heap()->marking_context()->task_queues()) {
}
47
48 void ShenandoahMark::clear() {
49 // Clean up marking stacks.
50 ShenandoahObjToScanQueueSet* queues = ShenandoahHeap::heap()->marking_context()->task_queues();
51 queues->clear();
52
53 // Cancel SATB buffers.
54 ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
55 }
56
57 template <bool CANCELLABLE, StringDedupMode STRING_DEDUP>
58 void ShenandoahMark::mark_loop_prework(uint w, TaskTerminator *t, ShenandoahReferenceProcessor *rp, StringDedup::Requests* const req) {
59 ShenandoahObjToScanQueue* q = get_queue(w);
60
61 ShenandoahHeap* const heap = ShenandoahHeap::heap();
62 ShenandoahLiveData* ld = heap->get_liveness_cache(w);
63
64 // TODO: We can clean up this if we figure out how to do templated oop closures that
65 // play nice with specialized_oop_iterators.
66 if (heap->unload_classes()) {
67 if (heap->has_forwarded_objects()) {
68 using Closure = ShenandoahMarkUpdateRefsMetadataClosure;
69 Closure cl(q, rp);
70 mark_loop_work<Closure, CANCELLABLE, STRING_DEDUP>(&cl, ld, w, t, req);
71 } else {
72 using Closure = ShenandoahMarkRefsMetadataClosure;
73 Closure cl(q, rp);
74 mark_loop_work<Closure, CANCELLABLE, STRING_DEDUP>(&cl, ld, w, t, req);
75 }
76 } else {
77 if (heap->has_forwarded_objects()) {
78 using Closure = ShenandoahMarkUpdateRefsClosure;
79 Closure cl(q, rp);
80 mark_loop_work<Closure, CANCELLABLE, STRING_DEDUP>(&cl, ld, w, t, req);
81 } else {
82 using Closure = ShenandoahMarkRefsClosure;
83 Closure cl(q, rp);
84 mark_loop_work<Closure, CANCELLABLE, STRING_DEDUP>(&cl, ld, w, t, req);
85 }
86 }
87
88 heap->flush_liveness_cache(w);
89 }
90
91 void ShenandoahMark::mark_loop(uint worker_id, TaskTerminator* terminator, ShenandoahReferenceProcessor *rp,
92 bool cancellable, StringDedupMode dedup_mode, StringDedup::Requests* const req) {
93 if (cancellable) {
94 switch(dedup_mode) {
95 case NO_DEDUP:
96 mark_loop_prework<true, NO_DEDUP>(worker_id, terminator, rp, req);
97 break;
98 case ENQUEUE_DEDUP:
99 mark_loop_prework<true, ENQUEUE_DEDUP>(worker_id, terminator, rp, req);
100 break;
101 case ALWAYS_DEDUP:
102 mark_loop_prework<true, ALWAYS_DEDUP>(worker_id, terminator, rp, req);
103 break;
104 }
105 } else {
106 switch(dedup_mode) {
107 case NO_DEDUP:
108 mark_loop_prework<false, NO_DEDUP>(worker_id, terminator, rp, req);
109 break;
110 case ENQUEUE_DEDUP:
111 mark_loop_prework<false, ENQUEUE_DEDUP>(worker_id, terminator, rp, req);
112 break;
113 case ALWAYS_DEDUP:
114 mark_loop_prework<false, ALWAYS_DEDUP>(worker_id, terminator, rp, req);
115 break;
116 }
117 }
118 }
119
120 template <class T, bool CANCELLABLE, StringDedupMode STRING_DEDUP>
121 void ShenandoahMark::mark_loop_work(T* cl, ShenandoahLiveData* live_data, uint worker_id, TaskTerminator *terminator, StringDedup::Requests* const req) {
122 uintx stride = ShenandoahMarkLoopStride;
123
124 ShenandoahHeap* heap = ShenandoahHeap::heap();
125 ShenandoahObjToScanQueueSet* queues = task_queues();
126 ShenandoahObjToScanQueue* q;
127 ShenandoahMarkTask t;
128
129 heap->ref_processor()->set_mark_closure(worker_id, cl);
130
131 /*
132 * Process outstanding queues, if any.
133 *
134 * There can be more queues than workers. To deal with the imbalance, we claim
135 * extra queues first. Since marking can push new tasks into the queue associated
136 * with this worker id, we come back to process this queue in the normal loop.
137 */
138 assert(queues->get_reserved() == heap->workers()->active_workers(),
139 "Need to reserve proper number of queues: reserved: %u, active: %u", queues->get_reserved(), heap->workers()->active_workers());
140
141 q = queues->claim_next();
142 while (q != NULL) {
143 if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
144 return;
145 }
146
147 for (uint i = 0; i < stride; i++) {
148 if (q->pop(t)) {
149 do_task<T, STRING_DEDUP>(q, cl, live_data, req, &t);
150 } else {
151 assert(q->is_empty(), "Must be empty");
152 q = queues->claim_next();
153 break;
154 }
155 }
156 }
157 q = get_queue(worker_id);
158
159 ShenandoahSATBBufferClosure drain_satb(q);
160 SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
161
162 /*
163 * Normal marking loop:
164 */
165 while (true) {
166 if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
167 return;
168 }
169
170 while (satb_mq_set.completed_buffers_num() > 0) {
171 satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
172 }
173
174 uint work = 0;
175 for (uint i = 0; i < stride; i++) {
176 if (q->pop(t) ||
177 queues->steal(worker_id, t)) {
178 do_task<T, STRING_DEDUP>(q, cl, live_data, req, &t);
179 work++;
180 } else {
181 break;
182 }
183 }
184
185 if (work == 0) {
186 // No work encountered in current stride, try to terminate.
187 // Need to leave the STS here otherwise it might block safepoints.
188 ShenandoahSuspendibleThreadSetLeaver stsl(CANCELLABLE && ShenandoahSuspendibleWorkers);
189 ShenandoahTerminatorTerminator tt(heap);
|
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25
26 #include "precompiled.hpp"
27
28 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
29 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
30 #include "gc/shenandoah/shenandoahGeneration.hpp"
31 #include "gc/shenandoah/shenandoahMark.inline.hpp"
32 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
33 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
34 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
35 #include "gc/shenandoah/shenandoahUtils.hpp"
36 #include "gc/shenandoah/shenandoahVerifier.hpp"
37
// Common setup for all marking ref closures: remember the worker-local scan
// queue and the worker's old-generation queue (may differ per generation mode
// — TODO confirm when old_q is NULL), cache the heap's current marking
// context, and initialize to strong marking (_weak == false).
ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp, ShenandoahObjToScanQueue* old_q) :
  MetadataVisitingOopIterateClosure(rp),
  _queue(q),
  _old_queue(old_q),
  _mark_context(ShenandoahHeap::heap()->marking_context()),
  _weak(false)
{ }
45
// A marking pass is tied to one generation: both the regular task queue set
// and the old-gen task queue set are taken from that generation.
ShenandoahMark::ShenandoahMark(ShenandoahGeneration* generation) :
  _generation(generation),
  _task_queues(generation->task_queues()),
  _old_gen_task_queues(generation->old_gen_task_queues()) {
}
51
52 template <GenerationMode GENERATION, bool CANCELLABLE, StringDedupMode STRING_DEDUP>
53 void ShenandoahMark::mark_loop_prework(uint w, TaskTerminator *t, ShenandoahReferenceProcessor *rp, StringDedup::Requests* const req, bool update_refs) {
54 ShenandoahObjToScanQueue* q = get_queue(w);
55 ShenandoahObjToScanQueue* old = get_old_queue(w);
56
57 ShenandoahHeap* const heap = ShenandoahHeap::heap();
58 ShenandoahLiveData* ld = heap->get_liveness_cache(w);
59
60 // TODO: We can clean up this if we figure out how to do templated oop closures that
61 // play nice with specialized_oop_iterators.
62 if (heap->unload_classes()) {
63 if (update_refs) {
64 using Closure = ShenandoahMarkUpdateRefsMetadataClosure<GENERATION>;
65 Closure cl(q, rp, old);
66 mark_loop_work<Closure, GENERATION, CANCELLABLE, STRING_DEDUP>(&cl, ld, w, t, req);
67 } else {
68 using Closure = ShenandoahMarkRefsMetadataClosure<GENERATION>;
69 Closure cl(q, rp, old);
70 mark_loop_work<Closure, GENERATION, CANCELLABLE, STRING_DEDUP>(&cl, ld, w, t, req);
71 }
72 } else {
73 if (update_refs) {
74 using Closure = ShenandoahMarkUpdateRefsClosure<GENERATION>;
75 Closure cl(q, rp, old);
76 mark_loop_work<Closure, GENERATION, CANCELLABLE, STRING_DEDUP>(&cl, ld, w, t, req);
77 } else {
78 using Closure = ShenandoahMarkRefsClosure<GENERATION>;
79 Closure cl(q, rp, old);
80 mark_loop_work<Closure, GENERATION, CANCELLABLE, STRING_DEDUP>(&cl, ld, w, t, req);
81 }
82 }
83
84 heap->flush_liveness_cache(w);
85 }
86
87 template<bool CANCELLABLE, StringDedupMode STRING_DEDUP>
88 void ShenandoahMark::mark_loop(GenerationMode generation, uint worker_id, TaskTerminator* terminator, ShenandoahReferenceProcessor *rp, StringDedup::Requests* const req) {
89 bool update_refs = ShenandoahHeap::heap()->has_forwarded_objects();
90 switch (generation) {
91 case YOUNG:
92 mark_loop_prework<YOUNG, CANCELLABLE, STRING_DEDUP>(worker_id, terminator, rp, req, update_refs);
93 break;
94 case OLD:
95 // Old generation collection only performs marking, it should not update references.
96 mark_loop_prework<OLD, CANCELLABLE, STRING_DEDUP>(worker_id, terminator, rp, req, false);
97 break;
98 case GLOBAL:
99 mark_loop_prework<GLOBAL, CANCELLABLE, STRING_DEDUP>(worker_id, terminator, rp, req, update_refs);
100 break;
101 default:
102 ShouldNotReachHere();
103 break;
104 }
105 }
106
107 void ShenandoahMark::mark_loop(GenerationMode generation, uint worker_id, TaskTerminator* terminator, ShenandoahReferenceProcessor *rp,
108 bool cancellable, StringDedupMode dedup_mode, StringDedup::Requests* const req) {
109 if (cancellable) {
110 switch(dedup_mode) {
111 case NO_DEDUP:
112 mark_loop<true, NO_DEDUP>(generation, worker_id, terminator, rp, req);
113 break;
114 case ENQUEUE_DEDUP:
115 mark_loop<true, ENQUEUE_DEDUP>(generation, worker_id, terminator, rp, req);
116 break;
117 case ALWAYS_DEDUP:
118 mark_loop<true, ALWAYS_DEDUP>(generation, worker_id, terminator, rp, req);
119 break;
120 }
121 } else {
122 switch(dedup_mode) {
123 case NO_DEDUP:
124 mark_loop<false, NO_DEDUP>(generation, worker_id, terminator, rp, req);
125 break;
126 case ENQUEUE_DEDUP:
127 mark_loop<false, ENQUEUE_DEDUP>(generation, worker_id, terminator, rp, req);
128 break;
129 case ALWAYS_DEDUP:
130 mark_loop<false, ALWAYS_DEDUP>(generation, worker_id, terminator, rp, req);
131 break;
132 }
133 }
134 }
135
136 template <class T, GenerationMode GENERATION, bool CANCELLABLE, StringDedupMode STRING_DEDUP>
137 void ShenandoahMark::mark_loop_work(T* cl, ShenandoahLiveData* live_data, uint worker_id, TaskTerminator *terminator, StringDedup::Requests* const req) {
138 uintx stride = ShenandoahMarkLoopStride;
139
140 ShenandoahHeap* heap = ShenandoahHeap::heap();
141 ShenandoahObjToScanQueueSet* queues = task_queues();
142 ShenandoahObjToScanQueue* q;
143 ShenandoahMarkTask t;
144
145 assert(heap->active_generation()->generation_mode() == GENERATION, "Sanity");
146 heap->active_generation()->ref_processor()->set_mark_closure(worker_id, cl);
147
148 /*
149 * Process outstanding queues, if any.
150 *
151 * There can be more queues than workers. To deal with the imbalance, we claim
152 * extra queues first. Since marking can push new tasks into the queue associated
153 * with this worker id, we come back to process this queue in the normal loop.
154 */
155 assert(queues->get_reserved() == heap->workers()->active_workers(),
156 "Need to reserve proper number of queues: reserved: %u, active: %u", queues->get_reserved(), heap->workers()->active_workers());
157
158 q = queues->claim_next();
159 while (q != NULL) {
160 if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
161 return;
162 }
163
164 for (uint i = 0; i < stride; i++) {
165 if (q->pop(t)) {
166 do_task<T, STRING_DEDUP>(q, cl, live_data, req, &t);
167 } else {
168 assert(q->is_empty(), "Must be empty");
169 q = queues->claim_next();
170 break;
171 }
172 }
173 }
174 q = get_queue(worker_id);
175 ShenandoahObjToScanQueue* old = get_old_queue(worker_id);
176
177 ShenandoahSATBBufferClosure<GENERATION> drain_satb(q, old);
178 SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
179
180 /*
181 * Normal marking loop:
182 */
183 while (true) {
184 if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
185 return;
186 }
187 while (satb_mq_set.completed_buffers_num() > 0) {
188 satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
189 }
190
191 uint work = 0;
192 for (uint i = 0; i < stride; i++) {
193 if (q->pop(t) ||
194 queues->steal(worker_id, t)) {
195 do_task<T, STRING_DEDUP>(q, cl, live_data, req, &t);
196 work++;
197 } else {
198 break;
199 }
200 }
201
202 if (work == 0) {
203 // No work encountered in current stride, try to terminate.
204 // Need to leave the STS here otherwise it might block safepoints.
205 ShenandoahSuspendibleThreadSetLeaver stsl(CANCELLABLE && ShenandoahSuspendibleWorkers);
206 ShenandoahTerminatorTerminator tt(heap);
|