1 /*
2 * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25
26 #include "precompiled.hpp"
27
28 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
29 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
30 #include "gc/shenandoah/shenandoahMark.inline.hpp"
31 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
32 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
33 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
34 #include "gc/shenandoah/shenandoahUtils.hpp"
35 #include "gc/shenandoah/shenandoahVerifier.hpp"
36
// Base closure for marking reachable oops: remembers the scan queue to push
// newly discovered objects onto and caches the heap's current marking context.
// _weak starts out false; presumably toggled by callers when scanning weak
// roots — confirm against the closure's users.
ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp) :
  MetadataVisitingOopIterateClosure(rp),
  _queue(q),
  _mark_context(ShenandoahHeap::heap()->marking_context()),
  _weak(false)
{ }
43
// Caches the marking task queue set owned by the heap's marking context.
ShenandoahMark::ShenandoahMark() :
  _task_queues(ShenandoahHeap::heap()->marking_context()->task_queues()) {
}
47
48 void ShenandoahMark::start_mark() {
49 if (!CodeCache::is_gc_marking_cycle_active()) {
50 CodeCache::on_gc_marking_cycle_start();
51 }
52 }
53
// Notify the code cache that the marking cycle is over.
void ShenandoahMark::end_mark() {
  // Unlike other GCs, we do not arm the nmethods
  // when marking terminates.
  CodeCache::on_gc_marking_cycle_finish();
}
59
60 void ShenandoahMark::clear() {
61 // Clean up marking stacks.
62 ShenandoahObjToScanQueueSet* queues = ShenandoahHeap::heap()->marking_context()->task_queues();
63 queues->clear();
64
65 // Cancel SATB buffers.
66 ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
67 }
68
// Per-worker setup before entering the main marking loop: fetch this worker's
// queue and liveness cache, then instantiate the statically-typed oop closure.
// When the heap may still contain forwarded objects, the closure that also
// updates references while marking is used; otherwise the plain marking
// closure suffices.
//
// w   - worker id (selects queue and liveness cache)
// t   - shared termination protocol
// rp  - reference processor for discovered references
// req - string-dedup request buffer, consumed per STRING_DEDUP mode
template <bool CANCELLABLE, StringDedupMode STRING_DEDUP>
void ShenandoahMark::mark_loop_prework(uint w, TaskTerminator *t, ShenandoahReferenceProcessor *rp, StringDedup::Requests* const req) {
  ShenandoahObjToScanQueue* q = get_queue(w);

  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  ShenandoahLiveData* ld = heap->get_liveness_cache(w);

  // TODO: We can clean up this if we figure out how to do templated oop closures that
  // play nice with specialized_oop_iterators.
  if (heap->has_forwarded_objects()) {
    using Closure = ShenandoahMarkUpdateRefsClosure;
    Closure cl(q, rp);
    mark_loop_work<Closure, CANCELLABLE, STRING_DEDUP>(&cl, ld, w, t, req);
  } else {
    using Closure = ShenandoahMarkRefsClosure;
    Closure cl(q, rp);
    mark_loop_work<Closure, CANCELLABLE, STRING_DEDUP>(&cl, ld, w, t, req);
  }

  // Publish this worker's accumulated liveness data back to the heap.
  heap->flush_liveness_cache(w);
}
90
91 void ShenandoahMark::mark_loop(uint worker_id, TaskTerminator* terminator, ShenandoahReferenceProcessor *rp,
92 bool cancellable, StringDedupMode dedup_mode, StringDedup::Requests* const req) {
93 if (cancellable) {
94 switch(dedup_mode) {
95 case NO_DEDUP:
96 mark_loop_prework<true, NO_DEDUP>(worker_id, terminator, rp, req);
97 break;
98 case ENQUEUE_DEDUP:
99 mark_loop_prework<true, ENQUEUE_DEDUP>(worker_id, terminator, rp, req);
100 break;
101 case ALWAYS_DEDUP:
102 mark_loop_prework<true, ALWAYS_DEDUP>(worker_id, terminator, rp, req);
103 break;
104 }
105 } else {
106 switch(dedup_mode) {
107 case NO_DEDUP:
108 mark_loop_prework<false, NO_DEDUP>(worker_id, terminator, rp, req);
109 break;
110 case ENQUEUE_DEDUP:
111 mark_loop_prework<false, ENQUEUE_DEDUP>(worker_id, terminator, rp, req);
112 break;
113 case ALWAYS_DEDUP:
114 mark_loop_prework<false, ALWAYS_DEDUP>(worker_id, terminator, rp, req);
115 break;
116 }
117 }
118 }
119
// Core marking loop, statically specialized on closure type, cancellability,
// and string-dedup mode. First drains any unclaimed queues, then settles into
// the normal loop over this worker's own queue: drain SATB buffers, process
// tasks (popping locally, stealing when empty), and offer termination when a
// whole stride finds no work.
//
// cl         - marking closure applied to each task
// live_data  - per-worker liveness accumulation buffer
// worker_id  - this worker's id; selects its queue and steal victim set
// terminator - termination protocol shared by all marking workers
// req        - string-dedup request buffer
template <class T, bool CANCELLABLE, StringDedupMode STRING_DEDUP>
void ShenandoahMark::mark_loop_work(T* cl, ShenandoahLiveData* live_data, uint worker_id, TaskTerminator *terminator, StringDedup::Requests* const req) {
  // Number of tasks processed between cancellation/SATB checks.
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q;
  ShenandoahMarkTask t;

  // Register the marking closure so reference discovery for this worker
  // funnels through it.
  heap->ref_processor()->set_mark_closure(worker_id, cl);

  /*
   * Process outstanding queues, if any.
   *
   * There can be more queues than workers. To deal with the imbalance, we claim
   * extra queues first. Since marking can push new tasks into the queue associated
   * with this worker id, we come back to process this queue in the normal loop.
   */
  assert(queues->get_reserved() == heap->workers()->active_workers(),
         "Need to reserve proper number of queues: reserved: %u, active: %u", queues->get_reserved(), heap->workers()->active_workers());

  q = queues->claim_next();
  while (q != nullptr) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop(t)) {
        do_task<T, STRING_DEDUP>(q, cl, live_data, req, &t);
      } else {
        assert(q->is_empty(), "Must be empty");
        // Current claimed queue drained; move on to the next unclaimed one.
        q = queues->claim_next();
        break;
      }
    }
  }
  // From here on, work exclusively on this worker's own queue (plus stealing).
  q = get_queue(worker_id);

  ShenandoahSATBBufferClosure drain_satb(q);
  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  /*
   * Normal marking loop:
   */
  while (true) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }

    // Drain SATB buffers completed by mutators since the last pass.
    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      if (q->pop(t) ||
          queues->steal(worker_id, t)) {
        do_task<T, STRING_DEDUP>(q, cl, live_data, req, &t);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No work encountered in current stride, try to terminate.
      // Need to leave the STS here otherwise it might block safepoints.
      ShenandoahSuspendibleThreadSetLeaver stsl(CANCELLABLE);
      ShenandoahTerminatorTerminator tt(heap);
      if (terminator->offer_termination(&tt)) return;
    }
  }
}
|
1 /*
2 * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
3 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26
27 #include "precompiled.hpp"
28
29 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
30 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
31 #include "gc/shenandoah/shenandoahGeneration.hpp"
32 #include "gc/shenandoah/shenandoahMark.inline.hpp"
33 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
34 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
35 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
36 #include "gc/shenandoah/shenandoahUtils.hpp"
37 #include "gc/shenandoah/shenandoahVerifier.hpp"
38
39 void ShenandoahMark::start_mark() {
40 if (!CodeCache::is_gc_marking_cycle_active()) {
41 CodeCache::on_gc_marking_cycle_start();
42 }
43 }
44
// Notify the code cache that the marking cycle is over, but keep the cycle
// open while concurrent old-generation marking is still in progress.
void ShenandoahMark::end_mark() {
  // Unlike other GCs, we do not arm the nmethods
  // when marking terminates.
  if (!ShenandoahHeap::heap()->is_concurrent_old_mark_in_progress()) {
    CodeCache::on_gc_marking_cycle_finish();
  }
}
52
// Base closure for marking reachable oops: remembers the scan queue for newly
// discovered objects, the worker's old-generation queue (generational modes),
// and caches the heap's current marking context. _weak starts out false;
// presumably toggled by callers when scanning weak roots — confirm against
// the closure's users.
ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp, ShenandoahObjToScanQueue* old_q) :
  MetadataVisitingOopIterateClosure(rp),
  _queue(q),
  _old_queue(old_q),
  _mark_context(ShenandoahHeap::heap()->marking_context()),
  _weak(false)
{ }
60
// Binds this marking pass to a specific generation and caches that
// generation's task queues along with its associated old-gen task queues.
ShenandoahMark::ShenandoahMark(ShenandoahGeneration* generation) :
  _generation(generation),
  _task_queues(generation->task_queues()),
  _old_gen_task_queues(generation->old_gen_task_queues()) {
}
66
// Per-worker setup before entering the main marking loop: fetch this worker's
// queue, old-gen queue, and liveness cache, then instantiate the
// statically-typed oop closure. Unlike the non-generational path, whether
// references are updated during marking is decided by the caller via
// update_refs (OLD marking always passes false).
//
// w           - worker id (selects queues and liveness cache)
// t           - shared termination protocol
// rp          - reference processor for discovered references
// req         - string-dedup request buffer, consumed per STRING_DEDUP mode
// update_refs - true to also fix forwarded references while marking
template <ShenandoahGenerationType GENERATION, bool CANCELLABLE, StringDedupMode STRING_DEDUP>
void ShenandoahMark::mark_loop_prework(uint w, TaskTerminator *t, ShenandoahReferenceProcessor *rp, StringDedup::Requests* const req, bool update_refs) {
  ShenandoahObjToScanQueue* q = get_queue(w);
  // Old-generation queue for this worker (see get_old_queue).
  ShenandoahObjToScanQueue* old_q = get_old_queue(w);

  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  ShenandoahLiveData* ld = heap->get_liveness_cache(w);

  // TODO: We can clean up this if we figure out how to do templated oop closures that
  // play nice with specialized_oop_iterators.
  if (update_refs) {
    using Closure = ShenandoahMarkUpdateRefsClosure<GENERATION>;
    Closure cl(q, rp, old_q);
    mark_loop_work<Closure, GENERATION, CANCELLABLE, STRING_DEDUP>(&cl, ld, w, t, req);
  } else {
    using Closure = ShenandoahMarkRefsClosure<GENERATION>;
    Closure cl(q, rp, old_q);
    mark_loop_work<Closure, GENERATION, CANCELLABLE, STRING_DEDUP>(&cl, ld, w, t, req);
  }

  // Publish this worker's accumulated liveness data back to the heap.
  heap->flush_liveness_cache(w);
}
89
// Dispatch the runtime generation value onto the GENERATION template
// parameter of mark_loop_prework. References are updated during marking only
// when the heap still has forwarded objects — and never for OLD marking.
template<bool CANCELLABLE, StringDedupMode STRING_DEDUP>
void ShenandoahMark::mark_loop(uint worker_id, TaskTerminator* terminator, ShenandoahReferenceProcessor *rp,
                               ShenandoahGenerationType generation, StringDedup::Requests* const req) {
  bool update_refs = ShenandoahHeap::heap()->has_forwarded_objects();
  switch (generation) {
    case YOUNG:
      mark_loop_prework<YOUNG, CANCELLABLE, STRING_DEDUP>(worker_id, terminator, rp, req, update_refs);
      break;
    case OLD:
      // Old generation collection only performs marking, it should not update references.
      mark_loop_prework<OLD, CANCELLABLE, STRING_DEDUP>(worker_id, terminator, rp, req, false);
      break;
    case GLOBAL:
      mark_loop_prework<GLOBAL, CANCELLABLE, STRING_DEDUP>(worker_id, terminator, rp, req, update_refs);
      break;
    case NON_GEN:
      mark_loop_prework<NON_GEN, CANCELLABLE, STRING_DEDUP>(worker_id, terminator, rp, req, update_refs);
      break;
    default:
      ShouldNotReachHere();
      break;
  }
}
113
114 void ShenandoahMark::mark_loop(uint worker_id, TaskTerminator* terminator, ShenandoahReferenceProcessor *rp,
115 ShenandoahGenerationType generation, bool cancellable, StringDedupMode dedup_mode, StringDedup::Requests* const req) {
116 if (cancellable) {
117 switch(dedup_mode) {
118 case NO_DEDUP:
119 mark_loop<true, NO_DEDUP>(worker_id, terminator, rp, generation, req);
120 break;
121 case ENQUEUE_DEDUP:
122 mark_loop<true, ENQUEUE_DEDUP>(worker_id, terminator, rp, generation, req);
123 break;
124 case ALWAYS_DEDUP:
125 mark_loop<true, ALWAYS_DEDUP>(worker_id, terminator, rp, generation, req);
126 break;
127 }
128 } else {
129 switch(dedup_mode) {
130 case NO_DEDUP:
131 mark_loop<false, NO_DEDUP>(worker_id, terminator, rp, generation, req);
132 break;
133 case ENQUEUE_DEDUP:
134 mark_loop<false, ENQUEUE_DEDUP>(worker_id, terminator, rp, generation, req);
135 break;
136 case ALWAYS_DEDUP:
137 mark_loop<false, ALWAYS_DEDUP>(worker_id, terminator, rp, generation, req);
138 break;
139 }
140 }
141 }
142
// Core marking loop, statically specialized on closure type, generation,
// cancellability, and string-dedup mode. First drains any unclaimed queues,
// then settles into the normal loop over this worker's own queue: drain SATB
// buffers, process tasks (popping locally, stealing when empty), and offer
// termination when a whole stride finds no work.
//
// cl         - marking closure applied to each task
// live_data  - per-worker liveness accumulation buffer
// worker_id  - this worker's id; selects its queues and steal victim set
// terminator - termination protocol shared by all marking workers
// req        - string-dedup request buffer
template <class T, ShenandoahGenerationType GENERATION, bool CANCELLABLE, StringDedupMode STRING_DEDUP>
void ShenandoahMark::mark_loop_work(T* cl, ShenandoahLiveData* live_data, uint worker_id, TaskTerminator *terminator, StringDedup::Requests* const req) {
  // Number of tasks processed between cancellation/SATB checks.
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q;
  ShenandoahMarkTask t;

  // Do not use active_generation() : we must use the gc_generation() set by
  // ShenandoahGCScope on the ControllerThread's stack; no safepoint may
  // intervene to update active_generation, so we can't
  // shenandoah_assert_generations_reconciled() here.
  assert(heap->gc_generation()->type() == GENERATION, "Sanity: %d != %d", heap->gc_generation()->type(), GENERATION);
  // Register the marking closure so reference discovery for this worker
  // funnels through it.
  heap->gc_generation()->ref_processor()->set_mark_closure(worker_id, cl);

  /*
   * Process outstanding queues, if any.
   *
   * There can be more queues than workers. To deal with the imbalance, we claim
   * extra queues first. Since marking can push new tasks into the queue associated
   * with this worker id, we come back to process this queue in the normal loop.
   */
  assert(queues->get_reserved() == heap->workers()->active_workers(),
         "Need to reserve proper number of queues: reserved: %u, active: %u", queues->get_reserved(), heap->workers()->active_workers());

  q = queues->claim_next();
  while (q != nullptr) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop(t)) {
        do_task<T, GENERATION, STRING_DEDUP>(q, cl, live_data, req, &t, worker_id);
      } else {
        assert(q->is_empty(), "Must be empty");
        // Current claimed queue drained; move on to the next unclaimed one.
        q = queues->claim_next();
        break;
      }
    }
  }
  // From here on, work exclusively on this worker's own queues (plus stealing).
  q = get_queue(worker_id);
  ShenandoahObjToScanQueue* old_q = get_old_queue(worker_id);

  ShenandoahSATBBufferClosure<GENERATION> drain_satb(q, old_q);
  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  /*
   * Normal marking loop:
   */
  while (true) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }
    // Drain SATB buffers completed by mutators since the last pass.
    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      if (q->pop(t) ||
          queues->steal(worker_id, t)) {
        do_task<T, GENERATION, STRING_DEDUP>(q, cl, live_data, req, &t, worker_id);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No work encountered in current stride, try to terminate.
      // Need to leave the STS here otherwise it might block safepoints.
      ShenandoahSuspendibleThreadSetLeaver stsl(CANCELLABLE);
      ShenandoahTerminatorTerminator tt(heap);
      if (terminator->offer_termination(&tt)) return;
    }
  }
}
|