1 /*
2 * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25
26 #include "precompiled.hpp"
27
28 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
29 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
30 #include "gc/shenandoah/shenandoahMark.inline.hpp"
31 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
32 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
33 #include "gc/shenandoah/shenandoahUtils.hpp"
34 #include "gc/shenandoah/shenandoahVerifier.hpp"
35
// Non-generational constructor: marking state is owned by the global
// marking context, so the task queue set is pulled from there.
ShenandoahMark::ShenandoahMark() :
  _task_queues(ShenandoahHeap::heap()->marking_context()->task_queues()) {
}
39
40 void ShenandoahMark::start_mark() {
41 if (!CodeCache::is_gc_marking_cycle_active()) {
42 CodeCache::on_gc_marking_cycle_start();
43 }
44 }
45
// Notify the code cache that the marking cycle has completed.
void ShenandoahMark::end_mark() {
  // Unlike other GCs, we do not arm the nmethods
  // when marking terminates.
  CodeCache::on_gc_marking_cycle_finish();
}
51
52 void ShenandoahMark::clear() {
53 // Clean up marking stacks.
54 ShenandoahObjToScanQueueSet* queues = ShenandoahHeap::heap()->marking_context()->task_queues();
55 queues->clear();
56
57 // Cancel SATB buffers.
58 ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
59 }
60
61 template <ShenandoahGenerationType GENERATION, bool CANCELLABLE, StringDedupMode STRING_DEDUP>
62 void ShenandoahMark::mark_loop_prework(uint w, TaskTerminator *t, ShenandoahReferenceProcessor *rp, StringDedup::Requests* const req) {
63 ShenandoahObjToScanQueue* q = get_queue(w);
64
65 ShenandoahHeap* const heap = ShenandoahHeap::heap();
66 ShenandoahLiveData* ld = heap->get_liveness_cache(w);
67
68 // TODO: We can clean up this if we figure out how to do templated oop closures that
69 // play nice with specialized_oop_iterators.
70 if (heap->has_forwarded_objects()) {
71 using Closure = ShenandoahMarkUpdateRefsClosure<GENERATION>;
72 Closure cl(q, rp);
73 mark_loop_work<Closure, GENERATION, CANCELLABLE, STRING_DEDUP>(&cl, ld, w, t, req);
74 } else {
75 using Closure = ShenandoahMarkRefsClosure<GENERATION>;
76 Closure cl(q, rp);
77 mark_loop_work<Closure, GENERATION, CANCELLABLE, STRING_DEDUP>(&cl, ld, w, t, req);
78 }
79
80 heap->flush_liveness_cache(w);
81 }
82
// Non-generational entry point: the runtime 'generation' argument is ignored
// here and marking always runs with the NON_GEN template specialization.
template<bool CANCELLABLE, StringDedupMode STRING_DEDUP>
void ShenandoahMark::mark_loop(uint worker_id, TaskTerminator* terminator, ShenandoahReferenceProcessor *rp,
                               ShenandoahGenerationType generation, StringDedup::Requests* const req) {
  mark_loop_prework<NON_GEN, CANCELLABLE, STRING_DEDUP>(worker_id, terminator, rp, req);
}
88
89 void ShenandoahMark::mark_loop(uint worker_id, TaskTerminator* terminator, ShenandoahReferenceProcessor *rp,
90 ShenandoahGenerationType generation, bool cancellable, StringDedupMode dedup_mode,
91 StringDedup::Requests* const req) {
92 if (cancellable) {
93 switch(dedup_mode) {
94 case NO_DEDUP:
95 mark_loop<true, NO_DEDUP>(worker_id, terminator, rp, generation, req);
96 break;
97 case ENQUEUE_DEDUP:
98 mark_loop<true, ENQUEUE_DEDUP>(worker_id, terminator, rp, generation, req);
99 break;
100 case ALWAYS_DEDUP:
101 mark_loop<true, ALWAYS_DEDUP>(worker_id, terminator, rp, generation, req);
102 break;
103 }
104 } else {
105 switch(dedup_mode) {
106 case NO_DEDUP:
107 mark_loop<false, NO_DEDUP>(worker_id, terminator, rp, generation, req);
108 break;
109 case ENQUEUE_DEDUP:
110 mark_loop<false, ENQUEUE_DEDUP>(worker_id, terminator, rp, generation, req);
111 break;
112 case ALWAYS_DEDUP:
113 mark_loop<false, ALWAYS_DEDUP>(worker_id, terminator, rp, generation, req);
114 break;
115 }
116 }
117 }
118
/*
 * Core marking loop executed by each marking worker.
 *
 * Phase 1 drains any unowned queues claimed from the queue set; phase 2 is
 * the steady-state loop: drain SATB buffers, then pop from the worker's own
 * queue or steal from others, offering termination when a whole stride
 * produced no work.
 */
template <class T, ShenandoahGenerationType GENERATION, bool CANCELLABLE, StringDedupMode STRING_DEDUP>
void ShenandoahMark::mark_loop_work(T* cl, ShenandoahLiveData* live_data, uint worker_id, TaskTerminator *terminator, StringDedup::Requests* const req) {
  // Number of tasks processed between cancellation/SATB checks.
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q;
  ShenandoahMarkTask t;

  // Register the marking closure so reference processing can use it.
  heap->ref_processor()->set_mark_closure(worker_id, cl);

  /*
   * Process outstanding queues, if any.
   *
   * There can be more queues than workers. To deal with the imbalance, we claim
   * extra queues first. Since marking can push new tasks into the queue associated
   * with this worker id, we come back to process this queue in the normal loop.
   */
  assert(queues->get_reserved() == heap->workers()->active_workers(),
         "Need to reserve proper number of queues: reserved: %u, active: %u", queues->get_reserved(), heap->workers()->active_workers());

  q = queues->claim_next();
  while (q != nullptr) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop(t)) {
        do_task<T, GENERATION, STRING_DEDUP>(q, cl, live_data, req, &t);
      } else {
        // Claimed queue drained; move on to the next unclaimed one.
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }
  // Switch to this worker's own queue for the normal loop.
  q = get_queue(worker_id);

  ShenandoahSATBBufferClosure<GENERATION> drain_satb(q);
  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  /*
   * Normal marking loop:
   */
  while (true) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }

    // Drain completed SATB buffers before taking more marking tasks.
    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      // Prefer local work; fall back to stealing from other workers.
      if (q->pop(t) ||
          queues->steal(worker_id, t)) {
        do_task<T, GENERATION, STRING_DEDUP>(q, cl, live_data, req, &t);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No work encountered in current stride, try to terminate.
      // Need to leave the STS here otherwise it might block safepoints.
      ShenandoahSuspendibleThreadSetLeaver stsl(CANCELLABLE);
      ShenandoahTerminatorTerminator tt(heap);
      if (terminator->offer_termination(&tt)) return;
    }
  }
}
|
1 /*
2 * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
3 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26
27 #include "precompiled.hpp"
28
29 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
30 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
31 #include "gc/shenandoah/shenandoahGeneration.hpp"
32 #include "gc/shenandoah/shenandoahMark.inline.hpp"
33 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
34 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
35 #include "gc/shenandoah/shenandoahUtils.hpp"
36 #include "gc/shenandoah/shenandoahVerifier.hpp"
37
38 void ShenandoahMark::start_mark() {
39 if (!CodeCache::is_gc_marking_cycle_active()) {
40 CodeCache::on_gc_marking_cycle_start();
41 }
42 }
43
// Notify the code cache when marking completes. While concurrent old-gen
// marking is still in progress the overall marking cycle is not finished,
// so the notification is deferred in that case.
void ShenandoahMark::end_mark() {
  // Unlike other GCs, we do not arm the nmethods
  // when marking terminates.
  if (!ShenandoahHeap::heap()->is_concurrent_old_mark_in_progress()) {
    CodeCache::on_gc_marking_cycle_finish();
  }
}
51
// Generational constructor: marking state (the task queues, plus the
// old-gen queues used when young marking encounters old objects) is owned
// by the generation being marked.
ShenandoahMark::ShenandoahMark(ShenandoahGeneration* generation) :
  _generation(generation),
  _task_queues(generation->task_queues()),
  _old_gen_task_queues(generation->old_gen_task_queues()) {
}
57
58 template <ShenandoahGenerationType GENERATION, bool CANCELLABLE, StringDedupMode STRING_DEDUP>
59 void ShenandoahMark::mark_loop_prework(uint w, TaskTerminator *t, ShenandoahReferenceProcessor *rp, StringDedup::Requests* const req, bool update_refs) {
60 ShenandoahObjToScanQueue* q = get_queue(w);
61 ShenandoahObjToScanQueue* old_q = get_old_queue(w);
62
63 ShenandoahHeap* const heap = ShenandoahHeap::heap();
64 ShenandoahLiveData* ld = heap->get_liveness_cache(w);
65
66 // TODO: We can clean up this if we figure out how to do templated oop closures that
67 // play nice with specialized_oop_iterators.
68 if (update_refs) {
69 using Closure = ShenandoahMarkUpdateRefsClosure<GENERATION>;
70 Closure cl(q, rp, old_q);
71 mark_loop_work<Closure, GENERATION, CANCELLABLE, STRING_DEDUP>(&cl, ld, w, t, req);
72 } else {
73 using Closure = ShenandoahMarkRefsClosure<GENERATION>;
74 Closure cl(q, rp, old_q);
75 mark_loop_work<Closure, GENERATION, CANCELLABLE, STRING_DEDUP>(&cl, ld, w, t, req);
76 }
77
78 heap->flush_liveness_cache(w);
79 }
80
81 template<bool CANCELLABLE, StringDedupMode STRING_DEDUP>
82 void ShenandoahMark::mark_loop(uint worker_id, TaskTerminator* terminator, ShenandoahReferenceProcessor *rp,
83 ShenandoahGenerationType generation, StringDedup::Requests* const req) {
84 bool update_refs = ShenandoahHeap::heap()->has_forwarded_objects();
85 switch (generation) {
86 case YOUNG:
87 mark_loop_prework<YOUNG, CANCELLABLE, STRING_DEDUP>(worker_id, terminator, rp, req, update_refs);
88 break;
89 case OLD:
90 // Old generation collection only performs marking, it should not update references.
91 mark_loop_prework<OLD, CANCELLABLE, STRING_DEDUP>(worker_id, terminator, rp, req, false);
92 break;
93 case GLOBAL:
94 mark_loop_prework<GLOBAL, CANCELLABLE, STRING_DEDUP>(worker_id, terminator, rp, req, update_refs);
95 break;
96 case NON_GEN:
97 mark_loop_prework<NON_GEN, CANCELLABLE, STRING_DEDUP>(worker_id, terminator, rp, req, update_refs);
98 break;
99 default:
100 ShouldNotReachHere();
101 break;
102 }
103 }
104
105 void ShenandoahMark::mark_loop(uint worker_id, TaskTerminator* terminator, ShenandoahReferenceProcessor *rp,
106 ShenandoahGenerationType generation, bool cancellable, StringDedupMode dedup_mode, StringDedup::Requests* const req) {
107 if (cancellable) {
108 switch(dedup_mode) {
109 case NO_DEDUP:
110 mark_loop<true, NO_DEDUP>(worker_id, terminator, rp, generation, req);
111 break;
112 case ENQUEUE_DEDUP:
113 mark_loop<true, ENQUEUE_DEDUP>(worker_id, terminator, rp, generation, req);
114 break;
115 case ALWAYS_DEDUP:
116 mark_loop<true, ALWAYS_DEDUP>(worker_id, terminator, rp, generation, req);
117 break;
118 }
119 } else {
120 switch(dedup_mode) {
121 case NO_DEDUP:
122 mark_loop<false, NO_DEDUP>(worker_id, terminator, rp, generation, req);
123 break;
124 case ENQUEUE_DEDUP:
125 mark_loop<false, ENQUEUE_DEDUP>(worker_id, terminator, rp, generation, req);
126 break;
127 case ALWAYS_DEDUP:
128 mark_loop<false, ALWAYS_DEDUP>(worker_id, terminator, rp, generation, req);
129 break;
130 }
131 }
132 }
133
/*
 * Core marking loop executed by each marking worker.
 *
 * Phase 1 drains any unowned queues claimed from the queue set; phase 2 is
 * the steady-state loop: drain SATB buffers, then pop from the worker's own
 * queue or steal from others, offering termination when a whole stride
 * produced no work.
 */
template <class T, ShenandoahGenerationType GENERATION, bool CANCELLABLE, StringDedupMode STRING_DEDUP>
void ShenandoahMark::mark_loop_work(T* cl, ShenandoahLiveData* live_data, uint worker_id, TaskTerminator *terminator, StringDedup::Requests* const req) {
  // Number of tasks processed between cancellation/SATB checks.
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q;
  ShenandoahMarkTask t;

  // Do not use active_generation() : we must use the gc_generation() set by
  // ShenandoahGCScope on the ControllerThread's stack; no safepoint may
  // intervene to update active_generation, so we can't
  // shenandoah_assert_generations_reconciled() here.
  assert(heap->gc_generation()->type() == GENERATION, "Sanity: %d != %d", heap->gc_generation()->type(), GENERATION);
  heap->gc_generation()->ref_processor()->set_mark_closure(worker_id, cl);

  /*
   * Process outstanding queues, if any.
   *
   * There can be more queues than workers. To deal with the imbalance, we claim
   * extra queues first. Since marking can push new tasks into the queue associated
   * with this worker id, we come back to process this queue in the normal loop.
   */
  assert(queues->get_reserved() == heap->workers()->active_workers(),
         "Need to reserve proper number of queues: reserved: %u, active: %u", queues->get_reserved(), heap->workers()->active_workers());

  q = queues->claim_next();
  while (q != nullptr) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop(t)) {
        do_task<T, GENERATION, STRING_DEDUP>(q, cl, live_data, req, &t, worker_id);
      } else {
        // Claimed queue drained; move on to the next unclaimed one.
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }
  // Switch to this worker's own queues for the normal loop.
  q = get_queue(worker_id);
  ShenandoahObjToScanQueue* old_q = get_old_queue(worker_id);

  ShenandoahSATBBufferClosure<GENERATION> drain_satb(q, old_q);
  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  /*
   * Normal marking loop:
   */
  while (true) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }
    // Drain completed SATB buffers before taking more marking tasks.
    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      // Prefer local work; fall back to stealing from other workers.
      if (q->pop(t) ||
          queues->steal(worker_id, t)) {
        do_task<T, GENERATION, STRING_DEDUP>(q, cl, live_data, req, &t, worker_id);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No work encountered in current stride, try to terminate.
      // Need to leave the STS here otherwise it might block safepoints.
      ShenandoahSuspendibleThreadSetLeaver stsl(CANCELLABLE);
      ShenandoahTerminatorTerminator tt(heap);
      if (terminator->offer_termination(&tt)) return;
    }
  }
}
|