1 /*
2 * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25
26 #include "precompiled.hpp"
27
28 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
29 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
30 #include "gc/shenandoah/shenandoahMark.inline.hpp"
31 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
32 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
33 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
34 #include "gc/shenandoah/shenandoahUtils.hpp"
35 #include "gc/shenandoah/shenandoahVerifier.hpp"
36
// Base closure for marking through oop fields: remembers the worker-local
// scan queue to push discovered objects to, caches the heap's current
// marking context, and hands the reference processor to the superclass.
ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp) :
  MetadataVisitingOopIterateClosure(rp),
  _queue(q),
  _mark_context(ShenandoahHeap::heap()->marking_context()),
  _weak(false)  // start in strong-marking mode; presumably toggled elsewhere for weak roots — not visible in this file
{ }
43
// Cache the task-queue set owned by the heap's marking context;
// all marking work in this class operates on these queues.
ShenandoahMark::ShenandoahMark() :
  _task_queues(ShenandoahHeap::heap()->marking_context()->task_queues()) {
}
47
48 void ShenandoahMark::start_mark() {
49 if (!CodeCache::is_gc_marking_cycle_active()) {
50 CodeCache::on_gc_marking_cycle_start();
51 }
52 }
53
// Notify the code cache that the marking cycle is complete.
void ShenandoahMark::end_mark() {
  // Unlike other GCs, we do not arm the nmethods
  // when marking terminates.
  CodeCache::on_gc_marking_cycle_finish();
}
59
60 void ShenandoahMark::clear() {
61 // Clean up marking stacks.
62 ShenandoahObjToScanQueueSet* queues = ShenandoahHeap::heap()->marking_context()->task_queues();
63 queues->clear();
64
65 // Cancel SATB buffers.
66 ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
67 }
68
69 template <bool CANCELLABLE, StringDedupMode STRING_DEDUP>
70 void ShenandoahMark::mark_loop_prework(uint w, TaskTerminator *t, ShenandoahReferenceProcessor *rp, StringDedup::Requests* const req) {
71 ShenandoahObjToScanQueue* q = get_queue(w);
72
73 ShenandoahHeap* const heap = ShenandoahHeap::heap();
74 ShenandoahLiveData* ld = heap->get_liveness_cache(w);
75
76 // TODO: We can clean up this if we figure out how to do templated oop closures that
77 // play nice with specialized_oop_iterators.
78 if (heap->has_forwarded_objects()) {
79 using Closure = ShenandoahMarkUpdateRefsClosure;
80 Closure cl(q, rp);
81 mark_loop_work<Closure, CANCELLABLE, STRING_DEDUP>(&cl, ld, w, t, req);
82 } else {
83 using Closure = ShenandoahMarkRefsClosure;
84 Closure cl(q, rp);
85 mark_loop_work<Closure, CANCELLABLE, STRING_DEDUP>(&cl, ld, w, t, req);
86 }
87
88 heap->flush_liveness_cache(w);
89 }
90
91 void ShenandoahMark::mark_loop(uint worker_id, TaskTerminator* terminator, ShenandoahReferenceProcessor *rp,
92 bool cancellable, StringDedupMode dedup_mode, StringDedup::Requests* const req) {
93 if (cancellable) {
94 switch(dedup_mode) {
95 case NO_DEDUP:
96 mark_loop_prework<true, NO_DEDUP>(worker_id, terminator, rp, req);
97 break;
98 case ENQUEUE_DEDUP:
99 mark_loop_prework<true, ENQUEUE_DEDUP>(worker_id, terminator, rp, req);
100 break;
101 case ALWAYS_DEDUP:
102 mark_loop_prework<true, ALWAYS_DEDUP>(worker_id, terminator, rp, req);
103 break;
104 }
105 } else {
106 switch(dedup_mode) {
107 case NO_DEDUP:
108 mark_loop_prework<false, NO_DEDUP>(worker_id, terminator, rp, req);
109 break;
110 case ENQUEUE_DEDUP:
111 mark_loop_prework<false, ENQUEUE_DEDUP>(worker_id, terminator, rp, req);
112 break;
113 case ALWAYS_DEDUP:
114 mark_loop_prework<false, ALWAYS_DEDUP>(worker_id, terminator, rp, req);
115 break;
116 }
117 }
118 }
119
// Core work-stealing marking loop, specialized at compile time on the closure
// type T, on cancellability, and on the string-dedup mode. Drains leftover
// queues, then alternates between SATB-buffer draining, local-queue popping,
// and stealing from other workers until the terminator agrees to finish.
template <class T, bool CANCELLABLE, StringDedupMode STRING_DEDUP>
void ShenandoahMark::mark_loop_work(T* cl, ShenandoahLiveData* live_data, uint worker_id, TaskTerminator *terminator, StringDedup::Requests* const req) {
  // How many tasks to process between cancellation/termination checks.
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q;
  ShenandoahMarkTask t;

  // Make this worker's closure visible to the reference processor so it can
  // mark through discovered references on this worker's behalf.
  heap->ref_processor()->set_mark_closure(worker_id, cl);

  /*
   * Process outstanding queues, if any.
   *
   * There can be more queues than workers. To deal with the imbalance, we claim
   * extra queues first. Since marking can push new tasks into the queue associated
   * with this worker id, we come back to process this queue in the normal loop.
   */
  assert(queues->get_reserved() == heap->workers()->active_workers(),
         "Need to reserve proper number of queues: reserved: %u, active: %u", queues->get_reserved(), heap->workers()->active_workers());

  q = queues->claim_next();
  while (q != nullptr) {
    // Honor cancellation between strides; bail out immediately when cancelled.
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop(t)) {
        do_task<T, STRING_DEDUP>(q, cl, live_data, req, &t);
      } else {
        // Claimed queue is drained; move on to the next unclaimed one.
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }
  // From here on, work exclusively on this worker's own queue (plus stealing).
  q = get_queue(worker_id);

  ShenandoahSATBBufferClosure drain_satb(q);
  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  /*
   * Normal marking loop:
   */
  while (true) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }

    // Drain completed SATB buffers into the local queue before taking tasks.
    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      // Prefer local tasks; fall back to stealing from other workers' queues.
      if (q->pop(t) ||
          queues->steal(worker_id, t)) {
        do_task<T, STRING_DEDUP>(q, cl, live_data, req, &t);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No work encountered in current stride, try to terminate.
      // Need to leave the STS here otherwise it might block safepoints.
      ShenandoahSuspendibleThreadSetLeaver stsl(CANCELLABLE);
      ShenandoahTerminatorTerminator tt(heap);
      if (terminator->offer_termination(&tt)) return;
    }
  }
}