1 /*
2 * Copyright (c) 2018, 2022, Red Hat, Inc. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "c1/c1_IR.hpp"
27 #include "gc/shared/satbMarkQueue.hpp"
28 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
29 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
30 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
31 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
32 #include "gc/shenandoah/shenandoahRuntime.hpp"
33 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
34 #include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
35
36 #ifdef ASSERT
37 #define __ gen->lir(__FILE__, __LINE__)->
38 #else
39 #define __ gen->lir()->
40 #endif
41
42 void ShenandoahPreBarrierStub::emit_code(LIR_Assembler* ce) {
43 ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
44 bs->gen_pre_barrier_stub(ce, this);
45 }
46
47 void ShenandoahLoadReferenceBarrierStub::emit_code(LIR_Assembler* ce) {
48 ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
49 bs->gen_load_reference_barrier_stub(ce, this);
50 }
51
// All runtime stub blobs start out null; they are created later in
// generate_c1_runtime_stubs() during C1 initialization.
ShenandoahBarrierSetC1::ShenandoahBarrierSetC1() :
  _pre_barrier_c1_runtime_code_blob(nullptr),
  _load_reference_barrier_strong_rt_code_blob(nullptr),
  _load_reference_barrier_strong_native_rt_code_blob(nullptr),
  _load_reference_barrier_weak_rt_code_blob(nullptr),
  _load_reference_barrier_phantom_rt_code_blob(nullptr) {}
58
// Emit the SATB pre-write barrier: when concurrent marking is active, the
// previous value of the field must be recorded so the marker does not miss it.
// Two modes of operation:
//   - pre_val == illegalOpr: the slow-path stub loads the old value from
//     addr_opr itself (possibly with patching support);
//   - pre_val is a register: the caller already supplies the old value and
//     addr_opr must be illegal.
// The fast path only tests the thread-local "SATB queue active" flag and
// branches to the slow-path stub when it is non-zero.
void ShenandoahBarrierSetC1::pre_barrier(LIRGenerator* gen, CodeEmitInfo* info, DecoratorSet decorators, LIR_Opr addr_opr, LIR_Opr pre_val) {
  // First we test whether marking is in progress.
  BasicType flag_type;
  bool patch = (decorators & C1_NEEDS_PATCHING) != 0;
  bool do_load = pre_val == LIR_OprFact::illegalOpr;
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    flag_type = T_INT;
  } else {
    guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1,
              "Assumption");
    // Use unsigned type T_BOOLEAN here rather than signed T_BYTE since some platforms, eg. ARM,
    // need to use unsigned instructions to use the large offset to load the satb_mark_queue.
    flag_type = T_BOOLEAN;
  }
  LIR_Opr thrd = gen->getThreadPointer();
  LIR_Address* mark_active_flag_addr =
    new LIR_Address(thrd,
                    in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset()),
                    flag_type);
  // Read the marking-in-progress flag.
  LIR_Opr flag_val = gen->new_register(T_INT);
  __ load(mark_active_flag_addr, flag_val);
  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));

  LIR_PatchCode pre_val_patch_code = lir_patch_none;

  CodeStub* slow;

  if (do_load) {
    // Stub will load the old value itself; verify the caller passed the
    // operands consistently for this mode.
    assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
    assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");

    if (patch)
      pre_val_patch_code = lir_patch_normal;

    // Fresh register to receive the old value inside the stub.
    pre_val = gen->new_register(T_OBJECT);

    if (!addr_opr->is_address()) {
      assert(addr_opr->is_register(), "must be");
      // Wrap a plain register in an address operand so the stub can load from it.
      addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
    }
    slow = new ShenandoahPreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info ? new CodeEmitInfo(info) : nullptr);
  } else {
    // Caller supplied the old value directly; no load, no patching needed.
    assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
    assert(pre_val->is_register(), "must be");
    assert(pre_val->type() == T_OBJECT, "must be an object");

    slow = new ShenandoahPreBarrierStub(pre_val);
  }

  // Branch to the slow path when the SATB flag was non-zero; the stub
  // branches back to its continuation label emitted here.
  __ branch(lir_cond_notEqual, slow);
  __ branch_destination(slow->continuation());
}
112
113 LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr, DecoratorSet decorators) {
114 if (ShenandoahLoadRefBarrier) {
115 return load_reference_barrier_impl(gen, obj, addr, decorators);
116 } else {
117 return obj;
118 }
119 }
120
// Emit the load-reference barrier fast path: copy the loaded oop into the
// result register, test the thread-local gc-state byte against the relevant
// flag bits, and branch to a slow-path stub when any of them are set. The
// slow path (ShenandoahLoadReferenceBarrierStub) fixes up the oop in place.
LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier_impl(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr, DecoratorSet decorators) {
  assert(ShenandoahLoadRefBarrier, "Should be enabled");

  obj = ensure_in_register(gen, obj, T_OBJECT);
  assert(obj->is_register(), "must be a register at this point");
  addr = ensure_in_register(gen, addr, T_ADDRESS);
  assert(addr->is_register(), "must be a register at this point");
  // Result lands in a call-compatible register; the stub may call the runtime.
  LIR_Opr result = gen->result_register_for(obj->value_type());
  __ move(obj, result);
  // Scratch registers for the slow-path stub.
  LIR_Opr tmp1 = gen->new_register(T_ADDRESS);
  LIR_Opr tmp2 = gen->new_register(T_ADDRESS);

  LIR_Opr thrd = gen->getThreadPointer();
  LIR_Address* active_flag_addr =
    new LIR_Address(thrd,
                    in_bytes(ShenandoahThreadLocalData::gc_state_offset()),
                    T_BYTE);
  // Read and check the gc-state-flag.
  LIR_Opr flag_val = gen->new_register(T_INT);
  __ load(active_flag_addr, flag_val);
  // The barrier must run when the heap has forwarded objects; for non-strong
  // accesses it must additionally run while weak roots are being processed.
  int flags = ShenandoahHeap::HAS_FORWARDED;
  if (!ShenandoahBarrierSet::is_strong_access(decorators)) {
    flags |= ShenandoahHeap::WEAK_ROOTS;
  }
  LIR_Opr mask = LIR_OprFact::intConst(flags);
  LIR_Opr mask_reg = gen->new_register(T_INT);
  __ move(mask, mask_reg);

  // On two-operand architectures (e.g. x86) the AND destination must be one
  // of its sources; otherwise use a fresh register for the masked value.
  if (two_operand_lir_form) {
    __ logical_and(flag_val, mask_reg, flag_val);
  } else {
    LIR_Opr masked_flag = gen->new_register(T_INT);
    __ logical_and(flag_val, mask_reg, masked_flag);
    flag_val = masked_flag;
  }
  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));

  // Slow path runs when any of the tested gc-state bits are set.
  CodeStub* slow = new ShenandoahLoadReferenceBarrierStub(obj, addr, result, tmp1, tmp2, decorators);
  __ branch(lir_cond_notEqual, slow);
  __ branch_destination(slow->continuation());

  return result;
}
164
165 LIR_Opr ShenandoahBarrierSetC1::ensure_in_register(LIRGenerator* gen, LIR_Opr obj, BasicType type) {
166 if (!obj->is_register()) {
167 LIR_Opr obj_reg;
168 if (obj->is_constant()) {
169 obj_reg = gen->new_register(type);
170 __ move(obj, obj_reg);
171 } else {
172 obj_reg = gen->new_pointer_register();
173 __ leal(obj, obj_reg);
174 }
175 obj = obj_reg;
176 }
177 return obj;
178 }
179
180 LIR_Opr ShenandoahBarrierSetC1::iu_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, DecoratorSet decorators) {
181 if (ShenandoahIUBarrier) {
182 obj = ensure_in_register(gen, obj, T_OBJECT);
183 pre_barrier(gen, info, decorators, LIR_OprFact::illegalOpr, obj);
184 }
185 return obj;
186 }
187
188 void ShenandoahBarrierSetC1::store_at_resolved(LIRAccess& access, LIR_Opr value) {
189 if (access.is_oop()) {
190 if (ShenandoahSATBBarrier) {
191 pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), access.resolved_addr(), LIR_OprFact::illegalOpr /* pre_val */);
192 }
193 value = iu_barrier(access.gen(), value, access.access_emit_info(), access.decorators());
194 }
195 BarrierSetC1::store_at_resolved(access, value);
196 }
197
198 LIR_Opr ShenandoahBarrierSetC1::resolve_address(LIRAccess& access, bool resolve_in_register) {
199 // We must resolve in register when patching. This is to avoid
200 // having a patch area in the load barrier stub, since the call
201 // into the runtime to patch will not have the proper oop map.
202 const bool patch_before_barrier = access.is_oop() && (access.decorators() & C1_NEEDS_PATCHING) != 0;
203 return BarrierSetC1::resolve_address(access, resolve_in_register || patch_before_barrier);
204 }
205
// Emit a load plus the Shenandoah barriers it requires: the load-reference
// barrier on the loaded oop (when applicable for these decorators/type), and
// the SATB keep-alive barrier for Reference.referent-style loads.
void ShenandoahBarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
  // 1: non-reference load, no additional barrier is needed
  if (!access.is_oop()) {
    BarrierSetC1::load_at_resolved(access, result);
    return;
  }

  LIRGenerator* gen = access.gen();
  DecoratorSet decorators = access.decorators();
  BasicType type = access.type();

  // 2: load a reference from src location and apply LRB if ShenandoahLoadRefBarrier is set
  if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
    // Load into a temporary first; the barrier produces the final value,
    // which is then moved into the caller's result operand.
    LIR_Opr tmp = gen->new_register(T_OBJECT);
    BarrierSetC1::load_at_resolved(access, tmp);
    tmp = load_reference_barrier(gen, tmp, access.resolved_addr(), decorators);
    __ move(tmp, result);
  } else {
    BarrierSetC1::load_at_resolved(access, result);
  }

  // 3: apply keep-alive barrier for java.lang.ref.Reference if needed
  if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
    bool is_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;

    // Register the value in the referent field with the pre-barrier
    LabelObj *Lcont_anonymous;
    if (is_anonymous) {
      // For unknown-strength accesses, first check at runtime whether this
      // is really a referent load; skip the barrier otherwise.
      Lcont_anonymous = new LabelObj();
      generate_referent_check(access, Lcont_anonymous);
    }
    pre_barrier(gen, access.access_emit_info(), decorators, LIR_OprFact::illegalOpr /* addr_opr */,
                result /* pre_val */);
    if (is_anonymous) {
      __ branch_destination(Lcont_anonymous->label());
    }
  }
}
244
245 class C1ShenandoahPreBarrierCodeGenClosure : public StubAssemblerCodeGenClosure {
246 virtual OopMapSet* generate_code(StubAssembler* sasm) {
247 ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
248 bs->generate_c1_pre_barrier_runtime_stub(sasm);
249 return nullptr;
250 }
251 };
252
253 class C1ShenandoahLoadReferenceBarrierCodeGenClosure : public StubAssemblerCodeGenClosure {
254 private:
255 const DecoratorSet _decorators;
256
257 public:
258 C1ShenandoahLoadReferenceBarrierCodeGenClosure(DecoratorSet decorators) : _decorators(decorators) {}
259
260 virtual OopMapSet* generate_code(StubAssembler* sasm) {
261 ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
262 bs->generate_c1_load_reference_barrier_runtime_stub(sasm, _decorators);
263 return nullptr;
264 }
265 };
266
267 void ShenandoahBarrierSetC1::generate_c1_runtime_stubs(BufferBlob* buffer_blob) {
268 C1ShenandoahPreBarrierCodeGenClosure pre_code_gen_cl;
269 _pre_barrier_c1_runtime_code_blob = Runtime1::generate_blob(buffer_blob, -1,
270 "shenandoah_pre_barrier_slow",
271 false, &pre_code_gen_cl);
272 if (ShenandoahLoadRefBarrier) {
273 C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_strong_code_gen_cl(ON_STRONG_OOP_REF);
274 _load_reference_barrier_strong_rt_code_blob = Runtime1::generate_blob(buffer_blob, -1,
275 "shenandoah_load_reference_barrier_strong_slow",
276 false, &lrb_strong_code_gen_cl);
277
278 C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_strong_native_code_gen_cl(ON_STRONG_OOP_REF | IN_NATIVE);
279 _load_reference_barrier_strong_native_rt_code_blob = Runtime1::generate_blob(buffer_blob, -1,
280 "shenandoah_load_reference_barrier_strong_native_slow",
281 false, &lrb_strong_native_code_gen_cl);
282
283 C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_weak_code_gen_cl(ON_WEAK_OOP_REF);
284 _load_reference_barrier_weak_rt_code_blob = Runtime1::generate_blob(buffer_blob, -1,
285 "shenandoah_load_reference_barrier_weak_slow",
286 false, &lrb_weak_code_gen_cl);
287
288 C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_phantom_code_gen_cl(ON_PHANTOM_OOP_REF | IN_NATIVE);
289 _load_reference_barrier_phantom_rt_code_blob = Runtime1::generate_blob(buffer_blob, -1,
290 "shenandoah_load_reference_barrier_phantom_slow",
291 false, &lrb_phantom_code_gen_cl);
292 }
293 }