1 /*
2 * Copyright (c) 2018, 2022, Red Hat, Inc. All rights reserved.
3 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "c1/c1_IR.hpp"
28 #include "gc/shared/satbMarkQueue.hpp"
29 #include "gc/shenandoah/mode/shenandoahMode.hpp"
30 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
31 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
32 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
33 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
34 #include "gc/shenandoah/shenandoahRuntime.hpp"
35 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
36 #include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
37
38 #ifdef ASSERT
39 #define __ gen->lir(__FILE__, __LINE__)->
40 #else
41 #define __ gen->lir()->
42 #endif
43
44 void ShenandoahPreBarrierStub::emit_code(LIR_Assembler* ce) {
45 ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
46 bs->gen_pre_barrier_stub(ce, this);
47 }
48
49 void ShenandoahLoadReferenceBarrierStub::emit_code(LIR_Assembler* ce) {
50 ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
51 bs->gen_load_reference_barrier_stub(ce, this);
52 }
53
// All runtime stub blobs start out null; they are generated lazily in
// generate_c1_runtime_stubs() (the LRB blobs only when ShenandoahLoadRefBarrier
// is enabled).
ShenandoahBarrierSetC1::ShenandoahBarrierSetC1() :
  _pre_barrier_c1_runtime_code_blob(nullptr),
  _load_reference_barrier_strong_rt_code_blob(nullptr),
  _load_reference_barrier_strong_native_rt_code_blob(nullptr),
  _load_reference_barrier_weak_rt_code_blob(nullptr),
  _load_reference_barrier_phantom_rt_code_blob(nullptr) {}
60
// Emit the SATB pre-barrier: if concurrent marking is active, record the
// previous value of the updated location so the snapshot-at-the-beginning
// invariant holds. Two modes of use:
//  - pre_val == illegalOpr: the previous value must be loaded from addr_opr
//    (done inside the slow-path stub, possibly with patching);
//  - pre_val given: the caller already has the previous value in a register.
// The fast path only tests the per-thread "SATB queue active" flag and
// branches to a ShenandoahPreBarrierStub when it is set.
void ShenandoahBarrierSetC1::pre_barrier(LIRGenerator* gen, CodeEmitInfo* info, DecoratorSet decorators, LIR_Opr addr_opr, LIR_Opr pre_val) {
  // First we test whether marking is in progress.
  BasicType flag_type;
  bool patch = (decorators & C1_NEEDS_PATCHING) != 0;
  // An illegal pre_val means we must load the previous value ourselves.
  bool do_load = pre_val == LIR_OprFact::illegalOpr;
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    flag_type = T_INT;
  } else {
    guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1,
              "Assumption");
    // Use unsigned type T_BOOLEAN here rather than signed T_BYTE since some platforms, eg. ARM,
    // need to use unsigned instructions to use the large offset to load the satb_mark_queue.
    flag_type = T_BOOLEAN;
  }
  LIR_Opr thrd = gen->getThreadPointer();
  // Address of the thread-local "SATB mark queue active" flag.
  LIR_Address* mark_active_flag_addr =
    new LIR_Address(thrd,
                    in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset()),
                    flag_type);
  // Read the marking-in-progress flag.
  LIR_Opr flag_val = gen->new_register(T_INT);
  __ load(mark_active_flag_addr, flag_val);
  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));

  LIR_PatchCode pre_val_patch_code = lir_patch_none;

  CodeStub* slow;

  if (do_load) {
    assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
    assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");

    // Field offset not yet known: the stub's load must be patchable.
    if (patch)
      pre_val_patch_code = lir_patch_normal;

    // Fresh register to receive the previous value inside the stub.
    pre_val = gen->new_register(T_OBJECT);

    if (!addr_opr->is_address()) {
      assert(addr_opr->is_register(), "must be");
      // Wrap a bare register into a zero-displacement address for the stub.
      addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
    }
    slow = new ShenandoahPreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info ? new CodeEmitInfo(info) : nullptr);
  } else {
    // Caller-supplied previous value: no load, no patching.
    assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
    assert(pre_val->is_register(), "must be");
    assert(pre_val->type() == T_OBJECT, "must be an object");

    slow = new ShenandoahPreBarrierStub(pre_val);
  }

  // Take the slow path only while marking is active; fall through otherwise.
  __ branch(lir_cond_notEqual, slow);
  __ branch_destination(slow->continuation());
}
114
115 LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr, DecoratorSet decorators) {
116 if (ShenandoahLoadRefBarrier) {
117 return load_reference_barrier_impl(gen, obj, addr, decorators);
118 } else {
119 return obj;
120 }
121 }
122
// Emit the load-reference-barrier fast path: test the thread-local gc-state
// byte against the relevant flags and branch to the slow-path stub when any
// is set. Returns the (possibly fixed-up) oop in a call-convention result
// register, as required by the stub's runtime call.
LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier_impl(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr, DecoratorSet decorators) {
  assert(ShenandoahLoadRefBarrier, "Should be enabled");

  // Both the loaded oop and its source address must live in registers so the
  // slow-path stub can use them directly.
  obj = ensure_in_register(gen, obj, T_OBJECT);
  assert(obj->is_register(), "must be a register at this point");
  addr = ensure_in_register(gen, addr, T_ADDRESS);
  assert(addr->is_register(), "must be a register at this point");
  // Fast path answer: the object itself. The stub overwrites this on the
  // slow path.
  LIR_Opr result = gen->result_register_for(obj->value_type());
  __ move(obj, result);
  // Scratch registers for the stub.
  LIR_Opr tmp1 = gen->new_register(T_ADDRESS);
  LIR_Opr tmp2 = gen->new_register(T_ADDRESS);

  LIR_Opr thrd = gen->getThreadPointer();
  LIR_Address* active_flag_addr =
    new LIR_Address(thrd,
                    in_bytes(ShenandoahThreadLocalData::gc_state_offset()),
                    T_BYTE);
  // Read and check the gc-state-flag.
  LIR_Opr flag_val = gen->new_register(T_INT);
  __ load(active_flag_addr, flag_val);
  // Strong accesses only care about forwarded objects; weak/phantom accesses
  // must also take the slow path while weak roots are being processed.
  int flags = ShenandoahHeap::HAS_FORWARDED;
  if (!ShenandoahBarrierSet::is_strong_access(decorators)) {
    flags |= ShenandoahHeap::WEAK_ROOTS;
  }
  LIR_Opr mask = LIR_OprFact::intConst(flags);
  LIR_Opr mask_reg = gen->new_register(T_INT);
  __ move(mask, mask_reg);

  // On two-operand architectures (e.g. x86) the and-result must reuse an
  // input operand; otherwise use a separate destination register.
  if (two_operand_lir_form) {
    __ logical_and(flag_val, mask_reg, flag_val);
  } else {
    LIR_Opr masked_flag = gen->new_register(T_INT);
    __ logical_and(flag_val, mask_reg, masked_flag);
    flag_val = masked_flag;
  }
  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));

  // Slow path: runtime call that resolves/fixes up the reference.
  CodeStub* slow = new ShenandoahLoadReferenceBarrierStub(obj, addr, result, tmp1, tmp2, decorators);
  __ branch(lir_cond_notEqual, slow);
  __ branch_destination(slow->continuation());

  return result;
}
166
167 LIR_Opr ShenandoahBarrierSetC1::ensure_in_register(LIRGenerator* gen, LIR_Opr obj, BasicType type) {
168 if (!obj->is_register()) {
169 LIR_Opr obj_reg;
170 if (obj->is_constant()) {
171 obj_reg = gen->new_register(type);
172 __ move(obj, obj_reg);
173 } else {
174 obj_reg = gen->new_pointer_register();
175 __ leal(obj, obj_reg);
176 }
177 obj = obj_reg;
178 }
179 return obj;
180 }
181
182 void ShenandoahBarrierSetC1::store_at_resolved(LIRAccess& access, LIR_Opr value) {
183 if (access.is_oop()) {
184 if (ShenandoahSATBBarrier) {
185 pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), access.resolved_addr(), LIR_OprFact::illegalOpr /* pre_val */);
186 }
187 }
188 BarrierSetC1::store_at_resolved(access, value);
189
190 if (ShenandoahCardBarrier && access.is_oop()) {
191 DecoratorSet decorators = access.decorators();
192 bool is_array = (decorators & IS_ARRAY) != 0;
193 bool on_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
194
195 bool precise = is_array || on_anonymous;
196 LIR_Opr post_addr = precise ? access.resolved_addr() : access.base().opr();
197 post_barrier(access, post_addr, value);
198 }
199 }
200
201 LIR_Opr ShenandoahBarrierSetC1::resolve_address(LIRAccess& access, bool resolve_in_register) {
202 // We must resolve in register when patching. This is to avoid
203 // having a patch area in the load barrier stub, since the call
204 // into the runtime to patch will not have the proper oop map.
205 const bool patch_before_barrier = access.is_oop() && (access.decorators() & C1_NEEDS_PATCHING) != 0;
206 return BarrierSetC1::resolve_address(access, resolve_in_register || patch_before_barrier);
207 }
208
// Emit an oop/non-oop load with the Shenandoah barriers it requires:
// a load-reference barrier on the loaded value when needed, and a
// keep-alive (SATB) barrier for Reference.get()-style accesses.
void ShenandoahBarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
  // 1: non-reference load, no additional barrier is needed
  if (!access.is_oop()) {
    BarrierSetC1::load_at_resolved(access, result);
    return;
  }

  LIRGenerator* gen = access.gen();
  DecoratorSet decorators = access.decorators();
  BasicType type = access.type();

  // 2: load a reference from src location and apply LRB if ShenandoahLoadRefBarrier is set
  if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
    // Load into a temporary first so the barrier can replace the value
    // before it reaches the caller-visible result operand.
    LIR_Opr tmp = gen->new_register(T_OBJECT);
    BarrierSetC1::load_at_resolved(access, tmp);
    tmp = load_reference_barrier(gen, tmp, access.resolved_addr(), decorators);
    __ move(tmp, result);
  } else {
    BarrierSetC1::load_at_resolved(access, result);
  }

  // 3: apply keep-alive barrier for java.lang.ref.Reference if needed
  if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
    bool is_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;

    // Register the value in the referent field with the pre-barrier
    LabelObj *Lcont_anonymous;
    if (is_anonymous) {
      // Unknown-field load: only apply the keep-alive barrier if the field
      // actually turns out to be a Reference referent (runtime check).
      Lcont_anonymous = new LabelObj();
      generate_referent_check(access, Lcont_anonymous);
    }
    // Keep the loaded referent alive by enqueueing it as a SATB value.
    pre_barrier(gen, access.access_emit_info(), decorators, LIR_OprFact::illegalOpr /* addr_opr */,
                result /* pre_val */);
    if (is_anonymous) {
      __ branch_destination(Lcont_anonymous->label());
    }
  }
}
247
248 class C1ShenandoahPreBarrierCodeGenClosure : public StubAssemblerCodeGenClosure {
249 virtual OopMapSet* generate_code(StubAssembler* sasm) {
250 ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
251 bs->generate_c1_pre_barrier_runtime_stub(sasm);
252 return nullptr;
253 }
254 };
255
256 class C1ShenandoahLoadReferenceBarrierCodeGenClosure : public StubAssemblerCodeGenClosure {
257 private:
258 const DecoratorSet _decorators;
259
260 public:
261 C1ShenandoahLoadReferenceBarrierCodeGenClosure(DecoratorSet decorators) : _decorators(decorators) {}
262
263 virtual OopMapSet* generate_code(StubAssembler* sasm) {
264 ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
265 bs->generate_c1_load_reference_barrier_runtime_stub(sasm, _decorators);
266 return nullptr;
267 }
268 };
269
270 void ShenandoahBarrierSetC1::generate_c1_runtime_stubs(BufferBlob* buffer_blob) {
271 C1ShenandoahPreBarrierCodeGenClosure pre_code_gen_cl;
272 _pre_barrier_c1_runtime_code_blob = Runtime1::generate_blob(buffer_blob, -1,
273 "shenandoah_pre_barrier_slow",
274 false, &pre_code_gen_cl);
275 if (ShenandoahLoadRefBarrier) {
276 C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_strong_code_gen_cl(ON_STRONG_OOP_REF);
277 _load_reference_barrier_strong_rt_code_blob = Runtime1::generate_blob(buffer_blob, -1,
278 "shenandoah_load_reference_barrier_strong_slow",
279 false, &lrb_strong_code_gen_cl);
280
281 C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_strong_native_code_gen_cl(ON_STRONG_OOP_REF | IN_NATIVE);
282 _load_reference_barrier_strong_native_rt_code_blob = Runtime1::generate_blob(buffer_blob, -1,
283 "shenandoah_load_reference_barrier_strong_native_slow",
284 false, &lrb_strong_native_code_gen_cl);
285
286 C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_weak_code_gen_cl(ON_WEAK_OOP_REF);
287 _load_reference_barrier_weak_rt_code_blob = Runtime1::generate_blob(buffer_blob, -1,
288 "shenandoah_load_reference_barrier_weak_slow",
289 false, &lrb_weak_code_gen_cl);
290
291 C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_phantom_code_gen_cl(ON_PHANTOM_OOP_REF | IN_NATIVE);
292 _load_reference_barrier_phantom_rt_code_blob = Runtime1::generate_blob(buffer_blob, -1,
293 "shenandoah_load_reference_barrier_phantom_slow",
294 false, &lrb_phantom_code_gen_cl);
295 }
296 }
297
// Emit the card-marking post-barrier: dirty the card covering the stored-to
// address. The card table base is read from thread-local data (it can differ
// per thread/epoch in Shenandoah's generational mode). Only heap stores need
// card marks; off-heap stores return early.
void ShenandoahBarrierSetC1::post_barrier(LIRAccess& access, LIR_Opr addr, LIR_Opr new_val) {
  assert(ShenandoahCardBarrier, "Should have been checked by caller");

  DecoratorSet decorators = access.decorators();
  LIRGenerator* gen = access.gen();
  bool in_heap = (decorators & IN_HEAP) != 0;
  if (!in_heap) {
    return;
  }

  // Load the current card table pointer from the thread, rather than using a
  // compile-time constant base.
  LIR_Opr thrd = gen->getThreadPointer();
  const int curr_ct_holder_offset = in_bytes(ShenandoahThreadLocalData::card_table_offset());
  LIR_Address* curr_ct_holder_addr = new LIR_Address(thrd, curr_ct_holder_offset, T_ADDRESS);
  LIR_Opr curr_ct_holder_ptr_reg = gen->new_register(T_ADDRESS);
  __ move(curr_ct_holder_addr, curr_ct_holder_ptr_reg);

  // Flatten an address operand into a plain pointer register.
  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    // ptr cannot be an object because we use this barrier for array card marks
    // and addr can point in the middle of an array.
    LIR_Opr ptr = gen->new_pointer_register();
    if (!address->index()->is_valid() && address->disp() == 0) {
      // Base-only address: the base register already is the pointer.
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");

  // Compute the card index: address >> card_shift. Two-operand architectures
  // must copy first so the shift can write over its own input.
  LIR_Opr tmp = gen->new_pointer_register();
  if (two_operand_lir_form) {
    __ move(addr, tmp);
    __ unsigned_shift_right(tmp, CardTable::card_shift(), tmp);
  } else {
    __ unsigned_shift_right(addr, CardTable::card_shift(), tmp);
  }

  // Card byte lives at card_table_base + card_index.
  LIR_Address* card_addr = new LIR_Address(curr_ct_holder_ptr_reg, tmp, T_BYTE);
  LIR_Opr dirty = LIR_OprFact::intConst(CardTable::dirty_card_val());
  if (UseCondCardMark) {
    // Conditional card mark: skip the store if the card is already dirty,
    // avoiding needless write traffic on hot cards.
    LIR_Opr cur_value = gen->new_register(T_INT);
    __ move(card_addr, cur_value);

    LabelObj* L_already_dirty = new LabelObj();
    __ cmp(lir_cond_equal, cur_value, dirty);
    __ branch(lir_cond_equal, L_already_dirty->label());
    __ move(dirty, card_addr);
    __ branch_destination(L_already_dirty->label());
  } else {
    // Unconditional card mark.
    __ move(dirty, card_addr);
  }
}