1 /*
2 * Copyright (c) 2018, 2024, Red Hat, Inc. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "c1/c1_IR.hpp"
27 #include "gc/shared/satbMarkQueue.hpp"
28 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
29 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
30 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
31 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
32 #include "gc/shenandoah/shenandoahRuntime.hpp"
33 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
34 #include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
35
36 #ifdef ASSERT
37 #define __ gen->lir(__FILE__, __LINE__)->
38 #else
39 #define __ gen->lir()->
40 #endif
41
42 void ShenandoahPreBarrierStub::emit_code(LIR_Assembler* ce) {
43 ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
44 bs->gen_pre_barrier_stub(ce, this);
45 }
46
47 void ShenandoahLoadReferenceBarrierStub::emit_code(LIR_Assembler* ce) {
55 _load_reference_barrier_strong_native_rt_code_blob(nullptr),
56 _load_reference_barrier_weak_rt_code_blob(nullptr),
57 _load_reference_barrier_phantom_rt_code_blob(nullptr) {}
58
59 void ShenandoahBarrierSetC1::pre_barrier(LIRGenerator* gen, CodeEmitInfo* info, DecoratorSet decorators, LIR_Opr addr_opr, LIR_Opr pre_val) {
60 // First we test whether marking is in progress.
61
62 bool patch = (decorators & C1_NEEDS_PATCHING) != 0;
63 bool do_load = pre_val == LIR_OprFact::illegalOpr;
64
65 LIR_Opr thrd = gen->getThreadPointer();
66 LIR_Address* gc_state_addr =
67 new LIR_Address(thrd,
68 in_bytes(ShenandoahThreadLocalData::gc_state_offset()),
69 T_BYTE);
70 // Read the gc_state flag.
71 LIR_Opr flag_val = gen->new_register(T_INT);
72 __ load(gc_state_addr, flag_val);
73
74 // Create a mask to test if the marking bit is set.
75 // TODO: can we directly test if bit is set?
76 LIR_Opr mask = LIR_OprFact::intConst(ShenandoahHeap::MARKING);
77 LIR_Opr mask_reg = gen->new_register(T_INT);
78 __ move(mask, mask_reg);
79
80 if (two_operand_lir_form) {
81 __ logical_and(flag_val, mask_reg, flag_val);
82 } else {
83 LIR_Opr masked_flag = gen->new_register(T_INT);
84 __ logical_and(flag_val, mask_reg, masked_flag);
85 flag_val = masked_flag;
86 }
87 __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));
88
89 LIR_PatchCode pre_val_patch_code = lir_patch_none;
90
91 CodeStub* slow;
92
93 if (do_load) {
94 assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
95 assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");
173 LIR_Opr obj_reg;
174 if (obj->is_constant()) {
175 obj_reg = gen->new_register(type);
176 __ move(obj, obj_reg);
177 } else {
178 obj_reg = gen->new_pointer_register();
179 __ leal(obj, obj_reg);
180 }
181 obj = obj_reg;
182 }
183 return obj;
184 }
185
186 void ShenandoahBarrierSetC1::store_at_resolved(LIRAccess& access, LIR_Opr value) {
187 if (access.is_oop()) {
188 if (ShenandoahSATBBarrier) {
189 pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), access.resolved_addr(), LIR_OprFact::illegalOpr /* pre_val */);
190 }
191 }
192 BarrierSetC1::store_at_resolved(access, value);
193 }
194
195 LIR_Opr ShenandoahBarrierSetC1::resolve_address(LIRAccess& access, bool resolve_in_register) {
196 // We must resolve in register when patching. This is to avoid
197 // having a patch area in the load barrier stub, since the call
198 // into the runtime to patch will not have the proper oop map.
199 const bool patch_before_barrier = access.is_oop() && (access.decorators() & C1_NEEDS_PATCHING) != 0;
200 return BarrierSetC1::resolve_address(access, resolve_in_register || patch_before_barrier);
201 }
202
203 void ShenandoahBarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
204 // 1: non-reference load, no additional barrier is needed
205 if (!access.is_oop()) {
206 BarrierSetC1::load_at_resolved(access, result);
207 return;
208 }
209
210 LIRGenerator* gen = access.gen();
211 DecoratorSet decorators = access.decorators();
212 BasicType type = access.type();
271 _load_reference_barrier_strong_rt_code_blob = Runtime1::generate_blob(buffer_blob, C1StubId::NO_STUBID,
272 "shenandoah_load_reference_barrier_strong_slow",
273 false, &lrb_strong_code_gen_cl);
274
275 C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_strong_native_code_gen_cl(ON_STRONG_OOP_REF | IN_NATIVE);
276 _load_reference_barrier_strong_native_rt_code_blob = Runtime1::generate_blob(buffer_blob, C1StubId::NO_STUBID,
277 "shenandoah_load_reference_barrier_strong_native_slow",
278 false, &lrb_strong_native_code_gen_cl);
279
280 C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_weak_code_gen_cl(ON_WEAK_OOP_REF);
281 _load_reference_barrier_weak_rt_code_blob = Runtime1::generate_blob(buffer_blob, C1StubId::NO_STUBID,
282 "shenandoah_load_reference_barrier_weak_slow",
283 false, &lrb_weak_code_gen_cl);
284
285 C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_phantom_code_gen_cl(ON_PHANTOM_OOP_REF | IN_NATIVE);
286 _load_reference_barrier_phantom_rt_code_blob = Runtime1::generate_blob(buffer_blob, C1StubId::NO_STUBID,
287 "shenandoah_load_reference_barrier_phantom_slow",
288 false, &lrb_phantom_code_gen_cl);
289 }
290 }
|
1 /*
2 * Copyright (c) 2018, 2024, Red Hat, Inc. All rights reserved.
3 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "c1/c1_IR.hpp"
28 #include "gc/shared/satbMarkQueue.hpp"
29 #include "gc/shenandoah/mode/shenandoahMode.hpp"
30 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
31 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
32 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
33 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
34 #include "gc/shenandoah/shenandoahRuntime.hpp"
35 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
36 #include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
37
38 #ifdef ASSERT
39 #define __ gen->lir(__FILE__, __LINE__)->
40 #else
41 #define __ gen->lir()->
42 #endif
43
44 void ShenandoahPreBarrierStub::emit_code(LIR_Assembler* ce) {
45 ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
46 bs->gen_pre_barrier_stub(ce, this);
47 }
48
49 void ShenandoahLoadReferenceBarrierStub::emit_code(LIR_Assembler* ce) {
57 _load_reference_barrier_strong_native_rt_code_blob(nullptr),
58 _load_reference_barrier_weak_rt_code_blob(nullptr),
59 _load_reference_barrier_phantom_rt_code_blob(nullptr) {}
60
61 void ShenandoahBarrierSetC1::pre_barrier(LIRGenerator* gen, CodeEmitInfo* info, DecoratorSet decorators, LIR_Opr addr_opr, LIR_Opr pre_val) {
62 // First we test whether marking is in progress.
63
64 bool patch = (decorators & C1_NEEDS_PATCHING) != 0;
65 bool do_load = pre_val == LIR_OprFact::illegalOpr;
66
67 LIR_Opr thrd = gen->getThreadPointer();
68 LIR_Address* gc_state_addr =
69 new LIR_Address(thrd,
70 in_bytes(ShenandoahThreadLocalData::gc_state_offset()),
71 T_BYTE);
72 // Read the gc_state flag.
73 LIR_Opr flag_val = gen->new_register(T_INT);
74 __ load(gc_state_addr, flag_val);
75
76 // Create a mask to test if the marking bit is set.
77 LIR_Opr mask = LIR_OprFact::intConst(ShenandoahHeap::MARKING);
78 LIR_Opr mask_reg = gen->new_register(T_INT);
79 __ move(mask, mask_reg);
80
81 if (two_operand_lir_form) {
82 __ logical_and(flag_val, mask_reg, flag_val);
83 } else {
84 LIR_Opr masked_flag = gen->new_register(T_INT);
85 __ logical_and(flag_val, mask_reg, masked_flag);
86 flag_val = masked_flag;
87 }
88 __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));
89
90 LIR_PatchCode pre_val_patch_code = lir_patch_none;
91
92 CodeStub* slow;
93
94 if (do_load) {
95 assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
96 assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");
174 LIR_Opr obj_reg;
175 if (obj->is_constant()) {
176 obj_reg = gen->new_register(type);
177 __ move(obj, obj_reg);
178 } else {
179 obj_reg = gen->new_pointer_register();
180 __ leal(obj, obj_reg);
181 }
182 obj = obj_reg;
183 }
184 return obj;
185 }
186
187 void ShenandoahBarrierSetC1::store_at_resolved(LIRAccess& access, LIR_Opr value) {
188 if (access.is_oop()) {
189 if (ShenandoahSATBBarrier) {
190 pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), access.resolved_addr(), LIR_OprFact::illegalOpr /* pre_val */);
191 }
192 }
193 BarrierSetC1::store_at_resolved(access, value);
194
195 if (ShenandoahCardBarrier && access.is_oop()) {
196 DecoratorSet decorators = access.decorators();
197 bool is_array = (decorators & IS_ARRAY) != 0;
198 bool on_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
199
200 bool precise = is_array || on_anonymous;
201 LIR_Opr post_addr = precise ? access.resolved_addr() : access.base().opr();
202 post_barrier(access, post_addr, value);
203 }
204 }
205
206 LIR_Opr ShenandoahBarrierSetC1::resolve_address(LIRAccess& access, bool resolve_in_register) {
207 // We must resolve in register when patching. This is to avoid
208 // having a patch area in the load barrier stub, since the call
209 // into the runtime to patch will not have the proper oop map.
210 const bool patch_before_barrier = access.is_oop() && (access.decorators() & C1_NEEDS_PATCHING) != 0;
211 return BarrierSetC1::resolve_address(access, resolve_in_register || patch_before_barrier);
212 }
213
214 void ShenandoahBarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
215 // 1: non-reference load, no additional barrier is needed
216 if (!access.is_oop()) {
217 BarrierSetC1::load_at_resolved(access, result);
218 return;
219 }
220
221 LIRGenerator* gen = access.gen();
222 DecoratorSet decorators = access.decorators();
223 BasicType type = access.type();
282 _load_reference_barrier_strong_rt_code_blob = Runtime1::generate_blob(buffer_blob, C1StubId::NO_STUBID,
283 "shenandoah_load_reference_barrier_strong_slow",
284 false, &lrb_strong_code_gen_cl);
285
286 C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_strong_native_code_gen_cl(ON_STRONG_OOP_REF | IN_NATIVE);
287 _load_reference_barrier_strong_native_rt_code_blob = Runtime1::generate_blob(buffer_blob, C1StubId::NO_STUBID,
288 "shenandoah_load_reference_barrier_strong_native_slow",
289 false, &lrb_strong_native_code_gen_cl);
290
291 C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_weak_code_gen_cl(ON_WEAK_OOP_REF);
292 _load_reference_barrier_weak_rt_code_blob = Runtime1::generate_blob(buffer_blob, C1StubId::NO_STUBID,
293 "shenandoah_load_reference_barrier_weak_slow",
294 false, &lrb_weak_code_gen_cl);
295
296 C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_phantom_code_gen_cl(ON_PHANTOM_OOP_REF | IN_NATIVE);
297 _load_reference_barrier_phantom_rt_code_blob = Runtime1::generate_blob(buffer_blob, C1StubId::NO_STUBID,
298 "shenandoah_load_reference_barrier_phantom_slow",
299 false, &lrb_phantom_code_gen_cl);
300 }
301 }
302
// Emits the card-marking post-barrier for a store at `addr`: computes the card
// covering the updated slot and marks it dirty (optionally skipping the store
// when the card is already dirty, under UseCondCardMark).
void ShenandoahBarrierSetC1::post_barrier(LIRAccess& access, LIR_Opr addr, LIR_Opr new_val) {
  assert(ShenandoahCardBarrier, "Should have been checked by caller");

  DecoratorSet decorators = access.decorators();
  LIRGenerator* gen = access.gen();
  bool in_heap = (decorators & IN_HEAP) != 0;
  // Only heap stores are tracked by the card table.
  if (!in_heap) {
    return;
  }

  BarrierSet* bs = BarrierSet::barrier_set();
  ShenandoahBarrierSet* ctbs = barrier_set_cast<ShenandoahBarrierSet>(bs);
  CardTable* ct = ctbs->card_table();
  // Biased base of the card table: (addr >> card_shift) + base yields the card byte.
  LIR_Const* card_table_base = new LIR_Const(ct->byte_map_base());
  if (addr->is_address()) {
    // Materialize the effective address into a plain pointer register first.
    LIR_Address* address = addr->as_address_ptr();
    // ptr cannot be an object because we use this barrier for array card marks
    // and addr can point in the middle of an array.
    LIR_Opr ptr = gen->new_pointer_register();
    if (!address->index()->is_valid() && address->disp() == 0) {
      // base-only address: a simple move suffices, no lea needed.
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");

  // Compute the card index: addr >> card_shift.
  LIR_Opr tmp = gen->new_pointer_register();
  if (two_operand_lir_form) {
    // Two-operand ISAs shift in place, so copy addr into tmp first.
    __ move(addr, tmp);
    __ unsigned_shift_right(tmp, CardTable::card_shift(), tmp);
  } else {
    __ unsigned_shift_right(addr, CardTable::card_shift(), tmp);
  }

  // Form the card byte address: either fold the table base in as an immediate
  // displacement, or load it into a register when it cannot be inlined.
  LIR_Address* card_addr;
  if (gen->can_inline_as_constant(card_table_base)) {
    card_addr = new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE);
  } else {
    card_addr = new LIR_Address(tmp, gen->load_constant(card_table_base), T_BYTE);
  }

  LIR_Opr dirty = LIR_OprFact::intConst(CardTable::dirty_card_val());
  if (UseCondCardMark) {
    // Conditional card mark: read the card first and skip the store if it is
    // already dirty, avoiding a redundant write to the card table.
    LIR_Opr cur_value = gen->new_register(T_INT);
    __ move(card_addr, cur_value);

    LabelObj* L_already_dirty = new LabelObj();
    __ cmp(lir_cond_equal, cur_value, dirty);
    __ branch(lir_cond_equal, L_already_dirty->label());
    __ move(dirty, card_addr);
    __ branch_destination(L_already_dirty->label());
  } else {
    // Unconditional card mark: always store the dirty value.
    __ move(dirty, card_addr);
  }
}
|