/*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/g1/c1/g1BarrierSetC1.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/macros.hpp"

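// Shorthand for appending LIR instructions to the current generator's LIR
// list ("gen" is the local LIRGenerator in each function below); with ASSERT
// enabled, the originating source file and line are recorded to aid debugging.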
#ifdef ASSERT
#define __ gen->lir(__FILE__, __LINE__)->
#else
#define __ gen->lir()->
#endif

void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
  G1BarrierSetAssembler* bs = (G1BarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
  bs->gen_pre_barrier_stub(ce, this);
}

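// The SATB pre-barrier. In outline, the code emitted here together with the
// slow-path stub behaves roughly as follows (a sketch, not literal emitted
// code):
//
//   if (thread->satb_mark_queue_active() != 0) {   // concurrent marking running?
//     pre_val = *addr;                             // only if do_load
//     if (pre_val != null) {
//       // enqueue pre_val on the thread-local SATB queue so that concurrent
//       // marking still sees the overwritten value (handled by
//       // G1PreBarrierStub and the pre-barrier runtime stub)
//     }
//   }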
void G1BarrierSetC1::pre_barrier(LIRAccess& access, LIR_Opr addr_opr,
                                 LIR_Opr pre_val, CodeEmitInfo* info) {
  LIRGenerator* gen = access.gen();
  DecoratorSet decorators = access.decorators();

  // First we test whether marking is in progress.
  BasicType flag_type;
  bool patch = (decorators & C1_NEEDS_PATCHING) != 0;
  bool do_load = pre_val == LIR_OprFact::illegalOpr;
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    flag_type = T_INT;
  } else {
    guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1,
              "Assumption");
    // Use unsigned type T_BOOLEAN here rather than signed T_BYTE since some
    // platforms, e.g. ARM, need unsigned instructions to handle the large
    // offset used to load the satb_mark_queue active flag.
    flag_type = T_BOOLEAN;
  }
  LIR_Opr thrd = gen->getThreadPointer();
  LIR_Address* mark_active_flag_addr =
    new LIR_Address(thrd,
                    in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()),
                    flag_type);
  // Read the marking-in-progress flag.
  // Note: When loading pre_val requires patching, i.e. do_load == true &&
  // patch == true, a safepoint can occur while patching. This makes the
  // pre-barrier non-atomic and invalidates the marking-in-progress check.
  // Therefore, in the presence of patching, we must repeat the same
  // marking-in-progress check before calling into the runtime. For
  // simplicity, we do this check unconditionally (regardless of the presence
  // of patching) in the runtime stub
  // (G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub).
  LIR_Opr flag_val = gen->new_register(T_INT);
  __ load(mark_active_flag_addr, flag_val);
  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));

  LIR_PatchCode pre_val_patch_code = lir_patch_none;

  CodeStub* slow;

  if (do_load) {
    assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
    assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");

    if (patch) {
      pre_val_patch_code = lir_patch_normal;
    }

    pre_val = gen->new_register(T_OBJECT);

    if (!addr_opr->is_address()) {
      assert(addr_opr->is_register(), "must be");
      addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
    }
    slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info);
  } else {
    assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
    assert(pre_val->is_register(), "must be");
    assert(pre_val->type() == T_OBJECT, "must be an object");
    assert(info == nullptr, "sanity");

    slow = new G1PreBarrierStub(pre_val);
  }

  __ branch(lir_cond_notEqual, slow);
  __ branch_destination(slow->continuation());
}

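// Custom LIR instruction for the G1 post barrier. Instead of expanding the
// barrier into individual LIR operations up front, code generation is
// deferred to G1BarrierSetAssembler::g1_write_barrier_post_c1 at
// code-emission time, keeping the LIR compact and leaving the exact barrier
// shape to the platform-specific assembler.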
class LIR_OpG1PostBarrier : public LIR_Op {
  friend class LIR_OpVisitState;

private:
  LIR_Opr _addr;
  LIR_Opr _new_val;
  LIR_Opr _thread;
  LIR_Opr _tmp1;
  LIR_Opr _tmp2;

public:
  LIR_OpG1PostBarrier(LIR_Opr addr,
                      LIR_Opr new_val,
                      LIR_Opr thread,
                      LIR_Opr tmp1,
                      LIR_Opr tmp2)
    : LIR_Op(lir_none, lir_none, nullptr),
      _addr(addr),
      _new_val(new_val),
      _thread(thread),
      _tmp1(tmp1),
      _tmp2(tmp2)
  {}

  virtual void visit(LIR_OpVisitState* state) {
    state->do_input(_addr);
    state->do_input(_new_val);
    state->do_input(_thread);

    // Use temps to enforce different registers.
    state->do_temp(_addr);
    state->do_temp(_new_val);
    state->do_temp(_thread);
    state->do_temp(_tmp1);
    state->do_temp(_tmp2);

    if (_info != nullptr) {
      state->do_info(_info);
    }
  }

  virtual void emit_code(LIR_Assembler* ce) {
    if (_info != nullptr) {
      ce->add_debug_info_for_null_check_here(_info);
    }

    Register addr = _addr->as_pointer_register();
    Register new_val = _new_val->as_pointer_register();
    Register thread = _thread->as_pointer_register();
    Register tmp1 = _tmp1->as_pointer_register();
    Register tmp2 = _tmp2->as_pointer_register();

    // This may happen for a store of x.a = x - we do not need a post barrier
    // for those as the cross-region test will always exit early anyway.
    // The post-barrier implementations can therefore assume that addr and
    // new_val are different.
    if (addr == new_val) {
      ce->masm()->block_comment(err_msg("same addr/new_val due to self-referential store with imprecise card mark %s", addr->name()));
      return;
    }

    G1BarrierSetAssembler* bs_asm = static_cast<G1BarrierSetAssembler*>(BarrierSet::barrier_set()->barrier_set_assembler());
    bs_asm->g1_write_barrier_post_c1(ce->masm(), addr, new_val, thread, tmp1, tmp2);
  }

  virtual void print_instr(outputStream* out) const {
    _addr->print(out);    out->print(" ");
    _new_val->print(out); out->print(" ");
    _thread->print(out);  out->print(" ");
    _tmp1->print(out);    out->print(" ");
    _tmp2->print(out);    out->print(" ");
    out->cr();
  }

#ifndef PRODUCT
  virtual const char* name() const {
    return "lir_g1_post_barrier";
  }
#endif // PRODUCT
};

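// The G1 post barrier records, via the card table, stores that create a
// cross-region reference. A rough sketch of the logic emitted by
// g1_write_barrier_post_c1 (details such as the filtering order and the
// dirty-card queue handling are platform- and configuration-specific):
//
//   if ((addr ^ new_val) >> G1HeapRegion::LogOfHRGrainBytes == 0) goto done; // same region
//   if (new_val == null) goto done;
//   card = card_table_base + (addr >> card_shift);
//   if (*card is already dirty) goto done;
//   mark *card dirty (and, depending on the configuration, enqueue it on
//   the thread-local dirty card queue);
//   done: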
void G1BarrierSetC1::post_barrier(LIRAccess& access, LIR_Opr addr, LIR_Opr new_val) {
  LIRGenerator* gen = access.gen();
  DecoratorSet decorators = access.decorators();
  bool in_heap = (decorators & IN_HEAP) != 0;
  if (!in_heap) {
    return;
  }

  // If the "new_val" is a constant null, no barrier is necessary.
  if (new_val->is_constant() &&
      new_val->as_constant_ptr()->as_jobject() == nullptr) {
    return;
  }

  if (!new_val->is_register()) {
    LIR_Opr new_val_reg = gen->new_register(T_OBJECT);
    if (new_val->is_constant()) {
      __ move(new_val, new_val_reg);
    } else {
      __ leal(new_val, new_val_reg);
    }
    new_val = new_val_reg;
  }
  assert(new_val->is_register(), "must be a register at this point");

  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    LIR_Opr ptr = gen->new_pointer_register();
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");

  __ append(new LIR_OpG1PostBarrier(addr,
                                    new_val,
                                    gen->getThreadPointer() /* thread */,
                                    gen->new_pointer_register() /* tmp1 */,
                                    gen->new_pointer_register() /* tmp2 */));
}

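// Loads of weak, phantom, or unknown-strength references need a SATB
// pre-barrier on the loaded value: a Reference.get() style load may be the
// only remaining path to the referent, so the loaded value must be reported
// to concurrent marking to keep it alive.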
void G1BarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
  DecoratorSet decorators = access.decorators();
  bool is_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool is_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
  bool is_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  LIRGenerator* gen = access.gen();

  BarrierSetC1::load_at_resolved(access, result);

  if (access.is_oop() && (is_weak || is_phantom || is_anonymous)) {
    // Register the value in the referent field with the pre-barrier
    LabelObj* Lcont_anonymous;
    if (is_anonymous) {
      Lcont_anonymous = new LabelObj();
      generate_referent_check(access, Lcont_anonymous);
    }
    pre_barrier(access, LIR_OprFact::illegalOpr /* addr_opr */,
                result /* pre_val */, access.patch_emit_info() /* info */);
    if (is_anonymous) {
      __ branch_destination(Lcont_anonymous->label());
    }
  }
}

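// Generates the shared slow-path runtime stub that G1PreBarrierStub calls
// into when marking is active and the previous value must be enqueued.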
class C1G1PreBarrierCodeGenClosure : public StubAssemblerCodeGenClosure {
  virtual OopMapSet* generate_code(StubAssembler* sasm) {
    G1BarrierSetAssembler* bs = (G1BarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
    bs->generate_c1_pre_barrier_runtime_stub(sasm);
    return nullptr;
  }
};

bool G1BarrierSetC1::generate_c1_runtime_stubs(BufferBlob* buffer_blob) {
  C1G1PreBarrierCodeGenClosure pre_code_gen_cl;
  _pre_barrier_c1_runtime_code_blob = Runtime1::generate_blob(buffer_blob, StubId::NO_STUBID, "g1_pre_barrier_slow",
                                                              false, &pre_code_gen_cl);
  return _pre_barrier_c1_runtime_code_blob != nullptr;
}