/*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "code/aotCodeCache.hpp"
#include "gc/g1/c1/g1BarrierSetC1.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/macros.hpp"

#ifdef ASSERT
#define __ gen->lir(__FILE__, __LINE__)->
#else
#define __ gen->lir()->
#endif

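// Out-of-line (slow-path) code for the SATB pre-barrier. The actual instruction
// sequence is emitted by the platform-specific G1BarrierSetAssembler.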
void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
  G1BarrierSetAssembler* bs = (G1BarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
  bs->gen_pre_barrier_stub(ce, this);
}

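// Emit the SATB pre-barrier. If concurrent marking is active, the previous value
// of the field (loaded in the stub when pre_val is not supplied by the caller)
// must be recorded in the SATB queue; that work happens in the slow-path stub.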
void G1BarrierSetC1::pre_barrier(LIRAccess& access, LIR_Opr addr_opr,
                                 LIR_Opr pre_val, CodeEmitInfo* info) {
  LIRGenerator* gen = access.gen();
  DecoratorSet decorators = access.decorators();

  // First we test whether marking is in progress.
  BasicType flag_type;
  bool patch = (decorators & C1_NEEDS_PATCHING) != 0;
  bool do_load = pre_val == LIR_OprFact::illegalOpr;
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    flag_type = T_INT;
  } else {
    guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1,
              "Assumption");
    // Use the unsigned type T_BOOLEAN here rather than the signed T_BYTE since some
    // platforms, e.g. ARM, need unsigned instructions to handle the large offset used
    // to load the satb_mark_queue flag.
    flag_type = T_BOOLEAN;
  }
  LIR_Opr thrd = gen->getThreadPointer();
  LIR_Address* mark_active_flag_addr =
    new LIR_Address(thrd,
                    in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()),
                    flag_type);
  // Read the marking-in-progress flag.
  // Note: When loading pre_val requires patching, i.e. do_load == true &&
  // patch == true, a safepoint can occur while patching. This makes the
  // pre-barrier non-atomic and invalidates the marking-in-progress check.
  // Therefore, in the presence of patching, we must repeat the same
  // marking-in-progress checking before calling into the Runtime. For
  // simplicity, we do this check unconditionally (regardless of the presence
  // of patching) in the runtime stub
  // (G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub).
  LIR_Opr flag_val = gen->new_register(T_INT);
  __ load(mark_active_flag_addr, flag_val);
  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));

  LIR_PatchCode pre_val_patch_code = lir_patch_none;

  CodeStub* slow;

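  // Two flavors of the slow-path stub: either the stub itself loads the previous
  // value from addr_opr (do_load, possibly with patching), or the caller has
  // already provided the previous value in pre_val.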
  if (do_load) {
    assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
    assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");

    if (patch)
      pre_val_patch_code = lir_patch_normal;

    pre_val = gen->new_register(T_OBJECT);

    if (!addr_opr->is_address()) {
      assert(addr_opr->is_register(), "must be");
      addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
    }
    slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info);
  } else {
    assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
    assert(pre_val->is_register(), "must be");
    assert(pre_val->type() == T_OBJECT, "must be an object");
    assert(info == nullptr, "sanity");

    slow = new G1PreBarrierStub(pre_val);
  }

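  // Branch to the slow path only if the marking-active flag read above was non-zero.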
  __ branch(lir_cond_notEqual, slow);
  __ branch_destination(slow->continuation());
}

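// Custom LIR op that emits the G1 post (card marking) barrier inline. All inputs
// are also declared as temps so that the register allocator assigns them distinct
// registers; the actual instruction sequence is emitted by the platform-specific
// G1BarrierSetAssembler.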
class LIR_OpG1PostBarrier : public LIR_Op {
  friend class LIR_OpVisitState;

private:
  LIR_Opr _addr;
  LIR_Opr _new_val;
  LIR_Opr _thread;
  LIR_Opr _tmp1;
  LIR_Opr _tmp2;

public:
  LIR_OpG1PostBarrier(LIR_Opr addr,
                      LIR_Opr new_val,
                      LIR_Opr thread,
                      LIR_Opr tmp1,
                      LIR_Opr tmp2)
    : LIR_Op(lir_none, LIR_OprFact::illegalOpr, nullptr),
      _addr(addr),
      _new_val(new_val),
      _thread(thread),
      _tmp1(tmp1),
      _tmp2(tmp2)
  {}

  virtual void visit(LIR_OpVisitState* state) {
    state->do_input(_addr);
    state->do_input(_new_val);
    state->do_input(_thread);

    // Use temps to enforce different registers.
    state->do_temp(_addr);
    state->do_temp(_new_val);
    state->do_temp(_thread);
    state->do_temp(_tmp1);
    state->do_temp(_tmp2);

    if (_info != nullptr) {
      state->do_info(_info);
    }
  }

  virtual void emit_code(LIR_Assembler* ce) {
    if (_info != nullptr) {
      ce->add_debug_info_for_null_check_here(_info);
    }

    Register addr = _addr->as_pointer_register();
    Register new_val = _new_val->as_pointer_register();
    Register thread = _thread->as_pointer_register();
    Register tmp1 = _tmp1->as_pointer_register();
    Register tmp2 = _tmp2->as_pointer_register();

    // addr and new_val may be the same register for a self-referential store such as
    // x.a = x. No post barrier is needed in that case because the cross-region test
    // always exits early anyway, and skipping it here lets the post barrier
    // implementations assume that addr and new_val are in different registers.
    if (addr == new_val) {
      ce->masm()->block_comment(err_msg("same addr/new_val due to self-referential store with imprecise card mark %s", addr->name()));
      return;
    }

    G1BarrierSetAssembler* bs_asm = static_cast<G1BarrierSetAssembler*>(BarrierSet::barrier_set()->barrier_set_assembler());
    bs_asm->g1_write_barrier_post_c1(ce->masm(), addr, new_val, thread, tmp1, tmp2);
  }

  virtual void print_instr(outputStream* out) const {
    _addr->print(out); out->print(" ");
    _new_val->print(out); out->print(" ");
    _thread->print(out); out->print(" ");
    _tmp1->print(out); out->print(" ");
    _tmp2->print(out); out->print(" ");
    out->cr();
  }

#ifndef PRODUCT
  virtual const char* name() const {
    return "lir_g1_post_barrier";
  }
#endif // PRODUCT
};

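// Emit the G1 post barrier (card marking) for a store of new_val to addr. Stores
// outside the heap and stores of a constant null need no barrier; otherwise both
// operands are materialized into registers and the LIR_OpG1PostBarrier defined
// above is appended.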
void G1BarrierSetC1::post_barrier(LIRAccess& access, LIR_Opr addr, LIR_Opr new_val) {
  LIRGenerator* gen = access.gen();
  DecoratorSet decorators = access.decorators();
  bool in_heap = (decorators & IN_HEAP) != 0;
  if (!in_heap) {
    return;
  }

  // If the "new_val" is a constant null, no barrier is necessary.
  if (new_val->is_constant() &&
      new_val->as_constant_ptr()->as_jobject() == nullptr) return;

  if (!new_val->is_register()) {
    LIR_Opr new_val_reg = gen->new_register(T_OBJECT);
    if (new_val->is_constant()) {
      __ move(new_val, new_val_reg);
    } else {
      __ leal(new_val, new_val_reg);
    }
    new_val = new_val_reg;
  }
  assert(new_val->is_register(), "must be a register at this point");

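  // Flatten an address operand into a plain pointer register; the post-barrier
  // LIR op only operates on register operands.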
  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    LIR_Opr ptr = gen->new_pointer_register();
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");

  __ append(new LIR_OpG1PostBarrier(addr,
                                    new_val,
                                    gen->getThreadPointer() /* thread */,
                                    gen->new_pointer_register() /* tmp1 */,
                                    gen->new_pointer_register() /* tmp2 */));
}

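// Loads of weak, phantom and unknown-strength references apply the SATB pre-barrier
// to the loaded value so that a referent handed to the mutator is also visible to
// concurrent marking. For ON_UNKNOWN_OOP_REF accesses, generate_referent_check()
// decides at runtime whether the barrier is needed.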
void G1BarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
  DecoratorSet decorators = access.decorators();
  bool is_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool is_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
  bool is_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  LIRGenerator *gen = access.gen();

  BarrierSetC1::load_at_resolved(access, result);

  if (access.is_oop() && (is_weak || is_phantom || is_anonymous)) {
    // Register the value in the referent field with the pre-barrier
    LabelObj *Lcont_anonymous;
    if (is_anonymous) {
      Lcont_anonymous = new LabelObj();
      generate_referent_check(access, Lcont_anonymous);
    }
    pre_barrier(access, LIR_OprFact::illegalOpr /* addr_opr */,
                result /* pre_val */, access.patch_emit_info() /* info */);
    if (is_anonymous) {
      __ branch_destination(Lcont_anonymous->label());
    }
  }
}

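// Closure used by generate_c1_runtime_stubs() below to emit the shared runtime
// stub that the pre-barrier slow path calls into.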
class C1G1PreBarrierCodeGenClosure : public StubAssemblerCodeGenClosure {
  virtual OopMapSet* generate_code(StubAssembler* sasm) {
    G1BarrierSetAssembler* bs = (G1BarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
    bs->generate_c1_pre_barrier_runtime_stub(sasm);
    return nullptr;
  }
};

bool G1BarrierSetC1::generate_c1_runtime_stubs(BufferBlob* buffer_blob) {
  C1G1PreBarrierCodeGenClosure pre_code_gen_cl;
  _pre_barrier_c1_runtime_code_blob = Runtime1::generate_blob(buffer_blob, StubId::NO_STUBID, "g1_pre_barrier_slow",
                                                              false, &pre_code_gen_cl);
  return _pre_barrier_c1_runtime_code_blob != nullptr;
}