/*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "gc/g1/c1/g1BarrierSetC1.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "utilities/macros.hpp"

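// Shorthand for appending LIR instructions to the current LIRGenerator's LIR
// list; debug builds additionally record the emitting source location.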
#ifdef ASSERT
#define __ gen->lir(__FILE__, __LINE__)->
#else
#define __ gen->lir()->
#endif

void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
  G1BarrierSetAssembler* bs = (G1BarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
  bs->gen_pre_barrier_stub(ce, this);
}

void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
  G1BarrierSetAssembler* bs = (G1BarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
  bs->gen_post_barrier_stub(ce, this);
}

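// SATB (snapshot-at-the-beginning) pre-barrier: when concurrent marking is
// active, the value about to be overwritten (pre_val) must be recorded in the
// thread-local SATB mark queue so the concurrent marker still visits it.
// Called either with an address to load the old value from (do_load) or with
// pre_val already available in a register.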
void G1BarrierSetC1::pre_barrier(LIRAccess& access, LIR_Opr addr_opr,
                                 LIR_Opr pre_val, CodeEmitInfo* info) {
  LIRGenerator* gen = access.gen();
  DecoratorSet decorators = access.decorators();

  // First we test whether marking is in progress.
  BasicType flag_type;
  bool patch = (decorators & C1_NEEDS_PATCHING) != 0;
  bool do_load = pre_val == LIR_OprFact::illegalOpr;
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    flag_type = T_INT;
  } else {
    guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1,
              "Assumption");
    // Use unsigned type T_BOOLEAN here rather than signed T_BYTE since some platforms, e.g. ARM,
    // need to use unsigned instructions to use the large offset to load the satb_mark_queue.
    flag_type = T_BOOLEAN;
  }
  LIR_Opr thrd = gen->getThreadPointer();
  LIR_Address* mark_active_flag_addr =
    new LIR_Address(thrd,
                    in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()),
                    flag_type);
  // Read the marking-in-progress flag.
  // Note: When loading pre_val requires patching, i.e. do_load == true &&
  // patch == true, a safepoint can occur while patching. This makes the
  // pre-barrier non-atomic and invalidates the marking-in-progress check.
  // Therefore, in the presence of patching, we must repeat the same
  // marking-in-progress checking before calling into the Runtime. For
  // simplicity, we do this check unconditionally (regardless of the presence
  // of patching) in the runtime stub
  // (G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub).
  LIR_Opr flag_val = gen->new_register(T_INT);
  __ load(mark_active_flag_addr, flag_val);
  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));

  LIR_PatchCode pre_val_patch_code = lir_patch_none;

  CodeStub* slow;

  if (do_load) {
    assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
    assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");

    if (patch)
      pre_val_patch_code = lir_patch_normal;

    pre_val = gen->new_register(T_OBJECT);

    if (!addr_opr->is_address()) {
      assert(addr_opr->is_register(), "must be");
      addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
    }
    slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info);
  } else {
    assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
    assert(pre_val->is_register(), "must be");
    assert(pre_val->type() == T_OBJECT, "must be an object");
    assert(info == nullptr, "sanity");

    slow = new G1PreBarrierStub(pre_val);
  }

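  // Marking is in progress (flag != 0): branch to the stub, which loads
  // pre_val if necessary and records it in the SATB mark queue.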
  __ branch(lir_cond_notEqual, slow);
  __ branch_destination(slow->continuation());
}

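// Post-barrier for oop stores into the heap: record the card spanning the
// updated field so that remembered sets can be brought up to date later.
// Stores of constant null and stores that do not cross a region boundary are
// filtered out inline; the remaining cases branch to G1PostBarrierStub.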
void G1BarrierSetC1::post_barrier(LIRAccess& access, LIR_Opr addr, LIR_Opr new_val) {
  LIRGenerator* gen = access.gen();
  DecoratorSet decorators = access.decorators();
  bool in_heap = (decorators & IN_HEAP) != 0;
  if (!in_heap) {
    return;
  }

  // If the "new_val" is a constant null, no barrier is necessary.
  if (new_val->is_constant() &&
      new_val->as_constant_ptr()->as_jobject() == nullptr) return;

  if (!new_val->is_register()) {
    LIR_Opr new_val_reg = gen->new_register(T_OBJECT);
    if (new_val->is_constant()) {
      __ move(new_val, new_val_reg);
    } else {
      __ leal(new_val, new_val_reg);
    }
    new_val = new_val_reg;
  }
  assert(new_val->is_register(), "must be a register at this point");

  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    LIR_Opr ptr = gen->new_pointer_register();
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");

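  // Cross-region check: XOR the field address with the new value and shift
  // right by the region-size log. A non-zero result means the reference
  // crosses a region boundary and the card must be recorded.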
  LIR_Opr xor_res = gen->new_pointer_register();
  LIR_Opr xor_shift_res = gen->new_pointer_register();
  if (two_operand_lir_form) {
    __ move(addr, xor_res);
    __ logical_xor(xor_res, new_val, xor_res);
    __ move(xor_res, xor_shift_res);
    __ unsigned_shift_right(xor_shift_res,
                            LIR_OprFact::intConst(checked_cast<jint>(G1HeapRegion::LogOfHRGrainBytes)),
                            xor_shift_res,
                            LIR_Opr::illegalOpr());
  } else {
    __ logical_xor(addr, new_val, xor_res);
    __ unsigned_shift_right(xor_res,
                            LIR_OprFact::intConst(checked_cast<jint>(G1HeapRegion::LogOfHRGrainBytes)),
                            xor_shift_res,
                            LIR_Opr::illegalOpr());
  }

  __ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD));

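  // Regions differ: branch to the stub, which performs the remaining runtime
  // checks and records the card covering the updated field.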
  CodeStub* slow = new G1PostBarrierStub(addr, new_val);
  __ branch(lir_cond_notEqual, slow);
  __ branch_destination(slow->continuation());
}

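// Loads through weak, phantom, or unknown-strength references (e.g.
// Reference.get()) apply the pre-barrier to the loaded referent: once the
// mutator holds the reference, the referent must be kept alive by the
// concurrent marking cycle.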
void G1BarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
  DecoratorSet decorators = access.decorators();
  bool is_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool is_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
  bool is_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  LIRGenerator* gen = access.gen();

  BarrierSetC1::load_at_resolved(access, result);

  if (access.is_oop() && (is_weak || is_phantom || is_anonymous)) {
    // Register the value in the referent field with the pre-barrier
    LabelObj* Lcont_anonymous;
    if (is_anonymous) {
      Lcont_anonymous = new LabelObj();
      generate_referent_check(access, Lcont_anonymous);
    }
    pre_barrier(access, LIR_OprFact::illegalOpr /* addr_opr */,
                result /* pre_val */, access.patch_emit_info() /* info */);
    if (is_anonymous) {
      __ branch_destination(Lcont_anonymous->label());
    }
  }
}

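// Closures used to generate the shared out-of-line runtime stubs that the
// G1PreBarrierStub and G1PostBarrierStub slow paths call into.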
class C1G1PreBarrierCodeGenClosure : public StubAssemblerCodeGenClosure {
  virtual OopMapSet* generate_code(StubAssembler* sasm) {
    G1BarrierSetAssembler* bs = (G1BarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
    bs->generate_c1_pre_barrier_runtime_stub(sasm);
    return nullptr;
  }
};

class C1G1PostBarrierCodeGenClosure : public StubAssemblerCodeGenClosure {
  virtual OopMapSet* generate_code(StubAssembler* sasm) {
    G1BarrierSetAssembler* bs = (G1BarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
    bs->generate_c1_post_barrier_runtime_stub(sasm);
    return nullptr;
  }
};

void G1BarrierSetC1::generate_c1_runtime_stubs(BufferBlob* buffer_blob) {
  C1G1PreBarrierCodeGenClosure pre_code_gen_cl;
  C1G1PostBarrierCodeGenClosure post_code_gen_cl;
  _pre_barrier_c1_runtime_code_blob = Runtime1::generate_blob(buffer_blob, C1StubId::NO_STUBID, "g1_pre_barrier_slow",
                                                              false, &pre_code_gen_cl);
  _post_barrier_c1_runtime_code_blob = Runtime1::generate_blob(buffer_blob, C1StubId::NO_STUBID, "g1_post_barrier_slow",
                                                               false, &post_code_gen_cl);
}