/*
 * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_CodeStubs.hpp"
#if INCLUDE_CDS
#include "code/SCCache.hpp"
#endif
#include "gc/g1/c1/g1BarrierSetC1.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "utilities/macros.hpp"

#ifdef ASSERT
#define __ gen->lir(__FILE__, __LINE__)->
#else
#define __ gen->lir()->
#endif

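// The barrier slow-path stubs delegate code emission to the platform-specific
// G1BarrierSetAssembler.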
void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
  G1BarrierSetAssembler* bs = (G1BarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
  bs->gen_pre_barrier_stub(ce, this);
}

void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
  G1BarrierSetAssembler* bs = (G1BarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
  bs->gen_post_barrier_stub(ce, this);
}

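// SATB pre-barrier: while concurrent marking is active, record the value the
// field held before the store so that marking does not lose the previously
// referenced object.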
void G1BarrierSetC1::pre_barrier(LIRAccess& access, LIR_Opr addr_opr,
                                 LIR_Opr pre_val, CodeEmitInfo* info) {
  LIRGenerator* gen = access.gen();
  DecoratorSet decorators = access.decorators();

  // First we test whether marking is in progress.
  BasicType flag_type;
  bool patch = (decorators & C1_NEEDS_PATCHING) != 0;
  bool do_load = pre_val == LIR_OprFact::illegalOpr;
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    flag_type = T_INT;
  } else {
    guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1,
              "Assumption");
    // Use the unsigned type T_BOOLEAN here rather than the signed T_BYTE since some
    // platforms, e.g. ARM, need unsigned load instructions to handle the large offset
    // used to load the satb_mark_queue active flag.
    flag_type = T_BOOLEAN;
  }
  LIR_Opr thrd = gen->getThreadPointer();
  LIR_Address* mark_active_flag_addr =
    new LIR_Address(thrd,
                    in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()),
                    flag_type);
  // Read the marking-in-progress flag.
  // Note: When loading pre_val requires patching, i.e. do_load == true &&
  // patch == true, a safepoint can occur while patching. This makes the
  // pre-barrier non-atomic and invalidates the marking-in-progress check.
  // Therefore, in the presence of patching, we must repeat the same
  // marking-in-progress checking before calling into the Runtime. For
  // simplicity, we do this check unconditionally (regardless of the presence
  // of patching) in the runtime stub
  // (G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub).
  LIR_Opr flag_val = gen->new_register(T_INT);
  __ load(mark_active_flag_addr, flag_val);
  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));

  LIR_PatchCode pre_val_patch_code = lir_patch_none;

  CodeStub* slow;

  if (do_load) {
    assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
    assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");

    if (patch)
      pre_val_patch_code = lir_patch_normal;

    pre_val = gen->new_register(T_OBJECT);

    if (!addr_opr->is_address()) {
      assert(addr_opr->is_register(), "must be");
      addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
    }
    slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info);
  } else {
    assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
    assert(pre_val->is_register(), "must be");
    assert(pre_val->type() == T_OBJECT, "must be an object");
    assert(info == nullptr, "sanity");

    slow = new G1PreBarrierStub(pre_val);
  }

  __ branch(lir_cond_notEqual, slow);
  __ branch_destination(slow->continuation());
}

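// G1 post-barrier: after an oop store into the heap, record cross-region
// references so that the collector can maintain its remembered sets.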
void G1BarrierSetC1::post_barrier(LIRAccess& access, LIR_Opr addr, LIR_Opr new_val) {
  LIRGenerator* gen = access.gen();
  DecoratorSet decorators = access.decorators();
  bool in_heap = (decorators & IN_HEAP) != 0;
  if (!in_heap) {
    return;
  }

  // If the "new_val" is a constant null, no barrier is necessary.
  if (new_val->is_constant() &&
      new_val->as_constant_ptr()->as_jobject() == nullptr) return;

  if (!new_val->is_register()) {
    LIR_Opr new_val_reg = gen->new_register(T_OBJECT);
    if (new_val->is_constant()) {
      __ move(new_val, new_val_reg);
    } else {
      __ leal(new_val, new_val_reg);
    }
    new_val = new_val_reg;
  }
  assert(new_val->is_register(), "must be a register at this point");

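  // Flatten the destination address into a plain pointer register so it can be
  // compared against the new value below.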
  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    LIR_Opr ptr = gen->new_pointer_register();
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");

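  // Cross-region check: XOR the store address with the new value and shift the
  // result right by the heap region grain size. A non-zero result means the two
  // addresses lie in different regions.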
  LIR_Opr xor_res = gen->new_pointer_register();
  LIR_Opr xor_shift_res = gen->new_pointer_register();
#if INCLUDE_CDS
  // When writing to the AOT code cache the heap region grain shift cannot be
  // baked in as a constant, so load it from the AOT Runtime Constants Area.
  LIR_Opr grain_shift_addr = LIR_OprFact::intptrConst(AOTRuntimeConstants::grain_shift_address());
  LIR_Opr grain_shift_reg = gen->new_pointer_register();
  LIR_Address* grain_shift_indirect = new LIR_Address(grain_shift_reg, 0, T_INT);
#ifdef X86
  LIR_Opr grain_shift = gen->shiftCountOpr();
#else // X86
  LIR_Opr grain_shift = gen->new_register(T_INT);
#endif // X86
#endif
  if (two_operand_lir_form) {
    __ move(addr, xor_res);
    __ logical_xor(xor_res, new_val, xor_res);
#if INCLUDE_CDS
    if (SCCache::is_on_for_write()) {
      __ move(grain_shift_addr, grain_shift_reg);
      __ move(xor_res, xor_shift_res);
      __ move(grain_shift_indirect, grain_shift);
      __ unsigned_shift_right(xor_shift_res,
                              grain_shift,
                              xor_shift_res,
                              LIR_Opr::illegalOpr());
    } else
#endif
    {
      __ move(xor_res, xor_shift_res);
      __ unsigned_shift_right(xor_shift_res,
                              LIR_OprFact::intConst(checked_cast<jint>(G1HeapRegion::LogOfHRGrainBytes)),
                              xor_shift_res,
                              LIR_Opr::illegalOpr());
    }
  } else {
    __ logical_xor(addr, new_val, xor_res);
#if INCLUDE_CDS
    if (SCCache::is_on_for_write()) {
      __ move(grain_shift_addr, grain_shift_reg);
      __ move(grain_shift_indirect, grain_shift);
      __ unsigned_shift_right(xor_res,
                              grain_shift,
                              xor_shift_res,
                              LIR_Opr::illegalOpr());
    } else
#endif
    {
      __ unsigned_shift_right(xor_res,
                              LIR_OprFact::intConst(checked_cast<jint>(G1HeapRegion::LogOfHRGrainBytes)),
                              xor_shift_res,
                              LIR_Opr::illegalOpr());
    }
  }

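  // A non-zero result means the store crosses regions: branch to the slow-path
  // stub to apply the post barrier.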
  __ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD));

  CodeStub* slow = new G1PostBarrierStub(addr, new_val);
  __ branch(lir_cond_notEqual, slow);
  __ branch_destination(slow->continuation());
}

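// Loads of referents through weak, phantom, or statically unknown references
// must feed the loaded value into the SATB pre-barrier so that concurrent
// marking does not lose the object.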
void G1BarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
  DecoratorSet decorators = access.decorators();
  bool is_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool is_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
  bool is_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  LIRGenerator *gen = access.gen();

  BarrierSetC1::load_at_resolved(access, result);

  if (access.is_oop() && (is_weak || is_phantom || is_anonymous)) {
    // Register the value in the referent field with the pre-barrier
    LabelObj *Lcont_anonymous;
    if (is_anonymous) {
      Lcont_anonymous = new LabelObj();
      generate_referent_check(access, Lcont_anonymous);
    }
    pre_barrier(access, LIR_OprFact::illegalOpr /* addr_opr */,
                result /* pre_val */, access.patch_emit_info() /* info */);
    if (is_anonymous) {
      __ branch_destination(Lcont_anonymous->label());
    }
  }
}

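// Closures used to generate the shared C1 slow-path runtime stubs for the pre-
// and post-barriers.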
class C1G1PreBarrierCodeGenClosure : public StubAssemblerCodeGenClosure {
  virtual OopMapSet* generate_code(StubAssembler* sasm) {
    G1BarrierSetAssembler* bs = (G1BarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
    bs->generate_c1_pre_barrier_runtime_stub(sasm);
    return nullptr;
  }
};

class C1G1PostBarrierCodeGenClosure : public StubAssemblerCodeGenClosure {
  virtual OopMapSet* generate_code(StubAssembler* sasm) {
    G1BarrierSetAssembler* bs = (G1BarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
    bs->generate_c1_post_barrier_runtime_stub(sasm);
    return nullptr;
  }
};

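// Generate the runtime code blobs that the barrier stubs above call into.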
void G1BarrierSetC1::generate_c1_runtime_stubs(BufferBlob* buffer_blob) {
  C1G1PreBarrierCodeGenClosure pre_code_gen_cl;
  C1G1PostBarrierCodeGenClosure post_code_gen_cl;
  _pre_barrier_c1_runtime_code_blob = Runtime1::generate_blob(buffer_blob, C1StubId::NO_STUBID, "g1_pre_barrier_slow",
                                                              false, &pre_code_gen_cl);
  _post_barrier_c1_runtime_code_blob = Runtime1::generate_blob(buffer_blob, C1StubId::NO_STUBID, "g1_post_barrier_slow",
                                                               false, &post_code_gen_cl);
}