/*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "code/aotCodeCache.hpp"
#include "gc/g1/c1/g1BarrierSetC1.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "utilities/macros.hpp"

#ifdef ASSERT
#define __ gen->lir(__FILE__, __LINE__)->
#else
#define __ gen->lir()->
#endif

void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
  G1BarrierSetAssembler* bs = (G1BarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
  bs->gen_pre_barrier_stub(ce, this);
}

void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
  G1BarrierSetAssembler* bs = (G1BarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
  bs->gen_post_barrier_stub(ce, this);
}

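// Emits the inline fast path of the G1 pre-write (SATB) barrier: while
// concurrent marking is active, the value about to be overwritten must be
// recorded in the thread-local SATB queue so that the snapshot-at-the-
// beginning invariant is preserved. Only the marking-active check is
// emitted inline; the actual queueing is done out of line by
// G1PreBarrierStub.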
void G1BarrierSetC1::pre_barrier(LIRAccess& access, LIR_Opr addr_opr,
                                 LIR_Opr pre_val, CodeEmitInfo* info) {
  LIRGenerator* gen = access.gen();
  DecoratorSet decorators = access.decorators();

  // First we test whether marking is in progress.
  BasicType flag_type;
  bool patch = (decorators & C1_NEEDS_PATCHING) != 0;
  bool do_load = pre_val == LIR_OprFact::illegalOpr;
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    flag_type = T_INT;
  } else {
    guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1,
              "Assumption");
    // Use the unsigned type T_BOOLEAN here rather than the signed T_BYTE since some
    // platforms, e.g. ARM, need unsigned instructions to handle the large offset
    // when loading the satb_mark_queue.
    flag_type = T_BOOLEAN;
  }
  LIR_Opr thrd = gen->getThreadPointer();
  LIR_Address* mark_active_flag_addr =
    new LIR_Address(thrd,
                    in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()),
                    flag_type);
  // Read the marking-in-progress flag.
  // Note: When loading pre_val requires patching, i.e. do_load == true &&
  // patch == true, a safepoint can occur while patching. This makes the
  // pre-barrier non-atomic and invalidates the marking-in-progress check.
  // Therefore, in the presence of patching, we must repeat the same
  // marking-in-progress check before calling into the runtime. For
  // simplicity, we do this check unconditionally (regardless of the presence
  // of patching) in the runtime stub
  // (G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub).
  LIR_Opr flag_val = gen->new_register(T_INT);
  __ load(mark_active_flag_addr, flag_val);
  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));

  LIR_PatchCode pre_val_patch_code = lir_patch_none;

  CodeStub* slow;

  if (do_load) {
    assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
    assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");

    if (patch) {
      pre_val_patch_code = lir_patch_normal;
    }

    pre_val = gen->new_register(T_OBJECT);

    if (!addr_opr->is_address()) {
      assert(addr_opr->is_register(), "must be");
      addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
    }
    slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info);
  } else {
    assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
    assert(pre_val->is_register(), "must be");
    assert(pre_val->type() == T_OBJECT, "must be an object");
    assert(info == nullptr, "sanity");

    slow = new G1PreBarrierStub(pre_val);
  }

  __ branch(lir_cond_notEqual, slow);
  __ branch_destination(slow->continuation());
}

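// Emits the inline fast path of the G1 post-write barrier: a store that
// creates a cross-region oop reference must mark the card covering the
// updated field so that concurrent refinement can find the reference later.
// Stores within a single region are filtered out inline by XOR-ing the store
// address with the new value and shifting the result right by the region
// size log; a zero result means both lie in the same region. Stores of
// constant null need no barrier at all; everything else that survives the
// region check is handled out of line by G1PostBarrierStub.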
void G1BarrierSetC1::post_barrier(LIRAccess& access, LIR_Opr addr, LIR_Opr new_val) {
  LIRGenerator* gen = access.gen();
  DecoratorSet decorators = access.decorators();
  bool in_heap = (decorators & IN_HEAP) != 0;
  if (!in_heap) {
    return;
  }

  // If the "new_val" is a constant null, no barrier is necessary.
  if (new_val->is_constant() &&
      new_val->as_constant_ptr()->as_jobject() == nullptr) return;

  if (!new_val->is_register()) {
    LIR_Opr new_val_reg = gen->new_register(T_OBJECT);
    if (new_val->is_constant()) {
      __ move(new_val, new_val_reg);
    } else {
      __ leal(new_val, new_val_reg);
    }
    new_val = new_val_reg;
  }
  assert(new_val->is_register(), "must be a register at this point");

  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    LIR_Opr ptr = gen->new_pointer_register();
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");

  LIR_Opr xor_res = gen->new_pointer_register();
  LIR_Opr xor_shift_res = gen->new_pointer_register();
#if INCLUDE_CDS
  // We need to load the grain shift from the AOT Runtime Constants Area.
  LIR_Opr grain_shift_addr = LIR_OprFact::intptrConst(AOTRuntimeConstants::grain_shift_address());
  LIR_Opr grain_shift_reg = gen->new_pointer_register();
  LIR_Address* grain_shift_indirect = new LIR_Address(grain_shift_reg, 0, T_INT);
#ifdef X86
  LIR_Opr grain_shift = gen->shiftCountOpr();
#else // X86
  LIR_Opr grain_shift = gen->new_register(T_INT);
#endif // X86
#endif // INCLUDE_CDS
  if (two_operand_lir_form) {
    __ move(addr, xor_res);
    __ logical_xor(xor_res, new_val, xor_res);
#if INCLUDE_CDS
    if (AOTCodeCache::is_on_for_dump()) {
      __ move(grain_shift_addr, grain_shift_reg);
      __ move(xor_res, xor_shift_res);
      __ move(grain_shift_indirect, grain_shift);
      __ unsigned_shift_right(xor_shift_res,
                              grain_shift,
                              xor_shift_res,
                              LIR_Opr::illegalOpr());
    } else
#endif
    {
      __ move(xor_res, xor_shift_res);
      __ unsigned_shift_right(xor_shift_res,
                              LIR_OprFact::intConst(checked_cast<jint>(G1HeapRegion::LogOfHRGrainBytes)),
                              xor_shift_res,
                              LIR_Opr::illegalOpr());
    }
  } else {
    __ logical_xor(addr, new_val, xor_res);
#if INCLUDE_CDS
    if (AOTCodeCache::is_on_for_dump()) {
      __ move(grain_shift_addr, grain_shift_reg);
      __ move(grain_shift_indirect, grain_shift);
      __ unsigned_shift_right(xor_res,
                              grain_shift,
                              xor_shift_res,
                              LIR_Opr::illegalOpr());
    } else
#endif
    {
      __ unsigned_shift_right(xor_res,
                              LIR_OprFact::intConst(checked_cast<jint>(G1HeapRegion::LogOfHRGrainBytes)),
                              xor_shift_res,
                              LIR_Opr::illegalOpr());
    }
  }

  __ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD));

  CodeStub* slow = new G1PostBarrierStub(addr, new_val);
  __ branch(lir_cond_notEqual, slow);
  __ branch_destination(slow->continuation());
}

void G1BarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
  DecoratorSet decorators = access.decorators();
  bool is_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool is_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
  bool is_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  LIRGenerator* gen = access.gen();

  BarrierSetC1::load_at_resolved(access, result);

  if (access.is_oop() && (is_weak || is_phantom || is_anonymous)) {
    // Register the value in the referent field with the pre-barrier.
    LabelObj* Lcont_anonymous;
    if (is_anonymous) {
      Lcont_anonymous = new LabelObj();
      generate_referent_check(access, Lcont_anonymous);
    }
    pre_barrier(access, LIR_OprFact::illegalOpr /* addr_opr */,
                result /* pre_val */, access.patch_emit_info() /* info */);
    if (is_anonymous) {
      __ branch_destination(Lcont_anonymous->label());
    }
  }
}

class C1G1PreBarrierCodeGenClosure : public StubAssemblerCodeGenClosure {
  virtual OopMapSet* generate_code(StubAssembler* sasm) {
    G1BarrierSetAssembler* bs = (G1BarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
    bs->generate_c1_pre_barrier_runtime_stub(sasm);
    return nullptr;
  }
};

class C1G1PostBarrierCodeGenClosure : public StubAssemblerCodeGenClosure {
  virtual OopMapSet* generate_code(StubAssembler* sasm) {
    G1BarrierSetAssembler* bs = (G1BarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
    bs->generate_c1_post_barrier_runtime_stub(sasm);
    return nullptr;
  }
};

bool G1BarrierSetC1::generate_c1_runtime_stubs(BufferBlob* buffer_blob) {
  C1G1PreBarrierCodeGenClosure pre_code_gen_cl;
  C1G1PostBarrierCodeGenClosure post_code_gen_cl;
  _pre_barrier_c1_runtime_code_blob = Runtime1::generate_blob(buffer_blob, C1StubId::NO_STUBID, "g1_pre_barrier_slow",
                                                              false, &pre_code_gen_cl);
  _post_barrier_c1_runtime_code_blob = Runtime1::generate_blob(buffer_blob, C1StubId::NO_STUBID, "g1_post_barrier_slow",
                                                               false, &post_code_gen_cl);
  return _pre_barrier_c1_runtime_code_blob != nullptr && _post_barrier_c1_runtime_code_blob != nullptr;
}
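
// Drop the shorthand LIR macro so it cannot leak into any code compiled
// together with this file.
#undef __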