/*
 * Copyright (c) 2008, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markWord.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"

// Note: Rtemp usage in this file should not impact C2 and should be
// correct as long as it is not implicitly used in lower layers (the
// arm [macro]assembler) and is used with care in the other C1-specific
// files.

void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
  Label verified;
  load_klass(Rtemp, receiver);
  cmp(Rtemp, iCache);
  b(verified, eq); // jump over alignment no-ops
  jump(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type);
  align(CodeEntryAlignment);
  bind(verified);
}

void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_bytes) {
  assert(bang_size_in_bytes >= frame_size_in_bytes, "stack bang size incorrect");
  assert((frame_size_in_bytes % StackAlignmentInBytes) == 0, "frame size should be aligned");

  arm_stack_overflow_check(bang_size_in_bytes, Rtemp);

  // FP can no longer be used to remember SP, because it may be modified
  // if this method contains a methodHandle call site.
  raw_push(FP, LR);
  sub_slow(SP, SP, frame_size_in_bytes);
}

void C1_MacroAssembler::remove_frame(int frame_size_in_bytes) {
  add_slow(SP, SP, frame_size_in_bytes);
  raw_pop(FP, LR);
}

void C1_MacroAssembler::verified_entry(bool breakAtEntry) {
  if (breakAtEntry) {
    breakpoint();
  }
}

// Puts the address of the allocated object into register `obj`, and the end
// of the allocated object into register `obj_end`.
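// Falls through on success; branches to `slow_case` if the allocation fails.
// (`tmp2` is only needed on the eden, i.e. non-TLAB, allocation path.)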
void C1_MacroAssembler::try_allocate(Register obj, Register obj_end, Register tmp1, Register tmp2,
                                     RegisterOrConstant size_expression, Label& slow_case) {
  if (UseTLAB) {
    tlab_allocate(obj, obj_end, tmp1, size_expression, slow_case);
  } else {
    eden_allocate(obj, obj_end, tmp1, tmp2, size_expression, slow_case);
  }
}


void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register tmp) {
  assert_different_registers(obj, klass, len, tmp);

  mov(tmp, (intptr_t)markWord::prototype().value());

  str(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
  str(klass, Address(obj, oopDesc::klass_offset_in_bytes()));

  if (len->is_valid()) {
    str_32(len, Address(obj, arrayOopDesc::length_offset_in_bytes()));
  }
}


// Zeroes the object body [base..obj_end]. Clobbers the `base` and `tmp` registers.
void C1_MacroAssembler::initialize_body(Register base, Register obj_end, Register tmp) {
  zero_memory(base, obj_end, tmp);
}


void C1_MacroAssembler::initialize_object(Register obj, Register obj_end, Register klass,
                                          Register len, Register tmp1, Register tmp2,
                                          RegisterOrConstant header_size, int obj_size_in_bytes,
                                          bool is_tlab_allocated)
{
  assert_different_registers(obj, obj_end, klass, len, tmp1, tmp2);
  initialize_header(obj, klass, len, tmp1);

  const Register ptr = tmp2;

  if (!(UseTLAB && ZeroTLAB && is_tlab_allocated)) {
    if (obj_size_in_bytes >= 0 && obj_size_in_bytes <= 8 * BytesPerWord) {
      mov(tmp1, 0);
      const int base = instanceOopDesc::header_size() * HeapWordSize;
      for (int i = base; i < obj_size_in_bytes; i += wordSize) {
        str(tmp1, Address(obj, i));
      }
    } else {
      assert(header_size.is_constant() || header_size.as_register() == ptr, "code assumption");
      add(ptr, obj, header_size);
      initialize_body(ptr, obj_end, tmp1);
    }
  }

  // A StoreStore barrier is required after complete initialization
  // (header stores + content zeroing), before the object may escape.
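  // Note that the barrier is emitted even when the zeroing above is skipped
  // (pre-zeroed TLAB memory): the header stores must still not be reordered
  // past the store that publishes the object to other threads.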
  membar(MacroAssembler::StoreStore, tmp1);
}

void C1_MacroAssembler::allocate_object(Register obj, Register tmp1, Register tmp2, Register tmp3,
                                        int header_size, int object_size,
                                        Register klass, Label& slow_case) {
  assert_different_registers(obj, tmp1, tmp2, tmp3, klass, Rtemp);
  assert(header_size >= 0 && object_size >= header_size, "illegal sizes");
  const int object_size_in_bytes = object_size * BytesPerWord;

  const Register obj_end = tmp1;
  const Register len = noreg;

  if (Assembler::is_arith_imm_in_range(object_size_in_bytes)) {
    try_allocate(obj, obj_end, tmp2, tmp3, object_size_in_bytes, slow_case);
  } else {
    // Rtemp should be free at the C1 LIR level
    mov_slow(Rtemp, object_size_in_bytes);
    try_allocate(obj, obj_end, tmp2, tmp3, Rtemp, slow_case);
  }
  initialize_object(obj, obj_end, klass, len, tmp2, tmp3,
                    instanceOopDesc::header_size() * HeapWordSize,
                    object_size_in_bytes, /* is_tlab_allocated */ UseTLAB);
}

void C1_MacroAssembler::allocate_array(Register obj, Register len,
                                       Register tmp1, Register tmp2, Register tmp3,
                                       int header_size, int element_size,
                                       Register klass, Label& slow_case) {
  assert_different_registers(obj, len, tmp1, tmp2, tmp3, klass, Rtemp);
  const int header_size_in_bytes = header_size * BytesPerWord;
  const int scale_shift = exact_log2(element_size);
  const Register obj_size = Rtemp; // Rtemp should be free at the C1 LIR level

  cmp_32(len, max_array_allocation_length);
  b(slow_case, hs);

  bool align_header = ((header_size_in_bytes | element_size) & MinObjAlignmentInBytesMask) != 0;
  assert(align_header || ((header_size_in_bytes & MinObjAlignmentInBytesMask) == 0), "must be");
  assert(align_header || ((element_size & MinObjAlignmentInBytesMask) == 0), "must be");

  mov(obj_size, header_size_in_bytes + (align_header ? (MinObjAlignmentInBytes - 1) : 0));
  add_ptr_scaled_int32(obj_size, obj_size, len, scale_shift);

  if (align_header) {
    align_reg(obj_size, obj_size, MinObjAlignmentInBytes);
  }

  try_allocate(obj, tmp1, tmp2, tmp3, obj_size, slow_case);
  initialize_object(obj, tmp1, klass, len, tmp2, tmp3, header_size_in_bytes, -1,
                    /* is_tlab_allocated */ UseTLAB);
}

int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
  Label done, fast_lock, fast_lock_done;
  int null_check_offset = 0;

  const Register tmp2 = Rtemp; // Rtemp should be free at the C1 LIR level
  assert_different_registers(hdr, obj, disp_hdr, tmp2);

  assert(BasicObjectLock::lock_offset_in_bytes() == 0, "adjust this code");
  const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
  const int mark_offset = BasicLock::displaced_header_offset_in_bytes();

  str(obj, Address(disp_hdr, obj_offset));

  null_check_offset = offset();

  if (DiagnoseSyncOnValueBasedClasses != 0) {
    load_klass(tmp2, obj);
    ldr_u32(tmp2, Address(tmp2, Klass::access_flags_offset()));
    tst(tmp2, JVM_ACC_IS_VALUE_BASED_CLASS);
    b(slow_case, ne);
  }

  assert(oopDesc::mark_offset_in_bytes() == 0, "Required by atomic instructions");

  // On MP platforms the next load could return a 'stale' value if the memory
  // location has been modified by another thread. That is acceptable, as either
  // the CAS or the slow case path is taken in that case.
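  // Note: the checks below rely on the stack-locking mark word encoding:
  // the low two bits of the mark word are 01 for an unlocked object and
  // 00 for a stack-locked one, in which case the mark word is a pointer
  // to the BasicLock on the owning thread's stack.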
  // Must be the first instruction here, because the implicit null check relies on it
  ldr(hdr, Address(obj, oopDesc::mark_offset_in_bytes()));

  tst(hdr, markWord::unlocked_value);
  b(fast_lock, ne);

  // Check for recursive locking.
  // See comments in InterpreterMacroAssembler::lock_object for
  // an explanation of the fast recursive locking check.
  // -1- test low 2 bits
  movs(tmp2, AsmOperand(hdr, lsl, 30));
  // -2- test (hdr - SP) if the low two bits are 0
  sub(tmp2, hdr, SP, eq);
  movs(tmp2, AsmOperand(tmp2, lsr, exact_log2(os::vm_page_size())), eq);
  // If still 'eq' then recursive locking is OK:
  // set to zero if recursive lock, set to non-zero otherwise (see discussion in JDK-8267042)
  str(tmp2, Address(disp_hdr, mark_offset));
  b(fast_lock_done, eq);
  // else need slow case
  b(slow_case);


  bind(fast_lock);
  // Save the previous object header in the BasicLock structure and update the header
  str(hdr, Address(disp_hdr, mark_offset));

  cas_for_lock_acquire(hdr, disp_hdr, obj, tmp2, slow_case);

  bind(fast_lock_done);
  bind(done);

  return null_check_offset;
}

void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
  assert_different_registers(hdr, obj, disp_hdr, Rtemp);
  Register tmp2 = Rtemp;

  assert(BasicObjectLock::lock_offset_in_bytes() == 0, "adjust this code");
  const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
  const int mark_offset = BasicLock::displaced_header_offset_in_bytes();

  Label done;

  assert(oopDesc::mark_offset_in_bytes() == 0, "Required by atomic instructions");

  // Load the displaced header from the lock
  ldr(hdr, Address(disp_hdr, mark_offset));
  // If hdr is NULL, we've got recursive locking and there's nothing more to do
  cbz(hdr, done);

  // Load the object
  ldr(obj, Address(disp_hdr, obj_offset));

  // Restore the object header
  cas_for_lock_release(disp_hdr, hdr, obj, tmp2, slow_case);

  bind(done);
}


#ifndef PRODUCT

void C1_MacroAssembler::verify_stack_oop(int stack_offset) {
  if (!VerifyOops) return;
  verify_oop_addr(Address(SP, stack_offset));
}

void C1_MacroAssembler::verify_not_null_oop(Register r) {
  Label not_null;
  cbnz(r, not_null);
  stop("non-null oop required");
  bind(not_null);
  if (!VerifyOops) return;
  verify_oop(r);
}

#endif // !PRODUCT