/*
 * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markWord.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/checkedCast.hpp"

int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Register tmp, Label& slow_case) {
  const int aligned_mask = BytesPerWord - 1;
  const int hdr_offset = oopDesc::mark_offset_in_bytes();
  assert(hdr == rax, "hdr must be rax, for the cmpxchg instruction");
  assert_different_registers(hdr, obj, disp_hdr, tmp);
  int null_check_offset = -1;

  verify_oop(obj);

  // save object being locked into the BasicObjectLock
  movptr(Address(disp_hdr, BasicObjectLock::obj_offset()), obj);

  null_check_offset = offset();

  if (DiagnoseSyncOnValueBasedClasses != 0) {
    load_klass(hdr, obj, rscratch1);
    movl(hdr, Address(hdr, Klass::access_flags_offset()));
    testl(hdr, JVM_ACC_IS_VALUE_BASED_CLASS);
    jcc(Assembler::notZero, slow_case);
  }

  // Load object header
  movptr(hdr, Address(obj, hdr_offset));
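  // Illustrative note (editorial, not part of the emitted code): the two low
  // bits of the mark word loaded above encode the lock state -- 01 means
  // unlocked, 00 means stack-locked (the word is then a pointer to a
  // BasicLock on some thread's stack), and 10 means a heavyweight
  // ObjectMonitor is installed. The LM_LEGACY path below relies on this
  // encoding via markWord::unlocked_value.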
  if (LockingMode == LM_LIGHTWEIGHT) {
#ifdef _LP64
    const Register thread = r15_thread;
#else
    const Register thread = disp_hdr;
    get_thread(thread);
#endif
    lightweight_lock(obj, hdr, thread, tmp, slow_case);
  } else if (LockingMode == LM_LEGACY) {
    Label done;
    // and mark it as unlocked
    orptr(hdr, markWord::unlocked_value);
    // save unlocked object header into the displaced header location on the stack
    movptr(Address(disp_hdr, 0), hdr);
    // test if object header is still the same (i.e. unlocked), and if so, store the
    // displaced header address in the object header - if it is not the same, get the
    // object header instead
    MacroAssembler::lock(); // must be immediately before cmpxchg!
    cmpxchgptr(disp_hdr, Address(obj, hdr_offset));
    // if the object header was the same, we're done
    jcc(Assembler::equal, done);
    // if the object header was not the same, it is now in the hdr register
    // => test if it is a stack pointer into the same stack (recursive locking), i.e.:
    //
    // 1) (hdr & aligned_mask) == 0
    // 2) rsp <= hdr
    // 3) hdr <= rsp + page_size
    //
    // these 3 tests can be done by evaluating the following expression:
    //
    // (hdr - rsp) & (aligned_mask - page_size)
    //
    // assuming both the stack pointer and page_size have their least
    // significant 2 bits cleared and page_size is a power of 2
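    //
    // A concrete sanity check (editorial, illustrative only; assumes 64-bit
    // words and a 4 KiB page): aligned_mask - page_size = 7 - 4096
    // = 0x...fffff007, so the andptr below clears bits 3..11 of (hdr - rsp)
    // while keeping bits 0..2 and bits 12 and up. The result is zero exactly
    // when hdr is word aligned (low bits clear, given rsp is aligned) and
    // 0 <= hdr - rsp < page_size (no bits at 12 or above set), which
    // corresponds to conditions 1)-3) above.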
    subptr(hdr, rsp);
    andptr(hdr, aligned_mask - (int)os::vm_page_size());
    // for recursive locking, the result is zero => save it in the displaced header
    // location (null in the displaced hdr location indicates recursive locking)
    movptr(Address(disp_hdr, 0), hdr);
    // otherwise we don't care about the result and handle locking via runtime call
    jcc(Assembler::notZero, slow_case);
    // done
    bind(done);
  }

  inc_held_monitor_count();

  return null_check_offset;
}

void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
  const int aligned_mask = BytesPerWord - 1;
  const int hdr_offset = oopDesc::mark_offset_in_bytes();
  assert(disp_hdr == rax, "disp_hdr must be rax, for the cmpxchg instruction");
  assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different");
  Label done;

  if (LockingMode != LM_LIGHTWEIGHT) {
    // load displaced header
    movptr(hdr, Address(disp_hdr, 0));
    // if the loaded hdr is null we had recursive locking
    testptr(hdr, hdr);
    // if we had recursive locking, we are done
    jcc(Assembler::zero, done);
  }

  // load object
  movptr(obj, Address(disp_hdr, BasicObjectLock::obj_offset()));
  verify_oop(obj);

  if (LockingMode == LM_LIGHTWEIGHT) {
    movptr(disp_hdr, Address(obj, hdr_offset));
    andptr(disp_hdr, ~(int32_t)markWord::lock_mask_in_place);
    lightweight_unlock(obj, disp_hdr, hdr, slow_case);
  } else if (LockingMode == LM_LEGACY) {
    // test if object header is pointing to the displaced header, and if so, restore
    // the displaced header in the object - if the object header is not pointing to
    // the displaced header, get the object header instead
    MacroAssembler::lock(); // must be immediately before cmpxchg!
    cmpxchgptr(hdr, Address(obj, hdr_offset));
    // if the object header was not pointing to the displaced header,
    // we do unlocking via runtime call
    jcc(Assembler::notEqual, slow_case);
    // done
  }
  bind(done);
  dec_held_monitor_count();
}


// Defines obj, preserves var_size_in_bytes
void C1_MacroAssembler::try_allocate(Register obj, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2, Label& slow_case) {
  if (UseTLAB) {
    tlab_allocate(noreg, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
  } else {
    jmp(slow_case);
  }
}


void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) {
  assert_different_registers(obj, klass, len);
  movptr(Address(obj, oopDesc::mark_offset_in_bytes()), checked_cast<int32_t>(markWord::prototype().value()));
#ifdef _LP64
  if (UseCompressedClassPointers) { // Take care not to kill klass
    movptr(t1, klass);
    encode_klass_not_null(t1, rscratch1);
    movl(Address(obj, oopDesc::klass_offset_in_bytes()), t1);
  } else
#endif
  {
    movptr(Address(obj, oopDesc::klass_offset_in_bytes()), klass);
  }

  if (len->is_valid()) {
    movl(Address(obj, arrayOopDesc::length_offset_in_bytes()), len);
  }
#ifdef _LP64
  else if (UseCompressedClassPointers) {
    xorptr(t1, t1);
    store_klass_gap(obj, t1);
  }
#endif
}
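
// Illustrative header layout assumed by initialize_header() above (editorial
// sketch; 64-bit with compressed class pointers):
//   offset  0: markWord (8 bytes)
//   offset  8: narrow Klass* (4 bytes)
//   offset 12: klass gap (4 bytes) -- zeroed above for plain objects;
//              arrays overlay their length field here instead
// With uncompressed class pointers the Klass* occupies offsets 8..15 and
// there is no gap to clear.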

// preserves obj, destroys len_in_bytes
void C1_MacroAssembler::initialize_body(Register obj, Register len_in_bytes, int hdr_size_in_bytes, Register t1) {
  assert(hdr_size_in_bytes >= 0, "header size must be positive or 0");
  Label done;

  // len_in_bytes is positive and ptr sized
  subptr(len_in_bytes, hdr_size_in_bytes);
  zero_memory(obj, len_in_bytes, hdr_size_in_bytes, t1);
  bind(done);
}


void C1_MacroAssembler::allocate_object(Register obj, Register t1, Register t2, int header_size, int object_size, Register klass, Label& slow_case) {
  assert(obj == rax, "obj must be in rax, for cmpxchg");
  assert_different_registers(obj, t1, t2); // XXX really?
  assert(header_size >= 0 && object_size >= header_size, "illegal sizes");

  try_allocate(obj, noreg, object_size * BytesPerWord, t1, t2, slow_case);

  initialize_object(obj, klass, noreg, object_size * HeapWordSize, t1, t2, UseTLAB);
}

void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2, bool is_tlab_allocated) {
  assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0,
         "con_size_in_bytes is not multiple of alignment");
  const int hdr_size_in_bytes = instanceOopDesc::header_size() * HeapWordSize;

  initialize_header(obj, klass, noreg, t1, t2);

  if (!(UseTLAB && ZeroTLAB && is_tlab_allocated)) {
    // clear rest of allocated space
    const Register t1_zero = t1;
    const Register index = t2;
    const int threshold = 6 * BytesPerWord; // approximate break even point for code size (see comments below)
    if (var_size_in_bytes != noreg) {
      mov(index, var_size_in_bytes);
      initialize_body(obj, index, hdr_size_in_bytes, t1_zero);
    } else if (con_size_in_bytes <= threshold) {
      // use explicit null stores
      // code size = 2 + 3*n bytes (n = number of fields to clear)
      xorptr(t1_zero, t1_zero); // use t1_zero reg to clear memory (shorter code)
      for (int i = hdr_size_in_bytes; i < con_size_in_bytes; i += BytesPerWord)
        movptr(Address(obj, i), t1_zero);
    } else if (con_size_in_bytes > hdr_size_in_bytes) {
      // use loop to null out the fields
      // code size = 16 bytes for even n (n = number of fields to clear)
      // initialize last object field first if odd number of fields
      xorptr(t1_zero, t1_zero); // use t1_zero reg to clear memory (shorter code)
      movptr(index, (con_size_in_bytes - hdr_size_in_bytes) >> 3);
      // initialize last object field if constant size is odd
      if (((con_size_in_bytes - hdr_size_in_bytes) & 4) != 0)
        movptr(Address(obj, con_size_in_bytes - (1*BytesPerWord)), t1_zero);
      // initialize remaining object fields: rdx is a multiple of 2
      { Label loop;
        bind(loop);
        movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - (1*BytesPerWord)),
               t1_zero);
        NOT_LP64(movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - (2*BytesPerWord)),
                        t1_zero);)
        decrement(index);
        jcc(Assembler::notZero, loop);
      }
    }
  }

  if (CURRENT_ENV->dtrace_alloc_probes()) {
    assert(obj == rax, "must be");
    call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
  }

  verify_oop(obj);
}

void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1, Register t2, int header_size, Address::ScaleFactor f, Register klass, Label& slow_case) {
  assert(obj == rax, "obj must be in rax, for cmpxchg");
  assert_different_registers(obj, len, t1, t2, klass);

  // determine alignment mask
  assert(!(BytesPerWord & 1), "must be a multiple of 2 for masking code to work");

  // check for negative or excessive length
  cmpptr(len, checked_cast<int32_t>(max_array_allocation_length));
  jcc(Assembler::above, slow_case);

  const Register arr_size = t2; // okay to be the same
  // align object end
  movptr(arr_size, header_size * BytesPerWord + MinObjAlignmentInBytesMask);
  lea(arr_size, Address(arr_size, len, f));
  andptr(arr_size, ~MinObjAlignmentInBytesMask);
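  // Worked example for the three instructions above (editorial, illustrative;
  // assumes 64-bit: BytesPerWord = 8, MinObjAlignmentInBytesMask = 7): for an
  // int[10] with header_size = 2 words and f = times_4, arr_size starts at
  // 2*8 + 7 = 23, the lea adds 10*4 = 40 giving 63, and the andptr rounds
  // down to 56 -- i.e. align_up(16 + 40, 8) without a separate rounding step.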

  try_allocate(obj, arr_size, 0, t1, t2, slow_case);

  initialize_header(obj, klass, len, t1, t2);

  // clear rest of allocated space
  const Register len_zero = len;
  initialize_body(obj, arr_size, header_size * BytesPerWord, len_zero);

  if (CURRENT_ENV->dtrace_alloc_probes()) {
    assert(obj == rax, "must be");
    call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
  }

  verify_oop(obj);
}



void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
  verify_oop(receiver);
  // explicit null check not needed since load from [klass_offset] causes a trap
  // check against inline cache
  assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
  int start_offset = offset();

  if (UseCompressedClassPointers) {
    load_klass(rscratch1, receiver, rscratch2);
    cmpptr(rscratch1, iCache);
  } else {
    cmpptr(iCache, Address(receiver, oopDesc::klass_offset_in_bytes()));
  }
  // if icache check fails, then jump to runtime routine
  // Note: RECEIVER must still contain the receiver!
  jump_cc(Assembler::notEqual,
          RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
  assert(UseCompressedClassPointers || offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
}


void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_bytes) {
  assert(bang_size_in_bytes >= frame_size_in_bytes, "stack bang size incorrect");
  // Make sure there is enough stack space for this method's activation.
  // Note that we do this before doing an enter(). This matches the
  // ordering of C2's stack overflow check / rsp decrement and allows
  // the SharedRuntime stack overflow handling to be consistent
  // between the two compilers.
  generate_stack_overflow_check(bang_size_in_bytes);

  push(rbp);
  if (PreserveFramePointer) {
    mov(rbp, rsp);
  }
#if !defined(_LP64) && defined(COMPILER2)
  if (UseSSE < 2 && !CompilerConfig::is_c1_only_no_jvmci()) {
    // c2 leaves fpu stack dirty. Clean it on entry
    empty_FPU_stack();
  }
#endif // !_LP64 && COMPILER2
  decrement(rsp, frame_size_in_bytes); // does not emit code for frame_size == 0

  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  // C1 code is not hot enough to micro optimize the nmethod entry barrier with an out-of-line stub
  bs->nmethod_entry_barrier(this, nullptr /* slow_path */, nullptr /* continuation */);
}


void C1_MacroAssembler::remove_frame(int frame_size_in_bytes) {
  increment(rsp, frame_size_in_bytes); // Does not emit code for frame_size == 0
  pop(rbp);
}


void C1_MacroAssembler::verified_entry(bool breakAtEntry) {
  if (breakAtEntry || VerifyFPU) {
    // Verified Entry first instruction should be 5 bytes long for correct
    // patching by patch_verified_entry().
    //
    // Breakpoint and VerifyFPU have one byte first instruction.
    // Also first instruction will be one byte "push(rbp)" if stack banging
    // code is not generated (see build_frame() above).
    // For all these cases generate long instruction first.
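    //
    // (Editorial note: fat_nop() emits a single 5-byte no-op, so the first
    // instruction of the verified entry stays wide enough to be patched
    // atomically even when the real first instruction -- int3 or the 1-byte
    // push(rbp) -- would otherwise be shorter.)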
    fat_nop();
  }
  if (breakAtEntry) int3();
  // build frame
  IA32_ONLY( verify_FPU(0, "method_entry"); )
}

void C1_MacroAssembler::load_parameter(int offset_in_words, Register reg) {
  // rbp, + 0: link
  //     + 1: return address
  //     + 2: argument with offset 0
  //     + 3: argument with offset 1
  //     + 4: ...

  movptr(reg, Address(rbp, (offset_in_words + 2) * BytesPerWord));
}

#ifndef PRODUCT

void C1_MacroAssembler::verify_stack_oop(int stack_offset) {
  if (!VerifyOops) return;
  verify_oop_addr(Address(rsp, stack_offset));
}

void C1_MacroAssembler::verify_not_null_oop(Register r) {
  if (!VerifyOops) return;
  Label not_null;
  testptr(r, r);
  jcc(Assembler::notZero, not_null);
  stop("non-null oop required");
  bind(not_null);
  verify_oop(r);
}

void C1_MacroAssembler::invalidate_registers(bool inv_rax, bool inv_rbx, bool inv_rcx, bool inv_rdx, bool inv_rsi, bool inv_rdi) {
#ifdef ASSERT
  if (inv_rax) movptr(rax, 0xDEAD);
  if (inv_rbx) movptr(rbx, 0xDEAD);
  if (inv_rcx) movptr(rcx, 0xDEAD);
  if (inv_rdx) movptr(rdx, 0xDEAD);
  if (inv_rsi) movptr(rsi, 0xDEAD);
  if (inv_rdi) movptr(rdi, 0xDEAD);
#endif
}

#endif // ifndef PRODUCT