/*
 * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markWord.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"

void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
  Label ic_miss, ic_hit;
  verify_oop(receiver, FILE_AND_LINE);
  int klass_offset = oopDesc::klass_offset_in_bytes();

  if (!ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(klass_offset)) {
    if (VM_Version::has_CompareBranch()) {
      z_cgij(receiver, 0, Assembler::bcondEqual, ic_miss);
    } else {
      z_ltgr(receiver, receiver);
      z_bre(ic_miss);
    }
  }

  compare_klass_ptr(iCache, klass_offset, receiver, false);
  z_bre(ic_hit);

  // If icache check fails, then jump to runtime routine.
  // Note: RECEIVER must still contain the receiver!
  load_const_optimized(Z_R1_scratch, AddressLiteral(SharedRuntime::get_ic_miss_stub()));
  z_br(Z_R1_scratch);
  align(CodeEntryAlignment);
  bind(ic_hit);
}

void C1_MacroAssembler::explicit_null_check(Register base) {
  ShouldNotCallThis(); // unused
}

void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_bytes, int max_monitors) {
  assert(bang_size_in_bytes >= frame_size_in_bytes, "stack bang size incorrect");
  generate_stack_overflow_check(bang_size_in_bytes);
  save_return_pc();
  push_frame(frame_size_in_bytes);

  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->nmethod_entry_barrier(this);
}

void C1_MacroAssembler::verified_entry(bool breakAtEntry) {
  if (breakAtEntry) z_illtrap(0xC1);
}

void C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
  const int hdr_offset = oopDesc::mark_offset_in_bytes();
  assert_different_registers(hdr, obj, disp_hdr);
  NearLabel done;

  verify_oop(obj, FILE_AND_LINE);

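  // Fast-path stack locking in short (added commentary; the code below is authoritative):
  //  1. Keep a copy of the unlocked mark word in the displaced header slot at
  //     offset 0 of disp_hdr (the BasicObjectLock on the stack).
  //  2. Atomically install disp_hdr into the object's mark word; z_csg emits CSG,
  //     the z/Architecture 64-bit Compare and Swap.
  //  3. If the swap fails, check whether the mark already points into this thread's
  //     stack (recursive locking); any other case is handled via slow_case.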
  // Load object header.
  z_lg(hdr, Address(obj, hdr_offset));

  // Save object being locked into the BasicObjectLock...
  z_stg(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));

  if (DiagnoseSyncOnValueBasedClasses != 0) {
    load_klass(Z_R1_scratch, obj);
    testbit(Address(Z_R1_scratch, Klass::access_flags_offset()), exact_log2(JVM_ACC_IS_VALUE_BASED_CLASS));
    z_btrue(slow_case);
  }

  // and mark it as unlocked.
  z_oill(hdr, markWord::unlocked_value);
  // Save unlocked object header into the displaced header location on the stack.
  z_stg(hdr, Address(disp_hdr, (intptr_t)0));
  // Test if object header is still the same (i.e. unlocked), and if so, store the
  // displaced header address in the object header. If it is not the same, get the
  // object header instead.
  z_csg(hdr, disp_hdr, hdr_offset, obj);
  // If the object header was the same, we're done.
  branch_optimized(Assembler::bcondEqual, done);
  // If the object header was not the same, it is now in the hdr register.
  // => Test if it is a stack pointer into the same stack (recursive locking), i.e.:
  //
  // 1) (hdr & markWord::lock_mask_in_place) == 0
  // 2) rsp <= hdr
  // 3) hdr <= rsp + page_size
  //
  // These 3 tests can be done by evaluating the following expression:
  //
  // (hdr - Z_SP) & (~(page_size - 1) | markWord::lock_mask_in_place)
  //
  // assuming both the stack pointer and page_size have their least
  // significant 2 bits cleared and page_size is a power of 2.
  z_sgr(hdr, Z_SP);

  load_const_optimized(Z_R0_scratch, (~(os::vm_page_size() - 1) | markWord::lock_mask_in_place));
  z_ngr(hdr, Z_R0_scratch); // AND sets CC (result eq/ne 0).
  // For recursive locking, the result is zero. => Save it in the displaced header
  // location (NULL in the displaced hdr location indicates recursive locking).
  z_stg(hdr, Address(disp_hdr, (intptr_t)0));
  // Otherwise we don't care about the result and handle locking via runtime call.
  branch_optimized(Assembler::bcondNotZero, slow_case);
  // done
  bind(done);
}

void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
  const int aligned_mask = BytesPerWord - 1;
  const int hdr_offset = oopDesc::mark_offset_in_bytes();
  assert_different_registers(hdr, obj, disp_hdr);
  NearLabel done;

  // Load displaced header.
  z_ltg(hdr, Address(disp_hdr, (intptr_t)0));
  // If the loaded hdr is NULL we had recursive locking, and we are done.
  z_bre(done);
  // Load object.
  z_lg(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
  verify_oop(obj, FILE_AND_LINE);
  // Test if object header is pointing to the displaced header, and if so, restore
  // the displaced header in the object. If the object header is not pointing to
  // the displaced header, get the object header instead.
  z_csg(disp_hdr, hdr, hdr_offset, obj);
  // If the object header was not pointing to the displaced header,
  // we do unlocking via runtime call.
  branch_optimized(Assembler::bcondNotEqual, slow_case);
  // done
  bind(done);
}

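// Note on the fast path below (added commentary): with TLABs enabled,
// MacroAssembler::tlab_allocate() bump-allocates from the current thread's TLAB
// (roughly: obj = top; top += size; branch to slow_case if the new top would
// exceed the TLAB end). Any request the fast path cannot satisfy ends up in
// slow_case, i.e. in the C1 runtime.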
void C1_MacroAssembler::try_allocate(
  Register obj,               // result: Pointer to object after successful allocation.
  Register var_size_in_bytes, // Object size in bytes if unknown at compile time; invalid otherwise.
  int      con_size_in_bytes, // Object size in bytes if known at compile time.
  Register t1,                // Temp register: Must be global register for incr_allocated_bytes.
  Label&   slow_case          // Continuation point if fast allocation fails.
) {
  if (UseTLAB) {
    tlab_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, slow_case);
  } else {
    // Allocation in shared Eden not implemented, because sapjvm allocation trace does not allow it.
    z_brul(slow_case);
  }
}

void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register Rzero, Register t1) {
  assert_different_registers(obj, klass, len, t1, Rzero);
  // This assumes that all prototype bits fit in an int32_t.
  load_const_optimized(t1, (intx)markWord::prototype().value());
  z_stg(t1, Address(obj, oopDesc::mark_offset_in_bytes()));

  if (len->is_valid()) {
    // Length will be in the klass gap, if one exists.
    z_st(len, Address(obj, arrayOopDesc::length_offset_in_bytes()));
  } else if (UseCompressedClassPointers) {
    store_klass_gap(Rzero, obj); // Zero klass gap for compressed oops.
  }
  store_klass(klass, obj, t1);
}

void C1_MacroAssembler::initialize_body(Register objectFields, Register len_in_bytes, Register Rzero) {
  Label done;
  assert_different_registers(objectFields, len_in_bytes, Rzero);

  // Initialize object fields.
  // See documentation for MVCLE instruction!!!
  assert(objectFields->encoding() % 2 == 0, "objectFields must be an even register");
  assert(len_in_bytes->encoding() == (objectFields->encoding() + 1), "objectFields and len_in_bytes must be a register pair");
  assert(Rzero->encoding() % 2 == 1, "Rzero must be an odd register");

  // Use Rzero as src length, then mvcle will copy nothing
  // and fill the object with the padding value 0.
  move_long_ext(objectFields, as_Register(Rzero->encoding() - 1), 0);
  bind(done);
}

void C1_MacroAssembler::allocate_object(
  Register obj,       // Result: pointer to object after successful allocation.
  Register t1,        // temp register
  Register t2,        // temp register: Must be a global register for try_allocate.
  int      hdr_size,  // object header size in words
  int      obj_size,  // object size in words
  Register klass,     // object klass
  Label&   slow_case  // Continuation point if fast allocation fails.
) {
  assert_different_registers(obj, t1, t2, klass);

  // Allocate space and initialize header.
  try_allocate(obj, noreg, obj_size * wordSize, t1, slow_case);

  initialize_object(obj, klass, noreg, obj_size * HeapWordSize, t1, t2);
}

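// Clearing strategy used by initialize_object() below (added commentary): the
// header is written by initialize_header(); the remaining space is zeroed either
// with unrolled z_stg stores (total object size <= 4 words) or, for larger
// objects, with MVCLE via initialize_body(), where a source length of zero makes
// MVCLE fill the destination with the pad byte 0.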
void C1_MacroAssembler::initialize_object(
  Register obj,               // result: Pointer to object after successful allocation.
  Register klass,             // object klass
  Register var_size_in_bytes, // Object size in bytes if unknown at compile time; invalid otherwise.
  int      con_size_in_bytes, // Object size in bytes if known at compile time.
  Register t1,                // temp register
  Register t2                 // temp register
) {
  assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0,
         "con_size_in_bytes is not multiple of alignment");
  assert(var_size_in_bytes == noreg, "not implemented");
  const int hdr_size_in_bytes = instanceOopDesc::header_size() * HeapWordSize;

  const Register Rzero = t2;

  z_xgr(Rzero, Rzero);
  initialize_header(obj, klass, noreg, Rzero, t1);

  // Clear rest of allocated space.
  const int threshold = 4 * BytesPerWord;
  if (con_size_in_bytes <= threshold) {
    // Use explicit null stores.
    // code size = 6*n bytes (n = number of fields to clear)
    for (int i = hdr_size_in_bytes; i < con_size_in_bytes; i += BytesPerWord)
      z_stg(Rzero, Address(obj, i));
  } else {
    // Code size generated by initialize_body() is 16.
    Register object_fields = Z_R0_scratch;
    Register len_in_bytes  = Z_R1_scratch;
    z_la(object_fields, hdr_size_in_bytes, obj);
    load_const_optimized(len_in_bytes, con_size_in_bytes - hdr_size_in_bytes);
    initialize_body(object_fields, len_in_bytes, Rzero);
  }

  // Dtrace support is unimplemented.
  // if (CURRENT_ENV->dtrace_alloc_probes()) {
  //   assert(obj == rax, "must be");
  //   call(RuntimeAddress(Runtime1::entry_for (Runtime1::dtrace_object_alloc_id)));
  // }

  verify_oop(obj, FILE_AND_LINE);
}

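// Worked example for the size computation in allocate_array() below, with values
// chosen for illustration only: for an int[] (elt_size = 4), hdr_size = 2 words
// (16 bytes) and MinObjAlignmentInBytes = 8:
//   arr_size = (len << 2) + 16 + 7;  // element bytes + header + alignment slack
//   arr_size &= ~7;                  // i.e. round len*4 + 16 up to a multiple of 8
// try_allocate() then carves arr_size bytes out of the TLAB.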
void C1_MacroAssembler::allocate_array(
  Register obj,       // result: Pointer to array after successful allocation.
  Register len,       // array length
  Register t1,        // temp register
  Register t2,        // temp register
  int      hdr_size,  // object header size in words
  int      elt_size,  // element size in bytes
  Register klass,     // object klass
  Label&   slow_case  // Continuation point if fast allocation fails.
) {
  assert_different_registers(obj, len, t1, t2, klass);

  // Determine alignment mask.
  assert(!(BytesPerWord & 1), "must be a multiple of 2 for masking code to work");

  // Check for negative or excessive length.
  compareU64_and_branch(len, (int32_t)max_array_allocation_length, bcondHigh, slow_case);

  // Compute array size.
  // Note: If 0 <= len <= max_length, len*elt_size + header + alignment is
  //       smaller or equal to the largest integer. Also, since top is always
  //       aligned, we can do the alignment here instead of at the end address
  //       computation.
  const Register arr_size = t2;
  switch (elt_size) {
    case 1: lgr_if_needed(arr_size, len); break;
    case 2: z_sllg(arr_size, len, 1); break;
    case 4: z_sllg(arr_size, len, 2); break;
    case 8: z_sllg(arr_size, len, 3); break;
    default: ShouldNotReachHere();
  }
  add2reg(arr_size, hdr_size * wordSize + MinObjAlignmentInBytesMask); // Add space for header & alignment.
  z_nill(arr_size, (~MinObjAlignmentInBytesMask) & 0xffff);            // Align array size.

  try_allocate(obj, arr_size, 0, t1, slow_case);

  initialize_header(obj, klass, len, noreg, t1);

  // Clear rest of allocated space.
  Label done;
  Register object_fields = t1;
  Register Rzero = Z_R1_scratch;
  z_aghi(arr_size, -(hdr_size * BytesPerWord));
  z_bre(done); // Jump if size of fields is zero.
  z_la(object_fields, hdr_size * BytesPerWord, obj);
  z_xgr(Rzero, Rzero);
  initialize_body(object_fields, arr_size, Rzero);
  bind(done);

  // Dtrace support is unimplemented.
  // if (CURRENT_ENV->dtrace_alloc_probes()) {
  //   assert(obj == rax, "must be");
  //   call(RuntimeAddress(Runtime1::entry_for (Runtime1::dtrace_object_alloc_id)));
  // }

  verify_oop(obj, FILE_AND_LINE);
}


#ifndef PRODUCT

void C1_MacroAssembler::verify_stack_oop(int stack_offset) {
  if (!VerifyOops) return;
  verify_oop_addr(Address(Z_SP, stack_offset), FILE_AND_LINE);
}

void C1_MacroAssembler::verify_not_null_oop(Register r) {
  if (!VerifyOops) return;
  NearLabel not_null;
  compareU64_and_branch(r, (intptr_t)0, bcondNotEqual, not_null);
  stop("non-null oop required");
  bind(not_null);
  verify_oop(r, FILE_AND_LINE);
}

void C1_MacroAssembler::invalidate_registers(Register preserve1,
                                             Register preserve2,
                                             Register preserve3) {
  Register dead_value = noreg;
  for (int i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (r != preserve1 && r != preserve2 && r != preserve3 && r != Z_SP && r != Z_thread) {
      if (dead_value == noreg) {
        load_const_optimized(r, 0xc1dead);
        dead_value = r;
      } else {
        z_lgr(r, dead_value);
      }
    }
  }
}

#endif // !PRODUCT