/*
 * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interp_masm.hpp"
#include "memory/universe.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"

#define __ masm->

void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                  Register dst, Address src, Register tmp1, Register tmp_thread) {
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  bool is_not_null = (decorators & IS_NOT_NULL) != 0;
  bool atomic = (decorators & MO_RELAXED) != 0;

  switch (type) {
  case T_OBJECT:
  case T_ARRAY: {
    if (in_heap) {
#ifdef _LP64
      if (UseCompressedOops) {
        __ movl(dst, src);
        if (is_not_null) {
          __ decode_heap_oop_not_null(dst);
        } else {
          __ decode_heap_oop(dst);
        }
      } else
#endif
      {
        __ movptr(dst, src);
      }
    } else {
      assert(in_native, "why else?");
      __ movptr(dst, src);
    }
    break;
  }
  case T_BOOLEAN: __ load_unsigned_byte(dst, src);  break;
  case T_BYTE:    __ load_signed_byte(dst, src);    break;
  case T_CHAR:    __ load_unsigned_short(dst, src); break;
  case T_SHORT:   __ load_signed_short(dst, src);   break;
  case T_INT:     __ movl(dst, src);                break;
  case T_ADDRESS: __ movptr(dst, src);              break;
  case T_FLOAT:
    assert(dst == noreg, "only to ftos");
    __ load_float(src);
    break;
  case T_DOUBLE:
    assert(dst == noreg, "only to dtos");
    __ load_double(src);
    break;
  case T_LONG:
    assert(dst == noreg, "only to ltos");
#ifdef _LP64
    __ movq(rax, src);
#else
    if (atomic) {
      __ fild_d(src);               // Must load atomically
      __ subptr(rsp, 2*wordSize);   // Make space for store
      __ fistp_d(Address(rsp, 0));
      __ pop(rax);
      __ pop(rdx);
    } else {
      __ movl(rax, src);
      __ movl(rdx, src.plus_disp(wordSize));
    }
#endif
    break;
  default: Unimplemented();
  }
}
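// Plain store of a value of the given BasicType to dst, honoring the access
// decorators: heap oop stores go through compressed-oop encoding when enabled,
// and a noreg val denotes storing a null.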
void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                   Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  bool is_not_null = (decorators & IS_NOT_NULL) != 0;
  bool atomic = (decorators & MO_RELAXED) != 0;

  switch (type) {
  case T_OBJECT:
  case T_ARRAY: {
    if (in_heap) {
      if (val == noreg) {
        assert(!is_not_null, "inconsistent access");
#ifdef _LP64
        if (UseCompressedOops) {
          __ movl(dst, NULL_WORD);
        } else {
          __ movslq(dst, NULL_WORD);
        }
#else
        __ movl(dst, NULL_WORD);
#endif
      } else {
#ifdef _LP64
        if (UseCompressedOops) {
          assert(!dst.uses(val), "not enough registers");
          if (is_not_null) {
            __ encode_heap_oop_not_null(val);
          } else {
            __ encode_heap_oop(val);
          }
          __ movl(dst, val);
        } else
#endif
        {
          __ movptr(dst, val);
        }
      }
    } else {
      assert(in_native, "why else?");
      assert(val != noreg, "not supported");
      __ movptr(dst, val);
    }
    break;
  }
  case T_BOOLEAN:
    __ andl(val, 0x1);  // boolean is true if LSB is 1
    __ movb(dst, val);
    break;
  case T_BYTE:
    __ movb(dst, val);
    break;
  case T_SHORT:
    __ movw(dst, val);
    break;
  case T_CHAR:
    __ movw(dst, val);
    break;
  case T_INT:
    __ movl(dst, val);
    break;
  case T_LONG:
    assert(val == noreg, "only tos");
#ifdef _LP64
    __ movq(dst, rax);
#else
    if (atomic) {
      __ push(rdx);
      __ push(rax);                 // Must update atomically with FIST
      __ fild_d(Address(rsp, 0));   // So load into FPU register
      __ fistp_d(dst);              // and put into memory atomically
      __ addptr(rsp, 2*wordSize);
    } else {
      __ movptr(dst, rax);
      __ movptr(dst.plus_disp(wordSize), rdx);
    }
#endif
    break;
  case T_FLOAT:
    assert(val == noreg, "only tos");
    __ store_float(dst);
    break;
  case T_DOUBLE:
    assert(val == noreg, "only tos");
    __ store_double(dst);
    break;
  case T_ADDRESS:
    __ movptr(dst, val);
    break;
  default: Unimplemented();
  }
}

void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                                        Register obj, Register tmp, Label& slowpath) {
  __ clear_jweak_tag(obj);
  __ movptr(obj, Address(obj, 0));
}

void BarrierSetAssembler::tlab_allocate(MacroAssembler* masm,
                                        Register thread, Register obj,
                                        Register var_size_in_bytes,
                                        int con_size_in_bytes,
                                        Register t1,
                                        Register t2,
                                        Label& slow_case) {
  assert_different_registers(obj, t1, t2);
  assert_different_registers(obj, var_size_in_bytes, t1);
  Register end = t2;
  if (!thread->is_valid()) {
#ifdef _LP64
    thread = r15_thread;
#else
    assert(t1->is_valid(), "need temp reg");
    thread = t1;
    __ get_thread(thread);
#endif
  }

  __ verify_tlab();

  __ movptr(obj, Address(thread, JavaThread::tlab_top_offset()));
  if (var_size_in_bytes == noreg) {
    __ lea(end, Address(obj, con_size_in_bytes));
  } else {
    __ lea(end, Address(obj, var_size_in_bytes, Address::times_1));
  }
  __ cmpptr(end, Address(thread, JavaThread::tlab_end_offset()));
  __ jcc(Assembler::above, slow_case);

  // update the tlab top pointer
  __ movptr(Address(thread, JavaThread::tlab_top_offset()), end);

  // recover var_size_in_bytes if necessary
  if (var_size_in_bytes == end) {
    __ subptr(var_size_in_bytes, obj);
  }
  __ verify_tlab();
}
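// Bump the per-thread allocated_bytes counter by either the variable or the
// constant allocation size. On 32-bit the 64-bit counter is updated with an
// add/adc pair.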
void BarrierSetAssembler::incr_allocated_bytes(MacroAssembler* masm, Register thread,
                                               Register var_size_in_bytes,
                                               int con_size_in_bytes,
                                               Register t1) {
  if (!thread->is_valid()) {
#ifdef _LP64
    thread = r15_thread;
#else
    assert(t1->is_valid(), "need temp reg");
    thread = t1;
    __ get_thread(thread);
#endif
  }

#ifdef _LP64
  if (var_size_in_bytes->is_valid()) {
    __ addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes);
  } else {
    __ addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes);
  }
#else
  if (var_size_in_bytes->is_valid()) {
    __ addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes);
  } else {
    __ addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes);
  }
  __ adcl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())+4), 0);
#endif
}

#ifdef _LP64
void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slow_path, Label* continuation) {
  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
  if (bs_nm == NULL) {
    return;
  }
  Register thread = r15_thread;
  Address disarmed_addr(thread, in_bytes(bs_nm->thread_disarmed_guard_value_offset()));
  // The immediate is the last 4 bytes, so if we align the start of the cmp
  // instruction to 4 bytes, we know that the second half of it is also 4
  // byte aligned, which means that the immediate will not cross a cache line
  __ align(4);
  uintptr_t before_cmp = (uintptr_t)__ pc();
  __ cmpl_imm32(disarmed_addr, 0);
  uintptr_t after_cmp = (uintptr_t)__ pc();
  guarantee(after_cmp - before_cmp == 8, "Wrong assumed instruction length");

  if (slow_path != NULL) {
    __ jcc(Assembler::notEqual, *slow_path);
    __ bind(*continuation);
  } else {
    Label done;
    __ jccb(Assembler::equal, done);
    __ call(RuntimeAddress(StubRoutines::x86::method_entry_barrier()));
    __ bind(done);
  }
}
#else
void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label*, Label*) {
  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
  if (bs_nm == NULL) {
    return;
  }

  Label continuation;

  Register tmp = rdi;
  __ push(tmp);
  __ movptr(tmp, (intptr_t)bs_nm->disarmed_guard_value_address());
  Address disarmed_addr(tmp, 0);
  __ align(4);
  __ cmpl_imm32(disarmed_addr, 0);
  __ pop(tmp);
  __ jcc(Assembler::equal, continuation);
  __ call(RuntimeAddress(StubRoutines::x86::method_entry_barrier()));
  __ bind(continuation);
}
#endif
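// c2i entry barrier: check that the incoming Method* (in rbx) and its holder's
// ClassLoaderData are still alive before entering the interpreter; if the class
// loader is concurrently unloading, dispatch to the wrong-method stub instead.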
void BarrierSetAssembler::c2i_entry_barrier(MacroAssembler* masm) {
  BarrierSetNMethod* bs = BarrierSet::barrier_set()->barrier_set_nmethod();
  if (bs == NULL) {
    return;
  }

  Label bad_call;
  __ cmpptr(rbx, 0); // rbx contains the incoming method for c2i adapters.
  __ jcc(Assembler::equal, bad_call);

  Register tmp1 = LP64_ONLY( rscratch1 ) NOT_LP64( rax );
  Register tmp2 = LP64_ONLY( rscratch2 ) NOT_LP64( rcx );
#ifndef _LP64
  __ push(tmp1);
  __ push(tmp2);
#endif // !_LP64

  // Pointer chase to the method holder to find out if the method is concurrently unloading.
  Label method_live;
  __ load_method_holder_cld(tmp1, rbx);

  // Is it a strong CLD?
  __ cmpl(Address(tmp1, ClassLoaderData::keep_alive_offset()), 0);
  __ jcc(Assembler::greater, method_live);

  // Is it a weak but alive CLD?
  __ movptr(tmp1, Address(tmp1, ClassLoaderData::holder_offset()));
  __ resolve_weak_handle(tmp1, tmp2);
  __ cmpptr(tmp1, 0);
  __ jcc(Assembler::notEqual, method_live);

#ifndef _LP64
  __ pop(tmp2);
  __ pop(tmp1);
#endif

  __ bind(bad_call);
  __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
  __ bind(method_live);

#ifndef _LP64
  __ pop(tmp2);
  __ pop(tmp1);
#endif
}