/*
 * Copyright (c) 2018, 2021, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "interpreter/interpreter.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
#endif

#define __ masm->

static void save_machine_state(MacroAssembler* masm, bool handle_gpr, bool handle_fp) {
  if (handle_gpr) {
    __ push_IU_state();
  }

  if (handle_fp) {
    // Some paths can be reached from the c2i adapter with live fp arguments in registers.
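    // The Java calling convention passes at most eight float/double arguments in
    // xmm0-xmm7 on x86_64, so saving those eight registers covers every live fp
    // argument; the assert below guards that assumption.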
    LP64_ONLY(assert(Argument::n_float_register_parameters_j == 8, "8 fp registers to save at java call"));

    if (UseSSE >= 2) {
      const int xmm_size = wordSize * LP64_ONLY(2) NOT_LP64(4);
      __ subptr(rsp, xmm_size * 8);
      __ movdbl(Address(rsp, xmm_size * 0), xmm0);
      __ movdbl(Address(rsp, xmm_size * 1), xmm1);
      __ movdbl(Address(rsp, xmm_size * 2), xmm2);
      __ movdbl(Address(rsp, xmm_size * 3), xmm3);
      __ movdbl(Address(rsp, xmm_size * 4), xmm4);
      __ movdbl(Address(rsp, xmm_size * 5), xmm5);
      __ movdbl(Address(rsp, xmm_size * 6), xmm6);
      __ movdbl(Address(rsp, xmm_size * 7), xmm7);
    } else if (UseSSE >= 1) {
      const int xmm_size = wordSize * LP64_ONLY(1) NOT_LP64(2);
      __ subptr(rsp, xmm_size * 8);
      __ movflt(Address(rsp, xmm_size * 0), xmm0);
      __ movflt(Address(rsp, xmm_size * 1), xmm1);
      __ movflt(Address(rsp, xmm_size * 2), xmm2);
      __ movflt(Address(rsp, xmm_size * 3), xmm3);
      __ movflt(Address(rsp, xmm_size * 4), xmm4);
      __ movflt(Address(rsp, xmm_size * 5), xmm5);
      __ movflt(Address(rsp, xmm_size * 6), xmm6);
      __ movflt(Address(rsp, xmm_size * 7), xmm7);
    } else {
      __ push_FPU_state();
    }
  }
}

static void restore_machine_state(MacroAssembler* masm, bool handle_gpr, bool handle_fp) {
  if (handle_fp) {
    if (UseSSE >= 2) {
      const int xmm_size = wordSize * LP64_ONLY(2) NOT_LP64(4);
      __ movdbl(xmm0, Address(rsp, xmm_size * 0));
      __ movdbl(xmm1, Address(rsp, xmm_size * 1));
      __ movdbl(xmm2, Address(rsp, xmm_size * 2));
      __ movdbl(xmm3, Address(rsp, xmm_size * 3));
      __ movdbl(xmm4, Address(rsp, xmm_size * 4));
      __ movdbl(xmm5, Address(rsp, xmm_size * 5));
      __ movdbl(xmm6, Address(rsp, xmm_size * 6));
      __ movdbl(xmm7, Address(rsp, xmm_size * 7));
      __ addptr(rsp, xmm_size * 8);
    } else if (UseSSE >= 1) {
      const int xmm_size = wordSize * LP64_ONLY(1) NOT_LP64(2);
      __ movflt(xmm0, Address(rsp, xmm_size * 0));
      __ movflt(xmm1, Address(rsp, xmm_size * 1));
      __ movflt(xmm2, Address(rsp, xmm_size * 2));
      __ movflt(xmm3, Address(rsp, xmm_size * 3));
      __ movflt(xmm4, Address(rsp, xmm_size * 4));
      __ movflt(xmm5, Address(rsp, xmm_size * 5));
      __ movflt(xmm6, Address(rsp, xmm_size * 6));
      __ movflt(xmm7, Address(rsp, xmm_size * 7));
      __ addptr(rsp, xmm_size * 8);
    } else {
      __ pop_FPU_state();
    }
  }

  if (handle_gpr) {
    __ pop_IU_state();
  }
}

void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                                       Register src, Register dst, Register count) {

  bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;

  if (is_reference_type(type)) {

    if ((ShenandoahSATBBarrier && !dest_uninitialized) || ShenandoahLoadRefBarrier) {
#ifdef _LP64
      Register thread = r15_thread;
#else
      Register thread = rax;
      if (thread == src || thread == dst || thread == count) {
        thread = rbx;
      }
      if (thread == src || thread == dst || thread == count) {
        thread = rcx;
      }
      if (thread == src || thread == dst || thread == count) {
        thread = rdx;
      }
      __ push(thread);
      __ get_thread(thread);
#endif
      assert_different_registers(src, dst, count, thread);

      Label done;
      // Short-circuit if count == 0.
      __ testptr(count, count);
      __ jcc(Assembler::zero, done);

      // Avoid runtime call when not active.
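      // The per-thread gc_state byte mirrors the global heap state. The barrier
      // only matters while the heap has forwarded objects (HAS_FORWARDED) or,
      // for the SATB part, while marking is in progress (MARKING); if none of
      // the tested flags are set, the runtime call is skipped entirely.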
      Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
      int flags;
      if (ShenandoahSATBBarrier && dest_uninitialized) {
        flags = ShenandoahHeap::HAS_FORWARDED;
      } else {
        flags = ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::MARKING;
      }
      __ testb(gc_state, flags);
      __ jcc(Assembler::zero, done);

      save_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ false);

#ifdef _LP64
      assert(src == rdi, "expected");
      assert(dst == rsi, "expected");
      assert(count == rdx, "expected");
      if (UseCompressedOops) {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_narrow_oop),
                        src, dst, count);
      } else
#endif
      {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_oop),
                        src, dst, count);
      }

      restore_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ false);

      __ bind(done);
      NOT_LP64(__ pop(thread);)
    }
  }

}

void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
                                                                 Register obj,
                                                                 Register pre_val,
                                                                 Register thread,
                                                                 Register tmp,
                                                                 bool tosca_live,
                                                                 bool expand_call) {

  if (ShenandoahSATBBarrier) {
    satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, tosca_live, expand_call);
  }
}

void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
                                                           Register obj,
                                                           Register pre_val,
                                                           Register thread,
                                                           Register tmp,
                                                           bool tosca_live,
                                                           bool expand_call) {
  // If expand_call is true then we expand the call_VM_leaf macro
  // directly to skip generating the check by
  // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.

#ifdef _LP64
  assert(thread == r15_thread, "must be");
#endif // _LP64

  Label done;
  Label runtime;

  assert(pre_val != noreg, "check this code");

  if (obj != noreg) {
    assert_different_registers(obj, pre_val, tmp);
    assert(pre_val != rax, "check this code");
  }

  Address index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ testb(gc_state, ShenandoahHeap::MARKING);
  __ jcc(Assembler::zero, done);

  // Do we need to load the previous value?
  if (obj != noreg) {
    __ load_heap_oop(pre_val, Address(obj, 0), noreg, noreg, AS_RAW);
  }

  // Is the previous value null?
  __ cmpptr(pre_val, NULL_WORD);
  __ jcc(Assembler::equal, done);

  // Can we store the original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)
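  // Note: the SATB queue index is a byte offset that counts down towards zero;
  // index == 0 means the buffer is full and must be handed to the runtime
  // before this entry can be recorded.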
  __ movptr(tmp, index);                   // tmp := *index_adr
  __ cmpptr(tmp, 0);                       // tmp == 0?
  __ jcc(Assembler::equal, runtime);       // If yes, goto runtime

  __ subptr(tmp, wordSize);                // tmp := tmp - wordSize
  __ movptr(index, tmp);                   // *index_adr := tmp
  __ addptr(tmp, buffer);                  // tmp := tmp + *buffer_adr

  // Record the previous value
  __ movptr(Address(tmp, 0), pre_val);
  __ jmp(done);

  __ bind(runtime);
  // save the live input values
  if (tosca_live) __ push(rax);

  if (obj != noreg && obj != rax)
    __ push(obj);

  if (pre_val != rax)
    __ push(pre_val);

  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that the *(ebp+frame::interpreter_frame_last_sp) == nullptr.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then ebp might be pointing to
  // the caller frame and so this check will most likely fail at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack,
  // expand_call should be passed true.

  NOT_LP64( __ push(thread); )

#ifdef _LP64
  // We move pre_val into c_rarg0 early, in order to avoid smashing it, should
  // pre_val be c_rarg1 (where the call prologue would copy the thread argument).
  // Note: this should not accidentally smash thread, because thread is always r15.
  assert(thread != c_rarg0, "smashed arg");
  if (c_rarg0 != pre_val) {
    __ mov(c_rarg0, pre_val);
  }
#endif

  if (expand_call) {
    LP64_ONLY( assert(pre_val != c_rarg1, "smashed arg"); )
#ifdef _LP64
    if (c_rarg1 != thread) {
      __ mov(c_rarg1, thread);
    }
    // Already moved pre_val into c_rarg0 above
#else
    __ push(thread);
    __ push(pre_val);
#endif
    __ MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre), 2);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre), LP64_ONLY(c_rarg0) NOT_LP64(pre_val), thread);
  }

  NOT_LP64( __ pop(thread); )

  // restore the live input values
  if (pre_val != rax)
    __ pop(pre_val);

  if (obj != noreg && obj != rax)
    __ pop(obj);

  if (tosca_live) __ pop(rax);

  __ bind(done);
}

void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm, Register dst, Address src, DecoratorSet decorators) {
  assert(ShenandoahLoadRefBarrier, "Should be enabled");

  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);
  bool is_narrow  = UseCompressedOops && !is_native;

  Label heap_stable, not_cset;

  __ block_comment("load_reference_barrier { ");

  // Check if GC is active
#ifdef _LP64
  Register thread = r15_thread;
#else
  Register thread = rcx;
  if (thread == dst) {
    thread = rbx;
  }
  __ push(thread);
  __ get_thread(thread);
#endif

  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  int flags = ShenandoahHeap::HAS_FORWARDED;
  if (!is_strong) {
    flags |= ShenandoahHeap::WEAK_ROOTS;
  }
  __ testb(gc_state, flags);
  __ jcc(Assembler::zero, heap_stable);

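  // Strong LRB: test whether the loaded oop is in the collection set. The heap
  // keeps a byte map with one entry per region (in_cset_fast_test_addr()), so
  // (addr >> region_size_shift) indexes the map. Two scratch registers are
  // claimed below for the region index and the map base.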
  Register tmp1 = noreg, tmp2 = noreg;
  if (is_strong) {
    // Test for object in cset
    // Allocate temporary registers
    for (int i = 0; i < 8; i++) {
      Register r = as_Register(i);
      if (r != rsp && r != rbp && r != dst && r != src.base() && r != src.index()) {
        if (tmp1 == noreg) {
          tmp1 = r;
        } else {
          tmp2 = r;
          break;
        }
      }
    }
    assert(tmp1 != noreg, "tmp1 allocated");
    assert(tmp2 != noreg, "tmp2 allocated");
    assert_different_registers(tmp1, tmp2, src.base(), src.index());
    assert_different_registers(tmp1, tmp2, dst);

    __ push(tmp1);
    __ push(tmp2);

    // Optimized cset-test
    __ movptr(tmp1, dst);
    __ shrptr(tmp1, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ movptr(tmp2, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr());
    __ movbool(tmp1, Address(tmp1, tmp2, Address::times_1));
    __ testbool(tmp1);
    __ jcc(Assembler::zero, not_cset);
  }

  save_machine_state(masm, /* handle_gpr = */ false, /* handle_fp = */ true);

  // The rest is saved with the optimized path

  uint num_saved_regs = 4 + (dst != rax ? 1 : 0) LP64_ONLY(+4);
  __ subptr(rsp, num_saved_regs * wordSize);
  uint slot = num_saved_regs;
  if (dst != rax) {
    __ movptr(Address(rsp, (--slot) * wordSize), rax);
  }
  __ movptr(Address(rsp, (--slot) * wordSize), rcx);
  __ movptr(Address(rsp, (--slot) * wordSize), rdx);
  __ movptr(Address(rsp, (--slot) * wordSize), rdi);
  __ movptr(Address(rsp, (--slot) * wordSize), rsi);
#ifdef _LP64
  __ movptr(Address(rsp, (--slot) * wordSize), r8);
  __ movptr(Address(rsp, (--slot) * wordSize), r9);
  __ movptr(Address(rsp, (--slot) * wordSize), r10);
  __ movptr(Address(rsp, (--slot) * wordSize), r11);
  // r12-r15 are callee saved in all calling conventions
#endif
  assert(slot == 0, "must use all slots");

  // Shuffle registers such that dst is in c_rarg0 and addr in c_rarg1.
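  // If dst already lives in arg1, loading the address into arg0 first and then
  // swapping the two avoids clobbering dst before it has been copied out.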
#ifdef _LP64
  Register arg0 = c_rarg0, arg1 = c_rarg1;
#else
  Register arg0 = rdi, arg1 = rsi;
#endif
  if (dst == arg1) {
    __ lea(arg0, src);
    __ xchgptr(arg1, arg0);
  } else {
    __ lea(arg1, src);
    __ movptr(arg0, dst);
  }

  if (is_strong) {
    if (is_narrow) {
      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow), arg0, arg1);
    } else {
      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong), arg0, arg1);
    }
  } else if (is_weak) {
    if (is_narrow) {
      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow), arg0, arg1);
    } else {
      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak), arg0, arg1);
    }
  } else {
    assert(is_phantom, "only remaining strength");
    assert(!is_narrow, "phantom access cannot be narrow");
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom), arg0, arg1);
  }

#ifdef _LP64
  __ movptr(r11, Address(rsp, (slot++) * wordSize));
  __ movptr(r10, Address(rsp, (slot++) * wordSize));
  __ movptr(r9,  Address(rsp, (slot++) * wordSize));
  __ movptr(r8,  Address(rsp, (slot++) * wordSize));
#endif
  __ movptr(rsi, Address(rsp, (slot++) * wordSize));
  __ movptr(rdi, Address(rsp, (slot++) * wordSize));
  __ movptr(rdx, Address(rsp, (slot++) * wordSize));
  __ movptr(rcx, Address(rsp, (slot++) * wordSize));

  if (dst != rax) {
    __ movptr(dst, rax);
    __ movptr(rax, Address(rsp, (slot++) * wordSize));
  }

  assert(slot == num_saved_regs, "must use all slots");
  __ addptr(rsp, num_saved_regs * wordSize);

  restore_machine_state(masm, /* handle_gpr = */ false, /* handle_fp = */ true);

  __ bind(not_cset);

  if (is_strong) {
    __ pop(tmp2);
    __ pop(tmp1);
  }

  __ bind(heap_stable);

  __ block_comment("} load_reference_barrier");

#ifndef _LP64
  __ pop(thread);
#endif
}

//
// Arguments:
//
// Inputs:
//   src:  oop location, might be clobbered
//   tmp1: scratch register, might not be valid.
//
// Output:
//   dst:  oop loaded from src location
//
// Kill:
//   tmp1 (if it is valid)
//
void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                            Register dst, Address src, Register tmp1, Register tmp_thread) {
  // 1: non-reference load, no additional barrier is needed
  if (!is_reference_type(type)) {
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
    return;
  }

  assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Not expected");

  // 2: load a reference from src location and apply LRB if needed
  if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
    Register result_dst = dst;
    bool use_tmp1_for_dst = false;

    // Preserve src location for LRB
    if (dst == src.base() || dst == src.index()) {
      // Use tmp1 for dst if possible, as it is not used in BarrierSetAssembler::load_at()
      if (tmp1->is_valid() && tmp1 != src.base() && tmp1 != src.index()) {
        dst = tmp1;
        use_tmp1_for_dst = true;
      } else {
        dst = rdi;
        __ push(dst);
      }
      assert_different_registers(dst, src.base(), src.index());
    }

    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);

    load_reference_barrier(masm, dst, src, decorators);

    // Move loaded oop to final destination
    if (dst != result_dst) {
      __ movptr(result_dst, dst);

      if (!use_tmp1_for_dst) {
        __ pop(dst);
      }

      dst = result_dst;
    }
  } else {
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
  }

  // 3: apply keep-alive barrier if needed
  if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
    save_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ true);

    Register thread = NOT_LP64(tmp_thread) LP64_ONLY(r15_thread);
    assert_different_registers(dst, tmp1, tmp_thread);
    if (!thread->is_valid()) {
      thread = rdx;
    }
    NOT_LP64(__ get_thread(thread));
    // Generate the SATB pre-barrier code to log the value of
    // the referent field in an SATB buffer.
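    // expand_call is true because this path can run without a full interpreter
    // frame (e.g. from the intrinsified Reference.get()), where the last_sp
    // check emitted by the regular call_VM_leaf would fail; see the comment in
    // satb_write_barrier_pre().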
    shenandoah_write_barrier_pre(masm /* masm */,
                                 noreg /* obj */,
                                 dst /* pre_val */,
                                 thread /* thread */,
                                 tmp1 /* tmp */,
                                 true /* tosca_live */,
                                 true /* expand_call */);

    restore_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ true);
  }
}

void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                             Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {

  bool on_oop = is_reference_type(type);
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool as_normal = (decorators & AS_NORMAL) != 0;
  if (on_oop && in_heap) {
    bool needs_pre_barrier = as_normal;

    Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
    // Flatten the object address if needed.
    // We do it regardless of precise mode, because we need the registers.
    if (dst.index() == noreg && dst.disp() == 0) {
      if (dst.base() != tmp1) {
        __ movptr(tmp1, dst.base());
      }
    } else {
      __ lea(tmp1, dst);
    }

    assert_different_registers(val, tmp1, tmp2, tmp3, rthread);

#ifndef _LP64
    __ get_thread(rthread);
    InterpreterMacroAssembler *imasm = static_cast<InterpreterMacroAssembler*>(masm);
    imasm->save_bcp();
#endif

    if (needs_pre_barrier) {
      shenandoah_write_barrier_pre(masm /*masm*/,
                                   tmp1 /* obj */,
                                   tmp2 /* pre_val */,
                                   rthread /* thread */,
                                   tmp3 /* tmp */,
                                   val != noreg /* tosca_live */,
                                   false /* expand_call */);
    }
    BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp1, 0), val, noreg, noreg, noreg);
    NOT_LP64(imasm->restore_bcp());
  } else {
    BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
  }
}

void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                                                  Register obj, Register tmp, Label& slowpath) {
  Label done;
  // Resolve jobject
  BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath);

  // Check for null.
  __ testptr(obj, obj);
  __ jcc(Assembler::zero, done);

  Address gc_state(jni_env, ShenandoahThreadLocalData::gc_state_offset() - JavaThread::jni_environment_offset());
  __ testb(gc_state, ShenandoahHeap::EVACUATION);
  __ jccb(Assembler::notZero, slowpath);
  __ bind(done);
}

// Special Shenandoah CAS implementation that handles false negatives
// due to concurrent evacuation.
void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
                                                Register res, Address addr, Register oldval, Register newval,
                                                bool exchange, Register tmp1, Register tmp2) {
  assert(ShenandoahCASBarrier, "Should only be used when CAS barrier is enabled");
  assert(oldval == rax, "must be in rax for implicit use in cmpxchg");
  assert_different_registers(oldval, tmp1, tmp2);
  assert_different_registers(newval, tmp1, tmp2);

  Label L_success, L_failure;

  // Remember oldval for retry logic below
#ifdef _LP64
  if (UseCompressedOops) {
    __ movl(tmp1, oldval);
  } else
#endif
  {
    __ movptr(tmp1, oldval);
  }

  // Step 1. Fast-path.
  //
  // Try to CAS with given arguments. If successful, then we are done.

#ifdef _LP64
  if (UseCompressedOops) {
    __ lock();
    __ cmpxchgl(newval, addr);
  } else
#endif
  {
    __ lock();
    __ cmpxchgptr(newval, addr);
  }
  __ jcc(Assembler::equal, L_success);

  // Step 2. CAS had failed. This may be a false negative.
  //
  // The trouble comes when we compare the to-space pointer with the from-space
  // pointer to the same object. To resolve this, it will suffice to resolve
  // the value from memory -- this will give both to-space pointers.
  // If they mismatch, then it was a legitimate failure.
  //
  // Before reaching for the resolve sequence, see if we can avoid the whole
  // shebang with filters.

  // Filter: when the offending in-memory value is null, the failure is definitely legitimate
  __ testptr(oldval, oldval);
  __ jcc(Assembler::zero, L_failure);

  // Filter: when the heap is stable, the failure is definitely legitimate
#ifdef _LP64
  const Register thread = r15_thread;
#else
  const Register thread = tmp2;
  __ get_thread(thread);
#endif
  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ testb(gc_state, ShenandoahHeap::HAS_FORWARDED);
  __ jcc(Assembler::zero, L_failure);

  // Decode the offending in-memory value.
#ifdef _LP64
  if (UseCompressedOops) {
    __ movl(tmp2, oldval);
    __ decode_heap_oop(tmp2);
  } else
#endif
  {
    __ movptr(tmp2, oldval);
  }

  // Test if forwarded: a forwarded object has both low mark word bits set.
  __ testb(Address(tmp2, oopDesc::mark_offset_in_bytes()), markWord::marked_value);
  __ jcc(Assembler::noParity, L_failure); // When odd number of bits, then not forwarded
  __ jcc(Assembler::zero, L_failure);     // When it is 00, then also not forwarded

  // Load and mask the forwarding pointer out of the mark word
  __ movptr(tmp2, Address(tmp2, oopDesc::mark_offset_in_bytes()));
  __ shrptr(tmp2, 2);
  __ shlptr(tmp2, 2);

#ifdef _LP64
  if (UseCompressedOops) {
    __ decode_heap_oop(tmp1); // decode for comparison
  }
#endif

  // Now we have the forwarded offender in tmp2.
  // Compare, and if they don't match, we have a legitimate failure.
  __ cmpptr(tmp1, tmp2);
  __ jcc(Assembler::notEqual, L_failure);

  // Step 3. Need to fix the memory ptr before continuing.
  //
  // At this point, we have the from-space oldval in the register, and its to-space
  // address is in tmp2. Let's try to update it into memory. We don't care if it
  // succeeds or not. If it does, then the retrying CAS would see it and succeed.
  // If this fixup fails, this means somebody else beat us to it, and necessarily
  // with a to-space ptr store. We still have to do the retry, because the GC might
  // have updated the reference for us.

#ifdef _LP64
  if (UseCompressedOops) {
    __ encode_heap_oop(tmp2); // previously decoded at step 2.
  }
#endif

#ifdef _LP64
  if (UseCompressedOops) {
    __ lock();
    __ cmpxchgl(tmp2, addr);
  } else
#endif
  {
    __ lock();
    __ cmpxchgptr(tmp2, addr);
  }

  // Step 4. Try to CAS again.
  //
  // This is guaranteed not to have false negatives, because oldval is definitely
  // to-space, and the memory pointer is to-space as well. Nothing is able to store
  // a from-space ptr into memory anymore. Make sure oldval is restored, after being
  // garbled during retries.
  //
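  // Move the to-space value back into oldval (rax), so the retry below compares
  // against the resolved pointer; cmpxchg implicitly uses rax as the compare value.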
#ifdef _LP64
  if (UseCompressedOops) {
    __ movl(oldval, tmp2);
  } else
#endif
  {
    __ movptr(oldval, tmp2);
  }

#ifdef _LP64
  if (UseCompressedOops) {
    __ lock();
    __ cmpxchgl(newval, addr);
  } else
#endif
  {
    __ lock();
    __ cmpxchgptr(newval, addr);
  }
  if (!exchange) {
    __ jccb(Assembler::equal, L_success); // fastpath, peeking into Step 5, no need to jump
  }

  // Step 5. If we need a boolean result out of CAS, set the flag appropriately,
  // and promote the result. Note that we handle the flag from both the 1st and 2nd CAS.
  // Otherwise, the failure witness for CAE is in oldval on all paths, and we can return.

  if (exchange) {
    __ bind(L_failure);
    __ bind(L_success);
  } else {
    assert(res != noreg, "need result register");

    Label exit;
    __ bind(L_failure);
    __ xorptr(res, res);
    __ jmpb(exit);

    __ bind(L_success);
    __ movptr(res, 1);
    __ bind(exit);
  }
}

#undef __

#ifdef COMPILER1

#define __ ce->masm()->

void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(*stub->entry());
  assert(stub->pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/);
  }

  __ cmpptr(pre_val_reg, NULL_WORD);
  __ jcc(Assembler::equal, *stub->continuation());
  ce->store_parameter(stub->pre_val()->as_register(), 0);
  __ call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
  __ jmp(*stub->continuation());
}

void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  __ bind(*stub->entry());

  DecoratorSet decorators = stub->decorators();
  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);

  Register obj  = stub->obj()->as_register();
  Register res  = stub->result()->as_register();
  Register addr = stub->addr()->as_pointer_register();
  Register tmp1 = stub->tmp1()->as_register();
  Register tmp2 = stub->tmp2()->as_register();
  assert_different_registers(obj, res, addr, tmp1, tmp2);

  Label slow_path;

  assert(res == rax, "result must arrive in rax");

  if (res != obj) {
    __ mov(res, obj);
  }

  if (is_strong) {
    // Check for object being in the collection set.
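    // Same region-indexed byte map test as in load_reference_barrier() above:
    // shift the address right by the region size shift and load one byte from
    // the cset map at in_cset_fast_test_addr().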
    __ mov(tmp1, res);
    __ shrptr(tmp1, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ movptr(tmp2, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr());
#ifdef _LP64
    __ movbool(tmp2, Address(tmp2, tmp1, Address::times_1));
    __ testbool(tmp2);
#else
    // On x86_32, the C1 register allocator can give us a register without 8-bit support.
    // Do the full-register access and test to avoid compilation failures.
    __ movptr(tmp2, Address(tmp2, tmp1, Address::times_1));
    __ testptr(tmp2, 0xFF);
#endif
    __ jcc(Assembler::zero, *stub->continuation());
  }

  __ bind(slow_path);
  ce->store_parameter(res, 0);
  ce->store_parameter(addr, 1);
  if (is_strong) {
    if (is_native) {
      __ call(RuntimeAddress(bs->load_reference_barrier_strong_native_rt_code_blob()->code_begin()));
    } else {
      __ call(RuntimeAddress(bs->load_reference_barrier_strong_rt_code_blob()->code_begin()));
    }
  } else if (is_weak) {
    __ call(RuntimeAddress(bs->load_reference_barrier_weak_rt_code_blob()->code_begin()));
  } else {
    assert(is_phantom, "only remaining strength");
    __ call(RuntimeAddress(bs->load_reference_barrier_phantom_rt_code_blob()->code_begin()));
  }
  __ jmp(*stub->continuation());
}

#undef __

#define __ sasm->

void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  __ prologue("shenandoah_pre_barrier", false);
  // arg0 : previous value of memory

  __ push(rax);
  __ push(rdx);

  const Register pre_val = rax;
  const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
  const Register tmp = rdx;

  NOT_LP64(__ get_thread(thread);)

  Address queue_index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Label done;
  Label runtime;

  // Is SATB still active?
  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ testb(gc_state, ShenandoahHeap::MARKING);
  __ jcc(Assembler::zero, done);

  // Can we store the original value in the thread's buffer?
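  // As in satb_write_barrier_pre(): the queue index counts down towards zero;
  // zero means the buffer is full and the runtime must flush it first.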
  __ movptr(tmp, queue_index);
  __ testptr(tmp, tmp);
  __ jcc(Assembler::zero, runtime);
  __ subptr(tmp, wordSize);
  __ movptr(queue_index, tmp);
  __ addptr(tmp, buffer);

  // prev_val (rax)
  __ load_parameter(0, pre_val);
  __ movptr(Address(tmp, 0), pre_val);
  __ jmp(done);

  __ bind(runtime);

  __ save_live_registers_no_oop_map(true);

  // load the pre-value
  __ load_parameter(0, rcx);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre), rcx, thread);

  __ restore_live_registers(true);

  __ bind(done);

  __ pop(rdx);
  __ pop(rax);

  __ epilogue();
}

void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm, DecoratorSet decorators) {
  __ prologue("shenandoah_load_reference_barrier", false);
  // arg0 : object to be resolved

  __ save_live_registers_no_oop_map(true);

  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);

#ifdef _LP64
  __ load_parameter(0, c_rarg0);
  __ load_parameter(1, c_rarg1);
  if (is_strong) {
    if (is_native) {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong), c_rarg0, c_rarg1);
    } else {
      if (UseCompressedOops) {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow), c_rarg0, c_rarg1);
      } else {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong), c_rarg0, c_rarg1);
      }
    }
  } else if (is_weak) {
    assert(!is_native, "weak must not be called off-heap");
    if (UseCompressedOops) {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow), c_rarg0, c_rarg1);
    } else {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak), c_rarg0, c_rarg1);
    }
  } else {
    assert(is_phantom, "only remaining strength");
    assert(is_native, "phantom must only be called off-heap");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom), c_rarg0, c_rarg1);
  }
#else
  __ load_parameter(0, rax);
  __ load_parameter(1, rbx);
  if (is_strong) {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong), rax, rbx);
  } else if (is_weak) {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak), rax, rbx);
  } else {
    assert(is_phantom, "only remaining strength");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom), rax, rbx);
  }
#endif

  __ restore_live_registers_except_rax(true);

  __ epilogue();
}

#undef __

#endif // COMPILER1