/*
 * Copyright (c) 2018, 2021, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/mode/shenandoahMode.hpp"
#include "interpreter/interpreter.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
#endif

#define __ masm->

static void save_machine_state(MacroAssembler* masm, bool handle_gpr, bool handle_fp) {
  if (handle_gpr) {
    __ push_IU_state();
  }

  if (handle_fp) {
    // Some paths can be reached from the c2i adapter with live fp arguments in registers.
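    // Only the eight Java float argument registers (xmm0..xmm7) can carry
    // live fp arguments at a Java call, so saving those suffices; the assert
    // below guards that assumption.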
    LP64_ONLY(assert(Argument::n_float_register_parameters_j == 8, "8 fp registers to save at java call"));

    if (UseSSE >= 2) {
      const int xmm_size = wordSize * LP64_ONLY(2) NOT_LP64(4);
      __ subptr(rsp, xmm_size * 8);
      __ movdbl(Address(rsp, xmm_size * 0), xmm0);
      __ movdbl(Address(rsp, xmm_size * 1), xmm1);
      __ movdbl(Address(rsp, xmm_size * 2), xmm2);
      __ movdbl(Address(rsp, xmm_size * 3), xmm3);
      __ movdbl(Address(rsp, xmm_size * 4), xmm4);
      __ movdbl(Address(rsp, xmm_size * 5), xmm5);
      __ movdbl(Address(rsp, xmm_size * 6), xmm6);
      __ movdbl(Address(rsp, xmm_size * 7), xmm7);
    } else if (UseSSE >= 1) {
      const int xmm_size = wordSize * LP64_ONLY(1) NOT_LP64(2);
      __ subptr(rsp, xmm_size * 8);
      __ movflt(Address(rsp, xmm_size * 0), xmm0);
      __ movflt(Address(rsp, xmm_size * 1), xmm1);
      __ movflt(Address(rsp, xmm_size * 2), xmm2);
      __ movflt(Address(rsp, xmm_size * 3), xmm3);
      __ movflt(Address(rsp, xmm_size * 4), xmm4);
      __ movflt(Address(rsp, xmm_size * 5), xmm5);
      __ movflt(Address(rsp, xmm_size * 6), xmm6);
      __ movflt(Address(rsp, xmm_size * 7), xmm7);
    } else {
      __ push_FPU_state();
    }
  }
}

static void restore_machine_state(MacroAssembler* masm, bool handle_gpr, bool handle_fp) {
  if (handle_fp) {
    if (UseSSE >= 2) {
      const int xmm_size = wordSize * LP64_ONLY(2) NOT_LP64(4);
      __ movdbl(xmm0, Address(rsp, xmm_size * 0));
      __ movdbl(xmm1, Address(rsp, xmm_size * 1));
      __ movdbl(xmm2, Address(rsp, xmm_size * 2));
      __ movdbl(xmm3, Address(rsp, xmm_size * 3));
      __ movdbl(xmm4, Address(rsp, xmm_size * 4));
      __ movdbl(xmm5, Address(rsp, xmm_size * 5));
      __ movdbl(xmm6, Address(rsp, xmm_size * 6));
      __ movdbl(xmm7, Address(rsp, xmm_size * 7));
      __ addptr(rsp, xmm_size * 8);
    } else if (UseSSE >= 1) {
      const int xmm_size = wordSize * LP64_ONLY(1) NOT_LP64(2);
      __ movflt(xmm0, Address(rsp, xmm_size * 0));
      __ movflt(xmm1, Address(rsp, xmm_size * 1));
      __ movflt(xmm2, Address(rsp, xmm_size * 2));
      __ movflt(xmm3, Address(rsp, xmm_size * 3));
      __ movflt(xmm4, Address(rsp, xmm_size * 4));
      __ movflt(xmm5, Address(rsp, xmm_size * 5));
      __ movflt(xmm6, Address(rsp, xmm_size * 6));
      __ movflt(xmm7, Address(rsp, xmm_size * 7));
      __ addptr(rsp, xmm_size * 8);
    } else {
      __ pop_FPU_state();
    }
  }

  if (handle_gpr) {
    __ pop_IU_state();
  }
}

void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                                       Register src, Register dst, Register count) {

  bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;

  if (is_reference_type(type)) {
    if (ShenandoahCardBarrier) {
      bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0;
      bool disjoint = (decorators & ARRAYCOPY_DISJOINT) != 0;
      bool obj_int = type == T_OBJECT LP64_ONLY(&& UseCompressedOops);

      // We need to save the original element count because the array copy stub
      // will destroy the value and we need it for the card marking barrier.
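      // The stash must survive the arraycopy stub itself; the matching code in
      // arraycopy_epilogue() reads the value back from r11 (rdx on 32-bit).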
#ifdef _LP64
      if (!checkcast) {
        if (!obj_int) {
          // Save count for barrier
          __ movptr(r11, count);
        } else if (disjoint) {
          // Save dst in r11 in the disjoint case
          __ movq(r11, dst);
        }
      }
#else
      if (disjoint) {
        __ mov(rdx, dst); // save 'to'
      }
#endif
    }

    if ((ShenandoahSATBBarrier && !dest_uninitialized) || ShenandoahIUBarrier || ShenandoahLoadRefBarrier) {
#ifdef _LP64
      Register thread = r15_thread;
#else
      Register thread = rax;
      if (thread == src || thread == dst || thread == count) {
        thread = rbx;
      }
      if (thread == src || thread == dst || thread == count) {
        thread = rcx;
      }
      if (thread == src || thread == dst || thread == count) {
        thread = rdx;
      }
      __ push(thread);
      __ get_thread(thread);
#endif
      assert_different_registers(src, dst, count, thread);

      Label L_done;
      // Short-circuit if count == 0.
      __ testptr(count, count);
      __ jcc(Assembler::zero, L_done);

      // Avoid runtime call when not active.
      Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
      int flags;
      if (ShenandoahSATBBarrier && dest_uninitialized) {
        flags = ShenandoahHeap::HAS_FORWARDED;
      } else {
        flags = ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::MARKING;
      }
      __ testb(gc_state, flags);
      __ jcc(Assembler::zero, L_done);

      save_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ false);

#ifdef _LP64
      assert(src == rdi, "expected");
      assert(dst == rsi, "expected");
      assert(count == rdx, "expected");
      if (UseCompressedOops) {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_narrow_oop_entry),
                        src, dst, count);
      } else
#endif
      {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_oop_entry),
                        src, dst, count);
      }

      restore_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ false);

      __ bind(L_done);
      NOT_LP64(__ pop(thread);)
    }
  }
}

void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                                       Register src, Register dst, Register count) {

  if (ShenandoahCardBarrier && is_reference_type(type)) {
    bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0;
    bool disjoint = (decorators & ARRAYCOPY_DISJOINT) != 0;
    bool obj_int = type == T_OBJECT LP64_ONLY(&& UseCompressedOops);
    Register tmp = rax;

#ifdef _LP64
    if (!checkcast) {
      if (!obj_int) {
        // Use the count saved by the prologue for the barrier
        count = r11;
      } else if (disjoint) {
        // Use the saved dst in the disjoint case
        dst = r11;
      }
    } else {
      tmp = rscratch1;
    }
#else
    if (disjoint) {
      __ mov(dst, rdx); // restore 'to'
    }
#endif
    gen_write_ref_array_post_barrier(masm, decorators, dst, count, tmp);
  }
}

void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
                                                                 Register obj,
                                                                 Register pre_val,
                                                                 Register thread,
                                                                 Register tmp,
                                                                 bool tosca_live,
                                                                 bool expand_call) {

  if (ShenandoahSATBBarrier) {
    satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, tosca_live, expand_call);
  }
}

void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
                                                           Register obj,
                                                           Register pre_val,
                                                           Register thread,
                                                           Register tmp,
                                                           bool tosca_live,
                                                           bool expand_call) {
  // If expand_call is true, then we expand the call_VM_leaf macro directly to
  // skip generating the check by InterpreterMacroAssembler::call_VM_leaf_base
  // that checks _last_sp.

#ifdef _LP64
  assert(thread == r15_thread, "must be");
#endif // _LP64

  Label done;
  Label runtime;

  assert(pre_val != noreg, "check this code");

  if (obj != noreg) {
    assert_different_registers(obj, pre_val, tmp);
    assert(pre_val != rax, "check this code");
  }

  Address in_progress(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset()));
  Address index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ testb(gc_state, ShenandoahHeap::MARKING);
  __ jcc(Assembler::zero, done);

  // Do we need to load the previous value?
  if (obj != noreg) {
    __ load_heap_oop(pre_val, Address(obj, 0), noreg, noreg, AS_RAW);
  }

  // Is the previous value null?
  __ cmpptr(pre_val, NULL_WORD);
  __ jcc(Assembler::equal, done);

  // Can we store the original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)

  __ movptr(tmp, index);             // tmp := *index_adr
  __ cmpptr(tmp, 0);                 // tmp == 0?
  __ jcc(Assembler::equal, runtime); // If yes, goto runtime

  __ subptr(tmp, wordSize);          // tmp := tmp - wordSize
  __ movptr(index, tmp);             // *index_adr := tmp
  __ addptr(tmp, buffer);            // tmp := tmp + *buffer_adr

  // Record the previous value
  __ movptr(Address(tmp, 0), pre_val);
  __ jmp(done);

  __ bind(runtime);
  // save the live input values
  if (tosca_live) __ push(rax);

  if (obj != noreg && obj != rax)
    __ push(obj);

  if (pre_val != rax)
    __ push(pre_val);

  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that the *(ebp+frame::interpreter_frame_last_sp) == nullptr.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then ebp might be pointing to
  // the caller frame and so this check will most likely fail at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack,
  // expand_call should be passed true.

  NOT_LP64( __ push(thread); )

#ifdef _LP64
  // We move pre_val into c_rarg0 early, in order to avoid smashing it, should
  // pre_val be c_rarg1 (where the call prologue would copy the thread argument).
  // Note: this should not accidentally smash thread, because thread is always r15.
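  // (On SysV x86_64 c_rarg0 is rdi, and on Windows x64 it is rcx; neither
  // aliases r15, which the assert below double-checks.)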
  assert(thread != c_rarg0, "smashed arg");
  if (c_rarg0 != pre_val) {
    __ mov(c_rarg0, pre_val);
  }
#endif

  if (expand_call) {
    LP64_ONLY( assert(pre_val != c_rarg1, "smashed arg"); )
#ifdef _LP64
    if (c_rarg1 != thread) {
      __ mov(c_rarg1, thread);
    }
    // Already moved pre_val into c_rarg0 above
#else
    __ push(thread);
    __ push(pre_val);
#endif
    __ MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), 2);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), LP64_ONLY(c_rarg0) NOT_LP64(pre_val), thread);
  }

  NOT_LP64( __ pop(thread); )

  // restore the live input values
  if (pre_val != rax)
    __ pop(pre_val);

  if (obj != noreg && obj != rax)
    __ pop(obj);

  if (tosca_live) __ pop(rax);

  __ bind(done);
}

void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm, Register dst, Address src, DecoratorSet decorators) {
  assert(ShenandoahLoadRefBarrier, "Should be enabled");

  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);
  bool is_narrow  = UseCompressedOops && !is_native;

  Label heap_stable, not_cset;

  __ block_comment("load_reference_barrier { ");

  // Check if GC is active
#ifdef _LP64
  Register thread = r15_thread;
#else
  Register thread = rcx;
  if (thread == dst) {
    thread = rbx;
  }
  __ push(thread);
  __ get_thread(thread);
#endif

  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  int flags = ShenandoahHeap::HAS_FORWARDED;
  if (!is_strong) {
    flags |= ShenandoahHeap::WEAK_ROOTS;
  }
  __ testb(gc_state, flags);
  __ jcc(Assembler::zero, heap_stable);

  Register tmp1 = noreg, tmp2 = noreg;
  if (is_strong) {
    // Test for object in cset
    // Allocate temporary registers
    for (int i = 0; i < 8; i++) {
      Register r = as_Register(i);
      if (r != rsp && r != rbp && r != dst && r != src.base() && r != src.index()) {
        if (tmp1 == noreg) {
          tmp1 = r;
        } else {
          tmp2 = r;
          break;
        }
      }
    }
    assert(tmp1 != noreg, "tmp1 allocated");
    assert(tmp2 != noreg, "tmp2 allocated");
    assert_different_registers(tmp1, tmp2, src.base(), src.index());
    assert_different_registers(tmp1, tmp2, dst);

    __ push(tmp1);
    __ push(tmp2);

    // Optimized cset-test
    __ movptr(tmp1, dst);
    __ shrptr(tmp1, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ movptr(tmp2, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr());
    __ movbool(tmp1, Address(tmp1, tmp2, Address::times_1));
    __ testbool(tmp1);
    __ jcc(Assembler::zero, not_cset);
  }

  save_machine_state(masm, /* handle_gpr = */ false, /* handle_fp = */ true);

  // The rest is saved with the optimized path

  uint num_saved_regs = 4 + (dst != rax ? 1 : 0) LP64_ONLY(+4);
  __ subptr(rsp, num_saved_regs * wordSize);
  uint slot = num_saved_regs;
  if (dst != rax) {
    __ movptr(Address(rsp, (--slot) * wordSize), rax);
  }
  __ movptr(Address(rsp, (--slot) * wordSize), rcx);
  __ movptr(Address(rsp, (--slot) * wordSize), rdx);
  __ movptr(Address(rsp, (--slot) * wordSize), rdi);
  __ movptr(Address(rsp, (--slot) * wordSize), rsi);
#ifdef _LP64
  __ movptr(Address(rsp, (--slot) * wordSize), r8);
  __ movptr(Address(rsp, (--slot) * wordSize), r9);
  __ movptr(Address(rsp, (--slot) * wordSize), r10);
  __ movptr(Address(rsp, (--slot) * wordSize), r11);
  // r12-r15 are callee saved in all calling conventions
#endif
  assert(slot == 0, "must use all slots");

  // Shuffle registers such that dst is in c_rarg0 and addr in c_rarg1.
#ifdef _LP64
  Register arg0 = c_rarg0, arg1 = c_rarg1;
#else
  Register arg0 = rdi, arg1 = rsi;
#endif
  if (dst == arg1) {
    __ lea(arg0, src);
    __ xchgptr(arg1, arg0);
  } else {
    __ lea(arg1, src);
    __ movptr(arg0, dst);
  }

  if (is_strong) {
    if (is_narrow) {
      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow), arg0, arg1);
    } else {
      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong), arg0, arg1);
    }
  } else if (is_weak) {
    if (is_narrow) {
      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow), arg0, arg1);
    } else {
      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak), arg0, arg1);
    }
  } else {
    assert(is_phantom, "only remaining strength");
    assert(!is_narrow, "phantom access cannot be narrow");
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom), arg0, arg1);
  }

#ifdef _LP64
  __ movptr(r11, Address(rsp, (slot++) * wordSize));
  __ movptr(r10, Address(rsp, (slot++) * wordSize));
  __ movptr(r9,  Address(rsp, (slot++) * wordSize));
  __ movptr(r8,  Address(rsp, (slot++) * wordSize));
#endif
  __ movptr(rsi, Address(rsp, (slot++) * wordSize));
  __ movptr(rdi, Address(rsp, (slot++) * wordSize));
  __ movptr(rdx, Address(rsp, (slot++) * wordSize));
  __ movptr(rcx, Address(rsp, (slot++) * wordSize));

  if (dst != rax) {
    __ movptr(dst, rax);
    __ movptr(rax, Address(rsp, (slot++) * wordSize));
  }

  assert(slot == num_saved_regs, "must use all slots");
  __ addptr(rsp, num_saved_regs * wordSize);

  restore_machine_state(masm, /* handle_gpr = */ false, /* handle_fp = */ true);

  __ bind(not_cset);

  if (is_strong) {
    __ pop(tmp2);
    __ pop(tmp1);
  }

  __ bind(heap_stable);

  __ block_comment("} load_reference_barrier");

#ifndef _LP64
  __ pop(thread);
#endif
}

void ShenandoahBarrierSetAssembler::iu_barrier(MacroAssembler* masm, Register dst, Register tmp) {
  if (ShenandoahIUBarrier) {
    iu_barrier_impl(masm, dst, tmp);
  }
}

void ShenandoahBarrierSetAssembler::iu_barrier_impl(MacroAssembler* masm, Register dst, Register tmp) {
  assert(ShenandoahIUBarrier, "should be enabled");

  if (dst == noreg) return;

  if (ShenandoahIUBarrier) {
    save_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ true);

#ifdef _LP64
    Register thread = r15_thread;
#else
    Register thread = rcx;
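    // On 32-bit there is no dedicated thread register; pick one that does
    // not alias dst or tmp.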
    if (thread == dst || thread == tmp) {
      thread = rdi;
    }
    if (thread == dst || thread == tmp) {
      thread = rbx;
    }
    __ get_thread(thread);
#endif
    assert_different_registers(dst, tmp, thread);

    satb_write_barrier_pre(masm, noreg, dst, thread, tmp, true, false);

    restore_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ true);
  }
}

//
// Arguments:
//
// Inputs:
//   src:        oop location, might be clobbered
//   tmp1:       scratch register, might not be valid.
//
// Output:
//   dst:        oop loaded from src location
//
// Kill:
//   tmp1 (if it is valid)
//
void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                            Register dst, Address src, Register tmp1, Register tmp_thread) {
  // 1: non-reference load, no additional barrier is needed
  if (!is_reference_type(type)) {
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
    return;
  }

  assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Not expected");

  // 2: load a reference from src location and apply LRB if needed
  if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
    Register result_dst = dst;
    bool use_tmp1_for_dst = false;

    // Preserve src location for LRB
    if (dst == src.base() || dst == src.index()) {
      // Use tmp1 for dst if possible, as it is not used in BarrierSetAssembler::load_at()
      if (tmp1->is_valid() && tmp1 != src.base() && tmp1 != src.index()) {
        dst = tmp1;
        use_tmp1_for_dst = true;
      } else {
        dst = rdi;
        __ push(dst);
      }
      assert_different_registers(dst, src.base(), src.index());
    }

    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);

    load_reference_barrier(masm, dst, src, decorators);

    // Move loaded oop to final destination
    if (dst != result_dst) {
      __ movptr(result_dst, dst);

      if (!use_tmp1_for_dst) {
        __ pop(dst);
      }

      dst = result_dst;
    }
  } else {
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
  }

  // 3: apply keep-alive barrier if needed
  if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
    save_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ true);

    Register thread = NOT_LP64(tmp_thread) LP64_ONLY(r15_thread);
    assert_different_registers(dst, tmp1, tmp_thread);
    if (!thread->is_valid()) {
      thread = rdx;
    }
    NOT_LP64(__ get_thread(thread));
    // Generate the SATB pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    shenandoah_write_barrier_pre(masm /* masm */,
                                 noreg /* obj */,
                                 dst /* pre_val */,
                                 thread /* thread */,
                                 tmp1 /* tmp */,
                                 true /* tosca_live */,
                                 true /* expand_call */);

    restore_machine_state(masm, /* handle_gpr = */ true, /* handle_fp = */ true);
  }
}

void ShenandoahBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj) {
  assert(ShenandoahCardBarrier, "Should have been checked by caller");

  // Does a store check for the oop in register obj. The content of
  // register obj is destroyed afterwards.
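  // The card to dirty lives at byte_map_base + (obj >> card_shift); each card
  // typically covers 512 bytes of heap.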

  ShenandoahBarrierSet* ctbs = ShenandoahBarrierSet::barrier_set();
  CardTable* ct = ctbs->card_table();

  __ shrptr(obj, CardTable::card_shift());

  Address card_addr;

  // The calculation for byte_map_base is as follows:
  // byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  // So this essentially converts an address to a displacement and it will
  // never need to be relocated. On 64-bit however the value may be too
  // large for a 32-bit displacement.
  intptr_t byte_map_base = (intptr_t)ct->byte_map_base();
  if (__ is_simm32(byte_map_base)) {
    card_addr = Address(noreg, obj, Address::times_1, byte_map_base);
  } else {
    // By doing it as an ExternalAddress 'byte_map_base' could be converted to a rip-relative
    // displacement and done in a single instruction given favorable mapping and a
    // smarter version of as_Address. However, 'ExternalAddress' generates a relocation
    // entry and that entry is not properly handled by the relocation code.
    AddressLiteral cardtable((address)byte_map_base, relocInfo::none);
    Address index(noreg, obj, Address::times_1);
    card_addr = __ as_Address(ArrayAddress(cardtable, index), rscratch1);
  }

  int dirty = CardTable::dirty_card_val();
  if (UseCondCardMark) {
    Label L_already_dirty;
    __ cmpb(card_addr, dirty);
    __ jccb(Assembler::equal, L_already_dirty);
    __ movb(card_addr, dirty);
    __ bind(L_already_dirty);
  } else {
    __ movb(card_addr, dirty);
  }
}

void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                             Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {

  bool on_oop = is_reference_type(type);
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool as_normal = (decorators & AS_NORMAL) != 0;
  if (on_oop && in_heap) {
    bool needs_pre_barrier = as_normal;

    Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
    // flatten object address if needed
    // We do it regardless of precise because we need the registers
    if (dst.index() == noreg && dst.disp() == 0) {
      if (dst.base() != tmp1) {
        __ movptr(tmp1, dst.base());
      }
    } else {
      __ lea(tmp1, dst);
    }

    assert_different_registers(val, tmp1, tmp2, tmp3, rthread);

#ifndef _LP64
    __ get_thread(rthread);
    InterpreterMacroAssembler *imasm = static_cast<InterpreterMacroAssembler*>(masm);
    imasm->save_bcp();
#endif

    if (needs_pre_barrier) {
      shenandoah_write_barrier_pre(masm /*masm*/,
                                   tmp1 /* obj */,
                                   tmp2 /* pre_val */,
                                   rthread /* thread */,
                                   tmp3 /* tmp */,
                                   val != noreg /* tosca_live */,
                                   false /* expand_call */);
    }
    if (val == noreg) {
      BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp1, 0), val, noreg, noreg, noreg);
    } else {
      iu_barrier(masm, val, tmp3);
      BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp1, 0), val, noreg, noreg, noreg);
      if (ShenandoahCardBarrier) {
        store_check(masm, tmp1);
      }
    }
    NOT_LP64(imasm->restore_bcp());
  } else {
    BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);
  }
}

void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                                                  Register obj, Register tmp, Label& slowpath) {
  Label done;
  // Resolve jobject
  BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath);
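
  // The resolved oop may still be a from-space pointer while evacuation is in
  // progress; in that case take the slow path, which applies the load
  // reference barrier.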

  // Check for null.
  __ testptr(obj, obj);
  __ jcc(Assembler::zero, done);

  Address gc_state(jni_env, ShenandoahThreadLocalData::gc_state_offset() - JavaThread::jni_environment_offset());
  __ testb(gc_state, ShenandoahHeap::EVACUATION);
  __ jccb(Assembler::notZero, slowpath);
  __ bind(done);
}

// Special Shenandoah CAS implementation that handles false negatives
// due to concurrent evacuation.
void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
                                                Register res, Address addr, Register oldval, Register newval,
                                                bool exchange, Register tmp1, Register tmp2) {
  assert(ShenandoahCASBarrier, "Should only be used when CAS barrier is enabled");
  assert(oldval == rax, "must be in rax for implicit use in cmpxchg");
  assert_different_registers(oldval, tmp1, tmp2);
  assert_different_registers(newval, tmp1, tmp2);

  Label L_success, L_failure;

  // Remember oldval for retry logic below
#ifdef _LP64
  if (UseCompressedOops) {
    __ movl(tmp1, oldval);
  } else
#endif
  {
    __ movptr(tmp1, oldval);
  }

  // Step 1. Fast-path.
  //
  // Try to CAS with given arguments. If successful, then we are done.

#ifdef _LP64
  if (UseCompressedOops) {
    __ lock();
    __ cmpxchgl(newval, addr);
  } else
#endif
  {
    __ lock();
    __ cmpxchgptr(newval, addr);
  }
  __ jcc(Assembler::equal, L_success);

  // Step 2. CAS has failed. This may be a false negative.
  //
  // The trouble comes when we compare the to-space pointer with the from-space
  // pointer to the same object. To resolve this, it will suffice to resolve
  // the value from memory -- this will give both to-space pointers.
  // If they mismatch, then it was a legitimate failure.
  //
  // Before reaching for the resolve sequence, see if we can avoid the whole shebang
  // with filters.

  // Filter: when the offending in-memory value is null, the failure is definitely legitimate
  __ testptr(oldval, oldval);
  __ jcc(Assembler::zero, L_failure);

  // Filter: when the heap is stable, the failure is definitely legitimate
#ifdef _LP64
  const Register thread = r15_thread;
#else
  const Register thread = tmp2;
  __ get_thread(thread);
#endif
  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ testb(gc_state, ShenandoahHeap::HAS_FORWARDED);
  __ jcc(Assembler::zero, L_failure);

#ifdef _LP64
  if (UseCompressedOops) {
    __ movl(tmp2, oldval);
    __ decode_heap_oop(tmp2);
  } else
#endif
  {
    __ movptr(tmp2, oldval);
  }

  // Decode the offending in-memory value.
  // Test if-forwarded
  __ testb(Address(tmp2, oopDesc::mark_offset_in_bytes()), markWord::marked_value);
  __ jcc(Assembler::noParity, L_failure); // When odd number of bits, then not forwarded
  __ jcc(Assembler::zero, L_failure);     // When it is 00, then also not forwarded

  // Load and mask the forwarding pointer
  __ movptr(tmp2, Address(tmp2, oopDesc::mark_offset_in_bytes()));
  __ shrptr(tmp2, 2);
  __ shlptr(tmp2, 2);

#ifdef _LP64
  if (UseCompressedOops) {
    __ decode_heap_oop(tmp1); // decode for comparison
  }
#endif

  // Now we have the forwarded offender in tmp2.
  // Compare and if they don't match, we have a legitimate failure
  __ cmpptr(tmp1, tmp2);
  __ jcc(Assembler::notEqual, L_failure);

  // Step 3. Need to fix the memory ptr before continuing.
  //
  // At this point, we have the from-space oldval in the register, and its to-space
  // address is in tmp2. Let's try to update it into memory. We don't care if it
  // succeeds or not. If it does, then the retrying CAS would see it and succeed.
  // If this fixup fails, this means somebody else beat us to it, and necessarily
  // with a to-space ptr store. We still have to do the retry, because the GC might
  // have updated the reference for us.

#ifdef _LP64
  if (UseCompressedOops) {
    __ encode_heap_oop(tmp2); // previously decoded at step 2.
  }
#endif

#ifdef _LP64
  if (UseCompressedOops) {
    __ lock();
    __ cmpxchgl(tmp2, addr);
  } else
#endif
  {
    __ lock();
    __ cmpxchgptr(tmp2, addr);
  }

  // Step 4. Try to CAS again.
  //
  // This is guaranteed not to have false negatives, because oldval is definitely
  // to-space, and the memory pointer is to-space as well. Nothing is able to store
  // a from-space ptr into memory anymore. Make sure oldval is restored, after being
  // garbled during retries.
  //
#ifdef _LP64
  if (UseCompressedOops) {
    __ movl(oldval, tmp2);
  } else
#endif
  {
    __ movptr(oldval, tmp2);
  }

#ifdef _LP64
  if (UseCompressedOops) {
    __ lock();
    __ cmpxchgl(newval, addr);
  } else
#endif
  {
    __ lock();
    __ cmpxchgptr(newval, addr);
  }
  if (!exchange) {
    __ jccb(Assembler::equal, L_success); // fastpath, peeking into Step 5, no need to jump
  }

  // Step 5. If we need a boolean result out of CAS, set the flag appropriately,
  // and promote the result. Note that we handle the flag from both the 1st and 2nd CAS.
  // Otherwise, the failure witness for CAE is in oldval on all paths, and we can return.

  if (exchange) {
    __ bind(L_failure);
    __ bind(L_success);
  } else {
    assert(res != noreg, "need result register");

    Label exit;
    __ bind(L_failure);
    __ xorptr(res, res);
    __ jmpb(exit);

    __ bind(L_success);
    __ movptr(res, 1);
    __ bind(exit);
  }
}

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)

void ShenandoahBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                                     Register addr, Register count,
                                                                     Register tmp) {
  assert(ShenandoahCardBarrier, "Should have been checked by caller");

  ShenandoahBarrierSet* bs = ShenandoahBarrierSet::barrier_set();
  CardTable* ct = bs->card_table();
  intptr_t disp = (intptr_t) ct->byte_map_base();

  Label L_loop, L_done;
  const Register end = count;
  assert_different_registers(addr, end);

  // Zero count? Nothing to do.
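  // Otherwise 'end' (which aliases 'count') is turned into the index of the
  // last card relative to the first, and the loop below counts it down to
  // zero, writing the dirty value (0) into each card in the range.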
  __ testl(count, count);
  __ jccb(Assembler::zero, L_done);

#ifdef _LP64
  __ leaq(end, Address(addr, count, TIMES_OOP, 0)); // end == addr+count*oop_size
  __ subptr(end, BytesPerHeapOop);                  // end - 1 to make inclusive
  __ shrptr(addr, CardTable::card_shift());
  __ shrptr(end, CardTable::card_shift());
  __ subptr(end, addr);                             // end --> cards count

  __ mov64(tmp, disp);
  __ addptr(addr, tmp);

  __ BIND(L_loop);
  __ movb(Address(addr, count, Address::times_1), 0);
  __ decrement(count);
  __ jccb(Assembler::greaterEqual, L_loop);
#else
  __ lea(end, Address(addr, count, Address::times_ptr, -wordSize));
  __ shrptr(addr, CardTable::card_shift());
  __ shrptr(end, CardTable::card_shift());
  __ subptr(end, addr);                             // end --> count

  __ BIND(L_loop);
  Address cardtable(addr, count, Address::times_1, disp);
  __ movb(cardtable, 0);
  __ decrement(count);
  __ jccb(Assembler::greaterEqual, L_loop);
#endif

  __ BIND(L_done);
}

#undef __

#ifdef COMPILER1

#define __ ce->masm()->

void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(*stub->entry());
  assert(stub->pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/);
  }

  __ cmpptr(pre_val_reg, NULL_WORD);
  __ jcc(Assembler::equal, *stub->continuation());
  ce->store_parameter(stub->pre_val()->as_register(), 0);
  __ call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
  __ jmp(*stub->continuation());
}

void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  __ bind(*stub->entry());

  DecoratorSet decorators = stub->decorators();
  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);

  Register obj  = stub->obj()->as_register();
  Register res  = stub->result()->as_register();
  Register addr = stub->addr()->as_pointer_register();
  Register tmp1 = stub->tmp1()->as_register();
  Register tmp2 = stub->tmp2()->as_register();
  assert_different_registers(obj, res, addr, tmp1, tmp2);

  Label slow_path;

  assert(res == rax, "result must arrive in rax");

  if (res != obj) {
    __ mov(res, obj);
  }

  if (is_strong) {
    // Check for object being in the collection set.
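    // in_cset_fast_test is a byte map indexed by region number; the region
    // number is obtained by shifting the address right by the region size shift.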
    __ mov(tmp1, res);
    __ shrptr(tmp1, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ movptr(tmp2, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr());
#ifdef _LP64
    __ movbool(tmp2, Address(tmp2, tmp1, Address::times_1));
    __ testbool(tmp2);
#else
    // On x86_32, the C1 register allocator can give us a register without 8-bit support.
    // Do the full-register access and test to avoid compilation failures.
    __ movptr(tmp2, Address(tmp2, tmp1, Address::times_1));
    __ testptr(tmp2, 0xFF);
#endif
    __ jcc(Assembler::zero, *stub->continuation());
  }

  __ bind(slow_path);
  ce->store_parameter(res, 0);
  ce->store_parameter(addr, 1);
  if (is_strong) {
    if (is_native) {
      __ call(RuntimeAddress(bs->load_reference_barrier_strong_native_rt_code_blob()->code_begin()));
    } else {
      __ call(RuntimeAddress(bs->load_reference_barrier_strong_rt_code_blob()->code_begin()));
    }
  } else if (is_weak) {
    __ call(RuntimeAddress(bs->load_reference_barrier_weak_rt_code_blob()->code_begin()));
  } else {
    assert(is_phantom, "only remaining strength");
    __ call(RuntimeAddress(bs->load_reference_barrier_phantom_rt_code_blob()->code_begin()));
  }
  __ jmp(*stub->continuation());
}

#undef __

#define __ sasm->

void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  __ prologue("shenandoah_pre_barrier", false);
  // arg0 : previous value of memory

  __ push(rax);
  __ push(rdx);

  const Register pre_val = rax;
  const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
  const Register tmp = rdx;

  NOT_LP64(__ get_thread(thread);)

  Address queue_index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Label done;
  Label runtime;

  // Is SATB still active?
  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ testb(gc_state, ShenandoahHeap::MARKING);
  __ jcc(Assembler::zero, done);

  // Can we store the original value in the thread's buffer?
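  // The SATB queue index counts down in bytes; zero means the buffer is full
  // and the runtime must be called to flush it.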

  __ movptr(tmp, queue_index);
  __ testptr(tmp, tmp);
  __ jcc(Assembler::zero, runtime);
  __ subptr(tmp, wordSize);
  __ movptr(queue_index, tmp);
  __ addptr(tmp, buffer);

  // prev_val (rax)
  __ load_parameter(0, pre_val);
  __ movptr(Address(tmp, 0), pre_val);
  __ jmp(done);

  __ bind(runtime);

  __ save_live_registers_no_oop_map(true);

  // load the pre-value
  __ load_parameter(0, rcx);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), rcx, thread);

  __ restore_live_registers(true);

  __ bind(done);

  __ pop(rdx);
  __ pop(rax);

  __ epilogue();
}

void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm, DecoratorSet decorators) {
  __ prologue("shenandoah_load_reference_barrier", false);
  // arg0 : object to be resolved

  __ save_live_registers_no_oop_map(true);

  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);

#ifdef _LP64
  __ load_parameter(0, c_rarg0);
  __ load_parameter(1, c_rarg1);
  if (is_strong) {
    if (is_native) {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong), c_rarg0, c_rarg1);
    } else {
      if (UseCompressedOops) {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow), c_rarg0, c_rarg1);
      } else {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong), c_rarg0, c_rarg1);
      }
    }
  } else if (is_weak) {
    assert(!is_native, "weak must not be called off-heap");
    if (UseCompressedOops) {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow), c_rarg0, c_rarg1);
    } else {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak), c_rarg0, c_rarg1);
    }
  } else {
    assert(is_phantom, "only remaining strength");
    assert(is_native, "phantom must only be called off-heap");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom), c_rarg0, c_rarg1);
  }
#else
  __ load_parameter(0, rax);
  __ load_parameter(1, rbx);
  if (is_strong) {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong), rax, rbx);
  } else if (is_weak) {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak), rax, rbx);
  } else {
    assert(is_phantom, "only remaining strength");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom), rax, rbx);
  }
#endif

  __ restore_live_registers_except_rax(true);

  __ epilogue();
}

#undef __

#endif // COMPILER1