/*
 * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/codeBlob.hpp"
#include "code/vmreg.inline.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zBarrierSet.hpp"
#include "gc/z/zBarrierSetAssembler.hpp"
#include "gc/z/zBarrierSetRuntime.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/z/c1/zBarrierSetC1.hpp"
#endif // COMPILER1
#ifdef COMPILER2
#include "gc/z/c2/zBarrierSetC2.hpp"
#endif // COMPILER2

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#undef __
#define __ masm->

static void call_vm(MacroAssembler* masm,
                    address entry_point,
                    Register arg0,
                    Register arg1) {
  // Setup arguments
  if (arg1 == c_rarg0) {
    if (arg0 == c_rarg1) {
      __ xchgptr(c_rarg1, c_rarg0);
    } else {
      __ movptr(c_rarg1, arg1);
      __ movptr(c_rarg0, arg0);
    }
  } else {
    if (arg0 != c_rarg0) {
      __ movptr(c_rarg0, arg0);
    }
    if (arg1 != c_rarg1) {
      __ movptr(c_rarg1, arg1);
    }
  }

  // Call VM
  __ MacroAssembler::call_VM_leaf_base(entry_point, 2);
}

void ZBarrierSetAssembler::load_at(MacroAssembler* masm,
                                   DecoratorSet decorators,
                                   BasicType type,
                                   Register dst,
                                   Address src,
                                   Register tmp1,
                                   Register tmp_thread) {
  if (!ZBarrierSet::barrier_needed(decorators, type)) {
    // Barrier not needed
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
    return;
  }

  BLOCK_COMMENT("ZBarrierSetAssembler::load_at {");

  // Allocate scratch register
  Register scratch = tmp1;
  if (tmp1 == noreg) {
    scratch = r12;
    __ push(scratch);
  }

  assert_different_registers(dst, scratch);

  Label done;

  //
  // Fast Path
  //

  // Load address
  __ lea(scratch, src);

  // Load oop at address
  __ movptr(dst, Address(scratch, 0));

  // Test address bad mask
  __ testptr(dst, address_bad_mask_from_thread(r15_thread));
  __ jcc(Assembler::zero, done);

  //
  // Slow path
  //

  // Save registers
  __ push(rax);
  __ push(rcx);
  __ push(rdx);
  __ push(rdi);
  __ push(rsi);
  __ push(r8);
  __ push(r9);
  __ push(r10);
  __ push(r11);

  // We may end up here from generate_native_wrapper, in which case the method
  // may have floats as arguments, and we must spill them before calling the VM
  // runtime leaf. From the interpreter all floats are passed on the stack.
  assert(Argument::n_float_register_parameters_j == 8, "Assumption");
  const int xmm_size = wordSize * 2;
  const int xmm_spill_size = xmm_size * Argument::n_float_register_parameters_j;
  __ subptr(rsp, xmm_spill_size);
  __ movdqu(Address(rsp, xmm_size * 7), xmm7);
  __ movdqu(Address(rsp, xmm_size * 6), xmm6);
  __ movdqu(Address(rsp, xmm_size * 5), xmm5);
  __ movdqu(Address(rsp, xmm_size * 4), xmm4);
  __ movdqu(Address(rsp, xmm_size * 3), xmm3);
  __ movdqu(Address(rsp, xmm_size * 2), xmm2);
  __ movdqu(Address(rsp, xmm_size * 1), xmm1);
  __ movdqu(Address(rsp, xmm_size * 0), xmm0);

  // Call VM
  call_vm(masm, ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), dst, scratch);

  __ movdqu(xmm0, Address(rsp, xmm_size * 0));
  __ movdqu(xmm1, Address(rsp, xmm_size * 1));
  __ movdqu(xmm2, Address(rsp, xmm_size * 2));
  __ movdqu(xmm3, Address(rsp, xmm_size * 3));
  __ movdqu(xmm4, Address(rsp, xmm_size * 4));
  __ movdqu(xmm5, Address(rsp, xmm_size * 5));
  __ movdqu(xmm6, Address(rsp, xmm_size * 6));
  __ movdqu(xmm7, Address(rsp, xmm_size * 7));
  __ addptr(rsp, xmm_spill_size);

  __ pop(r11);
  __ pop(r10);
  __ pop(r9);
  __ pop(r8);
  __ pop(rsi);
  __ pop(rdi);
  __ pop(rdx);
  __ pop(rcx);

  if (dst == rax) {
    __ addptr(rsp, wordSize);
  } else {
    __ movptr(dst, rax);
    __ pop(rax);
  }

  __ bind(done);

  // Restore scratch register
  if (tmp1 == noreg) {
    __ pop(scratch);
  }

  BLOCK_COMMENT("} ZBarrierSetAssembler::load_at");
}

#ifdef ASSERT

void ZBarrierSetAssembler::store_at(MacroAssembler* masm,
                                    DecoratorSet decorators,
                                    BasicType type,
                                    Address dst,
                                    Register src,
                                    Register tmp1,
                                    Register tmp2,
                                    Register tmp3) {
  BLOCK_COMMENT("ZBarrierSetAssembler::store_at {");

  // Verify oop store
  if (is_reference_type(type)) {
    // Note that src could be noreg, which means we
    // are storing null and can skip verification.
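    // Note (added): the oop being stored is expected to have a good color,
    // i.e. no bits from the thread's current address bad mask set; a set bit
    // would indicate a value that was not healed by a load barrier.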
    if (src != noreg) {
      Label done;
      __ testptr(src, address_bad_mask_from_thread(r15_thread));
      __ jcc(Assembler::zero, done);
      __ stop("Verify oop store failed");
      __ should_not_reach_here();
      __ bind(done);
    }
  }

  // Store value
  BarrierSetAssembler::store_at(masm, decorators, type, dst, src, tmp1, tmp2, tmp3);

  BLOCK_COMMENT("} ZBarrierSetAssembler::store_at");
}

#endif // ASSERT

void ZBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm,
                                              DecoratorSet decorators,
                                              BasicType type,
                                              Register src,
                                              Register dst,
                                              Register count) {
  if (!ZBarrierSet::barrier_needed(decorators, type)) {
    // Barrier not needed
    return;
  }

  BLOCK_COMMENT("ZBarrierSetAssembler::arraycopy_prologue {");

  // Save registers
  __ pusha();

  // Call VM
  call_vm(masm, ZBarrierSetRuntime::load_barrier_on_oop_array_addr(), src, count);

  // Restore registers
  __ popa();

  BLOCK_COMMENT("} ZBarrierSetAssembler::arraycopy_prologue");
}

void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm,
                                                         Register jni_env,
                                                         Register obj,
                                                         Register tmp,
                                                         Label& slowpath) {
  BLOCK_COMMENT("ZBarrierSetAssembler::try_resolve_jobject_in_native {");

  // Resolve jobject
  BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath);

  // Test address bad mask
  __ testptr(obj, address_bad_mask_from_jni_env(jni_env));
  __ jcc(Assembler::notZero, slowpath);

  BLOCK_COMMENT("} ZBarrierSetAssembler::try_resolve_jobject_in_native");
}

#ifdef COMPILER1

#undef __
#define __ ce->masm()->

void ZBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce,
                                                         LIR_Opr ref) const {
  __ testptr(ref->as_register(), address_bad_mask_from_thread(r15_thread));
}

void ZBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce,
                                                         ZLoadBarrierStubC1* stub) const {
  // Stub entry
  __ bind(*stub->entry());

  Register ref = stub->ref()->as_register();
  Register ref_addr = noreg;
  Register tmp = noreg;

  if (stub->tmp()->is_valid()) {
    // Load address into tmp register
    ce->leal(stub->ref_addr(), stub->tmp());
    ref_addr = tmp = stub->tmp()->as_pointer_register();
  } else {
    // Address already in register
    ref_addr = stub->ref_addr()->as_address_ptr()->base()->as_pointer_register();
  }

  assert_different_registers(ref, ref_addr, noreg);

  // Save rax unless it is the result or tmp register
  if (ref != rax && tmp != rax) {
    __ push(rax);
  }

  // Setup arguments and call runtime stub
  __ subptr(rsp, 2 * BytesPerWord);
  ce->store_parameter(ref_addr, 1);
  ce->store_parameter(ref, 0);
  __ call(RuntimeAddress(stub->runtime_stub()));
  __ addptr(rsp, 2 * BytesPerWord);

  // Verify result
  __ verify_oop(rax);

  // Move result into place
  if (ref != rax) {
    __ movptr(ref, rax);
  }

  // Restore rax unless it is the result or tmp register
  if (ref != rax && tmp != rax) {
    __ pop(rax);
  }

  // Stub exit
  __ jmp(*stub->continuation());
}

#undef __
#define __ sasm->

void ZBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
                                                                 DecoratorSet decorators) const {
  // Enter and save registers
  __ enter();
  __ save_live_registers_no_oop_map(true /* save_fpu_registers */);

  // Setup arguments
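  // Note (added): the parameter slots were populated by
  // generate_c1_load_barrier_stub() above: slot 0 holds the reference,
  // slot 1 the reference address.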
  __ load_parameter(1, c_rarg1);
  __ load_parameter(0, c_rarg0);

  // Call VM
  __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), c_rarg0, c_rarg1);

  // Restore registers and return
  __ restore_live_registers_except_rax(true /* restore_fpu_registers */);
  __ leave();
  __ ret(0);
}

#endif // COMPILER1

#ifdef COMPILER2

OptoReg::Name ZBarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) {
  if (!OptoReg::is_reg(opto_reg)) {
    return OptoReg::Bad;
  }

  const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
  if (vm_reg->is_XMMRegister()) {
    opto_reg &= ~15;
    switch (node->ideal_reg()) {
      case Op_VecX:
        opto_reg |= 2;
        break;
      case Op_VecY:
        opto_reg |= 4;
        break;
      case Op_VecZ:
        opto_reg |= 8;
        break;
      default:
        opto_reg |= 1;
        break;
    }
  }

  return opto_reg;
}

// We use the vec_spill_helper from the x86.ad file to avoid reinventing this wheel
extern void vec_spill_helper(CodeBuffer *cbuf, bool is_load,
                             int stack_offset, int reg, uint ireg, outputStream* st);

#undef __
#define __ _masm->

class ZSaveLiveRegisters {
private:
  struct XMMRegisterData {
    XMMRegister _reg;
    int         _size;

    // Used by GrowableArray::find()
    bool operator == (const XMMRegisterData& other) {
      return _reg == other._reg;
    }
  };

  MacroAssembler* const          _masm;
  GrowableArray<Register>        _gp_registers;
  GrowableArray<KRegister>       _opmask_registers;
  GrowableArray<XMMRegisterData> _xmm_registers;
  int                            _spill_size;
  int                            _spill_offset;

  static int xmm_compare_register_size(XMMRegisterData* left, XMMRegisterData* right) {
    if (left->_size == right->_size) {
      return 0;
    }

    return (left->_size < right->_size) ? -1 : 1;
  }

  static int xmm_slot_size(OptoReg::Name opto_reg) {
    // The low order 4 bits denote what size of the XMM register is live
    return (opto_reg & 15) << 3;
  }

  static uint xmm_ideal_reg_for_size(int reg_size) {
    switch (reg_size) {
      case 8:
        return Op_VecD;
      case 16:
        return Op_VecX;
      case 32:
        return Op_VecY;
      case 64:
        return Op_VecZ;
      default:
        fatal("Invalid register size %d", reg_size);
        return 0;
    }
  }

  bool xmm_needs_vzeroupper() const {
    return _xmm_registers.is_nonempty() && _xmm_registers.at(0)._size > 16;
  }

  void xmm_register_save(const XMMRegisterData& reg_data) {
    const OptoReg::Name opto_reg = OptoReg::as_OptoReg(reg_data._reg->as_VMReg());
    const uint ideal_reg = xmm_ideal_reg_for_size(reg_data._size);
    _spill_offset -= reg_data._size;
    vec_spill_helper(__ code(), false /* is_load */, _spill_offset, opto_reg, ideal_reg, tty);
  }

  void xmm_register_restore(const XMMRegisterData& reg_data) {
    const OptoReg::Name opto_reg = OptoReg::as_OptoReg(reg_data._reg->as_VMReg());
    const uint ideal_reg = xmm_ideal_reg_for_size(reg_data._size);
    vec_spill_helper(__ code(), true /* is_load */, _spill_offset, opto_reg, ideal_reg, tty);
    _spill_offset += reg_data._size;
  }

  void gp_register_save(Register reg) {
    _spill_offset -= 8;
    __ movq(Address(rsp, _spill_offset), reg);
  }

  void opmask_register_save(KRegister reg) {
    _spill_offset -= 8;
    __ kmov(Address(rsp, _spill_offset), reg);
  }

  void gp_register_restore(Register reg) {
    __ movq(reg, Address(rsp, _spill_offset));
    _spill_offset += 8;
  }

  void opmask_register_restore(KRegister reg) {
    __ kmov(reg, Address(rsp, _spill_offset));
    _spill_offset += 8;
  }

  void initialize(ZLoadBarrierStubC2* stub) {
    // Create mask of caller saved registers that need to
    // be saved/restored if live
    RegMask caller_saved;
    caller_saved.Insert(OptoReg::as_OptoReg(rax->as_VMReg()));
    caller_saved.Insert(OptoReg::as_OptoReg(rcx->as_VMReg()));
    caller_saved.Insert(OptoReg::as_OptoReg(rdx->as_VMReg()));
    caller_saved.Insert(OptoReg::as_OptoReg(rsi->as_VMReg()));
    caller_saved.Insert(OptoReg::as_OptoReg(rdi->as_VMReg()));
    caller_saved.Insert(OptoReg::as_OptoReg(r8->as_VMReg()));
    caller_saved.Insert(OptoReg::as_OptoReg(r9->as_VMReg()));
    caller_saved.Insert(OptoReg::as_OptoReg(r10->as_VMReg()));
    caller_saved.Insert(OptoReg::as_OptoReg(r11->as_VMReg()));
    caller_saved.Remove(OptoReg::as_OptoReg(stub->ref()->as_VMReg()));

    // Create mask of live registers
    RegMask live = stub->live();
    if (stub->tmp() != noreg) {
      live.Insert(OptoReg::as_OptoReg(stub->tmp()->as_VMReg()));
    }

    int gp_spill_size = 0;
    int opmask_spill_size = 0;
    int xmm_spill_size = 0;

    // Record registers that need to be saved/restored
    RegMaskIterator rmi(live);
    while (rmi.has_next()) {
      const OptoReg::Name opto_reg = rmi.next();
      const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);

      if (vm_reg->is_Register()) {
        if (caller_saved.Member(opto_reg)) {
          _gp_registers.append(vm_reg->as_Register());
          gp_spill_size += 8;
        }
      } else if (vm_reg->is_KRegister()) {
        // All opmask registers are caller saved, thus spill the ones
        // which are live.
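        // Note (added): a k register may be encountered more than once while
        // iterating the live mask, so the find() check below ensures each one
        // gets a single 8-byte spill slot.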
        if (_opmask_registers.find(vm_reg->as_KRegister()) == -1) {
          _opmask_registers.append(vm_reg->as_KRegister());
          opmask_spill_size += 8;
        }
      } else if (vm_reg->is_XMMRegister()) {
        // We encode in the low order 4 bits of the opto_reg, how large part of the register is live
        const VMReg vm_reg_base = OptoReg::as_VMReg(opto_reg & ~15);
        const int reg_size = xmm_slot_size(opto_reg);
        const XMMRegisterData reg_data = { vm_reg_base->as_XMMRegister(), reg_size };
        const int reg_index = _xmm_registers.find(reg_data);
        if (reg_index == -1) {
          // Not previously appended
          _xmm_registers.append(reg_data);
          xmm_spill_size += reg_size;
        } else {
          // Previously appended, update size
          const int reg_size_prev = _xmm_registers.at(reg_index)._size;
          if (reg_size > reg_size_prev) {
            _xmm_registers.at_put(reg_index, reg_data);
            xmm_spill_size += reg_size - reg_size_prev;
          }
        }
      } else {
        fatal("Unexpected register type");
      }
    }

    // Sort by size, largest first
    _xmm_registers.sort(xmm_compare_register_size);

    // On Windows, the caller reserves stack space for spilling register arguments
    const int arg_spill_size = frame::arg_reg_save_area_bytes;

    // Stack pointer must be 16 bytes aligned for the call
    _spill_offset = _spill_size = align_up(xmm_spill_size + gp_spill_size + opmask_spill_size + arg_spill_size, 16);
  }

public:
  ZSaveLiveRegisters(MacroAssembler* masm, ZLoadBarrierStubC2* stub) :
      _masm(masm),
      _gp_registers(),
      _opmask_registers(),
      _xmm_registers(),
      _spill_size(0),
      _spill_offset(0) {

    //
    // Stack layout after registers have been spilled:
    //
    // | ...              | original rsp, 16 bytes aligned
    // ------------------
    // | zmm0 high        |
    // | ...              |
    // | zmm0 low         | 16 bytes aligned
    // | ...              |
    // | ymm1 high        |
    // | ...              |
    // | ymm1 low         | 16 bytes aligned
    // | ...              |
    // | xmmN high        |
    // | ...              |
    // | xmmN low         | 8 bytes aligned
    // | reg0             | 8 bytes aligned
    // | reg1             |
    // | ...              |
    // | regN             | new rsp, if 16 bytes aligned
    // | <padding>        | else new rsp, 16 bytes aligned
    // ------------------
    //

    // Figure out what registers to save/restore
    initialize(stub);

    // Allocate stack space
    if (_spill_size > 0) {
      __ subptr(rsp, _spill_size);
    }

    // Save XMM/YMM/ZMM registers
    for (int i = 0; i < _xmm_registers.length(); i++) {
      xmm_register_save(_xmm_registers.at(i));
    }

    if (xmm_needs_vzeroupper()) {
      __ vzeroupper();
    }

    // Save general purpose registers
    for (int i = 0; i < _gp_registers.length(); i++) {
      gp_register_save(_gp_registers.at(i));
    }

    // Save opmask registers
    for (int i = 0; i < _opmask_registers.length(); i++) {
      opmask_register_save(_opmask_registers.at(i));
    }
  }

  ~ZSaveLiveRegisters() {
    // Restore opmask registers
    for (int i = _opmask_registers.length() - 1; i >= 0; i--) {
      opmask_register_restore(_opmask_registers.at(i));
    }

    // Restore general purpose registers
    for (int i = _gp_registers.length() - 1; i >= 0; i--) {
      gp_register_restore(_gp_registers.at(i));
    }

    __ vzeroupper();

    // Restore XMM/YMM/ZMM registers
    for (int i = _xmm_registers.length() - 1; i >= 0; i--) {
      xmm_register_restore(_xmm_registers.at(i));
    }

    // Free stack space
    if (_spill_size > 0) {
      __ addptr(rsp, _spill_size);
    }
  }
};

class ZSetupArguments {
private:
  MacroAssembler* const _masm;
  const Register        _ref;
  const Address         _ref_addr;

public:
  ZSetupArguments(MacroAssembler* masm, ZLoadBarrierStubC2* stub) :
      _masm(masm),
      _ref(stub->ref()),
      _ref_addr(stub->ref_addr()) {

    // Setup arguments
    if (_ref_addr.base() == noreg) {
      // No self healing
      if (_ref != c_rarg0) {
        __ movq(c_rarg0, _ref);
      }
      __ xorq(c_rarg1, c_rarg1);
    } else {
      // Self healing
      if (_ref == c_rarg0) {
        __ lea(c_rarg1, _ref_addr);
      } else if (_ref != c_rarg1) {
        __ lea(c_rarg1, _ref_addr);
        __ movq(c_rarg0, _ref);
      } else if (_ref_addr.base() != c_rarg0 && _ref_addr.index() != c_rarg0) {
        __ movq(c_rarg0, _ref);
        __ lea(c_rarg1, _ref_addr);
      } else {
        __ xchgq(c_rarg0, c_rarg1);
        if (_ref_addr.base() == c_rarg0) {
          __ lea(c_rarg1, Address(c_rarg1, _ref_addr.index(), _ref_addr.scale(), _ref_addr.disp()));
        } else if (_ref_addr.index() == c_rarg0) {
          __ lea(c_rarg1, Address(_ref_addr.base(), c_rarg1, _ref_addr.scale(), _ref_addr.disp()));
        } else {
          ShouldNotReachHere();
        }
      }
    }
  }

  ~ZSetupArguments() {
    // Transfer result
    if (_ref != rax) {
      __ movq(_ref, rax);
    }
  }
};

#undef __
#define __ masm->

void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, ZLoadBarrierStubC2* stub) const {
  BLOCK_COMMENT("ZLoadBarrierStubC2");

  // Stub entry
  __ bind(*stub->entry());

  {
    ZSaveLiveRegisters save_live_registers(masm, stub);
    ZSetupArguments setup_arguments(masm, stub);
    __ call(RuntimeAddress(stub->slow_path()));
  }

  // Stub exit
  __ jmp(*stub->continuation());
}

#undef __

#endif // COMPILER2