/*
 * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/codeBlob.hpp"
#include "code/vmreg.inline.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zBarrierSet.hpp"
#include "gc/z/zBarrierSetAssembler.hpp"
#include "gc/z/zBarrierSetRuntime.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/z/c1/zBarrierSetC1.hpp"
#endif // COMPILER1
#ifdef COMPILER2
#include "gc/z/c2/zBarrierSetC2.hpp"
#endif // COMPILER2

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#undef __
#define __ masm->
static void call_vm(MacroAssembler* masm,
                    address entry_point,
                    Register arg0,
                    Register arg1) {
  // Setup arguments
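  // arg0/arg1 may already occupy c_rarg0/c_rarg1; move or swap them so that
  // neither argument value is clobbered before the call.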
  if (arg1 == c_rarg0) {
    if (arg0 == c_rarg1) {
      __ xchgptr(c_rarg1, c_rarg0);
    } else {
      __ movptr(c_rarg1, arg1);
      __ movptr(c_rarg0, arg0);
    }
  } else {
    if (arg0 != c_rarg0) {
      __ movptr(c_rarg0, arg0);
    }
    if (arg1 != c_rarg1) {
      __ movptr(c_rarg1, arg1);
    }
  }

  // Call VM
  __ MacroAssembler::call_VM_leaf_base(entry_point, 2);
}

void ZBarrierSetAssembler::load_at(MacroAssembler* masm,
                                   DecoratorSet decorators,
                                   BasicType type,
                                   Register dst,
                                   Address src,
                                   Register tmp1,
                                   Register tmp_thread) {
  if (!ZBarrierSet::barrier_needed(decorators, type)) {
    // Barrier not needed
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
    return;
  }

  BLOCK_COMMENT("ZBarrierSetAssembler::load_at {");

  // Allocate scratch register
  Register scratch = tmp1;
  if (tmp1 == noreg) {
    scratch = r12;
    __ push(scratch);
  }

  assert_different_registers(dst, scratch);

  Label done;

  //
  // Fast Path
  //

  // Load address
  __ lea(scratch, src);

  // Load oop at address
  __ movptr(dst, Address(scratch, 0));

  // Test address bad mask
  __ testptr(dst, address_bad_mask_from_thread(r15_thread));
  __ jcc(Assembler::zero, done);

  //
  // Slow path
  //

  // Save registers
  __ push(rax);
  __ push(rcx);
  __ push(rdx);
  __ push(rdi);
  __ push(rsi);
  __ push(r8);
  __ push(r9);
  __ push(r10);
  __ push(r11);

  // We may end up here from generate_native_wrapper, in which case the method
  // may have floats as arguments, and we must spill them before calling the VM
  // runtime leaf. From the interpreter all floats are passed on the stack.
  assert(Argument::n_float_register_parameters_j == 8, "Assumption");
  const int xmm_size = wordSize * 2;
  const int xmm_spill_size = xmm_size * Argument::n_float_register_parameters_j;
  __ subptr(rsp, xmm_spill_size);
  __ movdqu(Address(rsp, xmm_size * 7), xmm7);
  __ movdqu(Address(rsp, xmm_size * 6), xmm6);
  __ movdqu(Address(rsp, xmm_size * 5), xmm5);
  __ movdqu(Address(rsp, xmm_size * 4), xmm4);
  __ movdqu(Address(rsp, xmm_size * 3), xmm3);
  __ movdqu(Address(rsp, xmm_size * 2), xmm2);
  __ movdqu(Address(rsp, xmm_size * 1), xmm1);
  __ movdqu(Address(rsp, xmm_size * 0), xmm0);

  // Call VM
  call_vm(masm, ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), dst, scratch);

  __ movdqu(xmm0, Address(rsp, xmm_size * 0));
  __ movdqu(xmm1, Address(rsp, xmm_size * 1));
  __ movdqu(xmm2, Address(rsp, xmm_size * 2));
  __ movdqu(xmm3, Address(rsp, xmm_size * 3));
  __ movdqu(xmm4, Address(rsp, xmm_size * 4));
  __ movdqu(xmm5, Address(rsp, xmm_size * 5));
  __ movdqu(xmm6, Address(rsp, xmm_size * 6));
  __ movdqu(xmm7, Address(rsp, xmm_size * 7));
  __ addptr(rsp, xmm_spill_size);

  __ pop(r11);
  __ pop(r10);
  __ pop(r9);
  __ pop(r8);
  __ pop(rsi);
  __ pop(rdi);
  __ pop(rdx);
  __ pop(rcx);

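  // The healed oop is returned in rax. If dst is rax, discard the rax value
  // saved above; otherwise move the result into dst and restore rax.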
  if (dst == rax) {
    __ addptr(rsp, wordSize);
  } else {
    __ movptr(dst, rax);
    __ pop(rax);
  }

  __ bind(done);

  // Restore scratch register
  if (tmp1 == noreg) {
    __ pop(scratch);
  }

  BLOCK_COMMENT("} ZBarrierSetAssembler::load_at");
}

#ifdef ASSERT

void ZBarrierSetAssembler::store_at(MacroAssembler* masm,
                                    DecoratorSet decorators,
                                    BasicType type,
                                    Address dst,
                                    Register src,
                                    Register tmp1,
                                    Register tmp2,
                                    Register tmp3) {
  BLOCK_COMMENT("ZBarrierSetAssembler::store_at {");

  assert(type != T_INLINE_TYPE, "Not supported yet");
  // Verify oop store
  if (is_reference_type(type)) {
    // Note that src could be noreg, which means we
    // are storing null and can skip verification.
    if (src != noreg) {
      Label done;
      __ testptr(src, address_bad_mask_from_thread(r15_thread));
      __ jcc(Assembler::zero, done);
      __ stop("Verify oop store failed");
      __ should_not_reach_here();
      __ bind(done);
    }
  }

  // Store value
  BarrierSetAssembler::store_at(masm, decorators, type, dst, src, tmp1, tmp2, tmp3);

  BLOCK_COMMENT("} ZBarrierSetAssembler::store_at");
}

#endif // ASSERT

void ZBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm,
                                              DecoratorSet decorators,
                                              BasicType type,
                                              Register src,
                                              Register dst,
                                              Register count) {
  if (!ZBarrierSet::barrier_needed(decorators, type)) {
    // Barrier not needed
    return;
  }

  BLOCK_COMMENT("ZBarrierSetAssembler::arraycopy_prologue {");

  // Save registers
  __ pusha();

  // Call VM
  call_vm(masm, ZBarrierSetRuntime::load_barrier_on_oop_array_addr(), src, count);

  // Restore registers
  __ popa();

  BLOCK_COMMENT("} ZBarrierSetAssembler::arraycopy_prologue");
}

void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm,
                                                         Register jni_env,
                                                         Register obj,
                                                         Register tmp,
                                                         Label& slowpath) {
  BLOCK_COMMENT("ZBarrierSetAssembler::try_resolve_jobject_in_native {");

  // Resolve jobject
  BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath);

  // Test address bad mask
  __ testptr(obj, address_bad_mask_from_jni_env(jni_env));
  __ jcc(Assembler::notZero, slowpath);

  BLOCK_COMMENT("} ZBarrierSetAssembler::try_resolve_jobject_in_native");
}

#ifdef COMPILER1

#undef __
#define __ ce->masm()->

void ZBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce,
                                                         LIR_Opr ref) const {
  __ testptr(ref->as_register(), address_bad_mask_from_thread(r15_thread));
}

void ZBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce,
                                                         ZLoadBarrierStubC1* stub) const {
  // Stub entry
  __ bind(*stub->entry());

  Register ref = stub->ref()->as_register();
  Register ref_addr = noreg;
  Register tmp = noreg;

  if (stub->tmp()->is_valid()) {
    // Load address into tmp register
    ce->leal(stub->ref_addr(), stub->tmp());
    ref_addr = tmp = stub->tmp()->as_pointer_register();
  } else {
    // Address already in register
    ref_addr = stub->ref_addr()->as_address_ptr()->base()->as_pointer_register();
  }

  assert_different_registers(ref, ref_addr, noreg);

  // Save rax unless it is the result or tmp register
  if (ref != rax && tmp != rax) {
    __ push(rax);
  }

  // Setup arguments and call runtime stub
  __ subptr(rsp, 2 * BytesPerWord);
  ce->store_parameter(ref_addr, 1);
  ce->store_parameter(ref, 0);
  __ call(RuntimeAddress(stub->runtime_stub()));
  __ addptr(rsp, 2 * BytesPerWord);

  // Verify result
  __ verify_oop(rax);

  // Move result into place
  if (ref != rax) {
    __ movptr(ref, rax);
  }

  // Restore rax unless it is the result or tmp register
  if (ref != rax && tmp != rax) {
    __ pop(rax);
  }

  // Stub exit
  __ jmp(*stub->continuation());
}

#undef __
#define __ sasm->

void ZBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
                                                                 DecoratorSet decorators) const {
  // Enter and save registers
  __ enter();
  __ save_live_registers_no_oop_map(true /* save_fpu_registers */);

  // Setup arguments
  __ load_parameter(1, c_rarg1);
  __ load_parameter(0, c_rarg0);

  // Call VM
  __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), c_rarg0, c_rarg1);

  // Restore registers and return
  __ restore_live_registers_except_rax(true /* restore_fpu_registers */);
  __ leave();
  __ ret(0);
}

#endif // COMPILER1

#ifdef COMPILER2

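// For XMM registers the low order 4 bits of the returned OptoReg::Name encode
// how many 8-byte slots of the register are live (1 = 8 bytes, 2 = 16 bytes,
// 4 = 32 bytes, 8 = 64 bytes). ZSaveLiveRegisters later decodes this via
// xmm_slot_size().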
OptoReg::Name ZBarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) {
  if (!OptoReg::is_reg(opto_reg)) {
    return OptoReg::Bad;
  }

  const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
  if (vm_reg->is_XMMRegister()) {
    opto_reg &= ~15;
    switch (node->ideal_reg()) {
      case Op_VecX:
        opto_reg |= 2;
        break;
      case Op_VecY:
        opto_reg |= 4;
        break;
      case Op_VecZ:
        opto_reg |= 8;
        break;
      default:
        opto_reg |= 1;
        break;
    }
  }

  return opto_reg;
}

// We use the vec_spill_helper from the x86.ad file to avoid reinventing this wheel
extern void vec_spill_helper(CodeBuffer *cbuf, bool is_load,
                            int stack_offset, int reg, uint ireg, outputStream* st);

#undef __
#define __ _masm->

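// RAII helper that spills the caller-saved registers which are live across a
// C2 load barrier stub's runtime call (constructor) and restores them again
// (destructor).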
class ZSaveLiveRegisters {
private:
  struct XMMRegisterData {
    XMMRegister _reg;
    int         _size;

    // Used by GrowableArray::find()
    bool operator == (const XMMRegisterData& other) {
      return _reg == other._reg;
    }
  };

  MacroAssembler* const          _masm;
  GrowableArray<Register>        _gp_registers;
  GrowableArray<KRegister>       _opmask_registers;
  GrowableArray<XMMRegisterData> _xmm_registers;
  int                            _spill_size;
  int                            _spill_offset;

  static int xmm_compare_register_size(XMMRegisterData* left, XMMRegisterData* right) {
    if (left->_size == right->_size) {
      return 0;
    }

    return (left->_size < right->_size) ? -1 : 1;
  }

  static int xmm_slot_size(OptoReg::Name opto_reg) {
    // The low order 4 bits denote how many 8-byte slots of the XMM register are live
    return (opto_reg & 15) << 3;
  }

  static uint xmm_ideal_reg_for_size(int reg_size) {
    switch (reg_size) {
    case 8:
      return Op_VecD;
    case 16:
      return Op_VecX;
    case 32:
      return Op_VecY;
    case 64:
      return Op_VecZ;
    default:
      fatal("Invalid register size %d", reg_size);
      return 0;
    }
  }

  bool xmm_needs_vzeroupper() const {
    return _xmm_registers.is_nonempty() && _xmm_registers.at(0)._size > 16;
  }

  void xmm_register_save(const XMMRegisterData& reg_data) {
    const OptoReg::Name opto_reg = OptoReg::as_OptoReg(reg_data._reg->as_VMReg());
    const uint ideal_reg = xmm_ideal_reg_for_size(reg_data._size);
    _spill_offset -= reg_data._size;
    vec_spill_helper(__ code(), false /* is_load */, _spill_offset, opto_reg, ideal_reg, tty);
  }

  void xmm_register_restore(const XMMRegisterData& reg_data) {
    const OptoReg::Name opto_reg = OptoReg::as_OptoReg(reg_data._reg->as_VMReg());
    const uint ideal_reg = xmm_ideal_reg_for_size(reg_data._size);
    vec_spill_helper(__ code(), true /* is_load */, _spill_offset, opto_reg, ideal_reg, tty);
    _spill_offset += reg_data._size;
  }

  void gp_register_save(Register reg) {
    _spill_offset -= 8;
    __ movq(Address(rsp, _spill_offset), reg);
  }

  void opmask_register_save(KRegister reg) {
    _spill_offset -= 8;
    __ kmovql(Address(rsp, _spill_offset), reg);
  }

  void gp_register_restore(Register reg) {
    __ movq(reg, Address(rsp, _spill_offset));
    _spill_offset += 8;
  }

  void opmask_register_restore(KRegister reg) {
    __ kmovql(reg, Address(rsp, _spill_offset));
    _spill_offset += 8;
  }

// Register is a class, but it is assigned a numerical value: rax is assigned
// the value 0, so we need to ignore -Wnonnull here.
PRAGMA_DIAG_PUSH
PRAGMA_NONNULL_IGNORED
  void initialize(ZLoadBarrierStubC2* stub) {
    // Create mask of caller saved registers that need to
    // be saved/restored if live
    RegMask caller_saved;
    caller_saved.Insert(OptoReg::as_OptoReg(rax->as_VMReg()));
    caller_saved.Insert(OptoReg::as_OptoReg(rcx->as_VMReg()));
    caller_saved.Insert(OptoReg::as_OptoReg(rdx->as_VMReg()));
    caller_saved.Insert(OptoReg::as_OptoReg(rsi->as_VMReg()));
    caller_saved.Insert(OptoReg::as_OptoReg(rdi->as_VMReg()));
    caller_saved.Insert(OptoReg::as_OptoReg(r8->as_VMReg()));
    caller_saved.Insert(OptoReg::as_OptoReg(r9->as_VMReg()));
    caller_saved.Insert(OptoReg::as_OptoReg(r10->as_VMReg()));
    caller_saved.Insert(OptoReg::as_OptoReg(r11->as_VMReg()));
    caller_saved.Remove(OptoReg::as_OptoReg(stub->ref()->as_VMReg()));

    // Create mask of live registers
    RegMask live = stub->live();
    if (stub->tmp() != noreg) {
      live.Insert(OptoReg::as_OptoReg(stub->tmp()->as_VMReg()));
    }

    int gp_spill_size = 0;
    int opmask_spill_size = 0;
    int xmm_spill_size = 0;

    // Record registers that need to be saved/restored
    RegMaskIterator rmi(live);
    while (rmi.has_next()) {
      const OptoReg::Name opto_reg = rmi.next();
      const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);

      if (vm_reg->is_Register()) {
        if (caller_saved.Member(opto_reg)) {
          _gp_registers.append(vm_reg->as_Register());
          gp_spill_size += 8;
        }
      } else if (vm_reg->is_KRegister()) {
        // All opmask registers are caller saved, thus spill the ones
        // that are live.
        if (_opmask_registers.find(vm_reg->as_KRegister()) == -1) {
          _opmask_registers.append(vm_reg->as_KRegister());
          opmask_spill_size += 8;
        }
      } else if (vm_reg->is_XMMRegister()) {
        // The low order 4 bits of the opto_reg encode how large a part of the register is live
        const VMReg vm_reg_base = OptoReg::as_VMReg(opto_reg & ~15);
        const int reg_size = xmm_slot_size(opto_reg);
        const XMMRegisterData reg_data = { vm_reg_base->as_XMMRegister(), reg_size };
        const int reg_index = _xmm_registers.find(reg_data);
        if (reg_index == -1) {
          // Not previously appended
          _xmm_registers.append(reg_data);
          xmm_spill_size += reg_size;
        } else {
          // Previously appended, update size
          const int reg_size_prev = _xmm_registers.at(reg_index)._size;
          if (reg_size > reg_size_prev) {
            _xmm_registers.at_put(reg_index, reg_data);
            xmm_spill_size += reg_size - reg_size_prev;
          }
        }
      } else {
        fatal("Unexpected register type");
      }
    }

    // Sort by size, largest first
    _xmm_registers.sort(xmm_compare_register_size);

    // On Windows, the caller reserves stack space for spilling register arguments
    const int arg_spill_size = frame::arg_reg_save_area_bytes;

    // Stack pointer must be 16 bytes aligned for the call
    _spill_offset = _spill_size = align_up(xmm_spill_size + gp_spill_size + opmask_spill_size + arg_spill_size, 16);
  }
PRAGMA_DIAG_POP

public:
  ZSaveLiveRegisters(MacroAssembler* masm, ZLoadBarrierStubC2* stub) :
      _masm(masm),
      _gp_registers(),
      _opmask_registers(),
      _xmm_registers(),
      _spill_size(0),
      _spill_offset(0) {

    //
    // Stack layout after registers have been spilled:
    //
    // | ...            | original rsp, 16 bytes aligned
    // ------------------
    // | zmm0 high      |
    // | ...            |
    // | zmm0 low       | 16 bytes aligned
    // | ...            |
    // | ymm1 high      |
    // | ...            |
    // | ymm1 low       | 16 bytes aligned
    // | ...            |
    // | xmmN high      |
    // | ...            |
    // | xmmN low       | 8 bytes aligned
    // | reg0           | 8 bytes aligned
    // | reg1           |
    // | ...            |
    // | regN           | new rsp, if 16 bytes aligned
    // | <padding>      | else new rsp, 16 bytes aligned
    // ------------------
    //

    // Figure out what registers to save/restore
    initialize(stub);

    // Allocate stack space
    if (_spill_size > 0) {
      __ subptr(rsp, _spill_size);
    }

    // Save XMM/YMM/ZMM registers
    for (int i = 0; i < _xmm_registers.length(); i++) {
      xmm_register_save(_xmm_registers.at(i));
    }

    if (xmm_needs_vzeroupper()) {
      __ vzeroupper();
    }

    // Save general purpose registers
    for (int i = 0; i < _gp_registers.length(); i++) {
      gp_register_save(_gp_registers.at(i));
    }

    // Save opmask registers
    for (int i = 0; i < _opmask_registers.length(); i++) {
      opmask_register_save(_opmask_registers.at(i));
    }
  }

  ~ZSaveLiveRegisters() {
    // Restore opmask registers
    for (int i = _opmask_registers.length() - 1; i >= 0; i--) {
      opmask_register_restore(_opmask_registers.at(i));
    }

    // Restore general purpose registers
    for (int i = _gp_registers.length() - 1; i >= 0; i--) {
      gp_register_restore(_gp_registers.at(i));
    }

    __ vzeroupper();

    // Restore XMM/YMM/ZMM registers
    for (int i = _xmm_registers.length() - 1; i >= 0; i--) {
      xmm_register_restore(_xmm_registers.at(i));
    }

    // Free stack space
    if (_spill_size > 0) {
      __ addptr(rsp, _spill_size);
    }
  }
};

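// RAII helper that places the load barrier stub's arguments in c_rarg0 (the
// preloaded oop) and c_rarg1 (the oop field address, or null when there is no
// address to self heal), and moves the returned oop from rax into the ref
// register on destruction.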
class ZSetupArguments {
private:
  MacroAssembler* const _masm;
  const Register        _ref;
  const Address         _ref_addr;

public:
  ZSetupArguments(MacroAssembler* masm, ZLoadBarrierStubC2* stub) :
      _masm(masm),
      _ref(stub->ref()),
      _ref_addr(stub->ref_addr()) {

    // Setup arguments
    if (_ref_addr.base() == noreg) {
      // No self healing
      if (_ref != c_rarg0) {
        __ movq(c_rarg0, _ref);
      }
      __ xorq(c_rarg1, c_rarg1);
    } else {
      // Self healing
      if (_ref == c_rarg0) {
        __ lea(c_rarg1, _ref_addr);
      } else if (_ref != c_rarg1) {
        __ lea(c_rarg1, _ref_addr);
        __ movq(c_rarg0, _ref);
      } else if (_ref_addr.base() != c_rarg0 && _ref_addr.index() != c_rarg0) {
        __ movq(c_rarg0, _ref);
        __ lea(c_rarg1, _ref_addr);
      } else {
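        // _ref is c_rarg1 and the address uses c_rarg0 as base or index: swap
        // the two registers, then rebuild the address with c_rarg1 substituted
        // for the component that was in c_rarg0.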
        __ xchgq(c_rarg0, c_rarg1);
        if (_ref_addr.base() == c_rarg0) {
          __ lea(c_rarg1, Address(c_rarg1, _ref_addr.index(), _ref_addr.scale(), _ref_addr.disp()));
        } else if (_ref_addr.index() == c_rarg0) {
          __ lea(c_rarg1, Address(_ref_addr.base(), c_rarg1, _ref_addr.scale(), _ref_addr.disp()));
        } else {
          ShouldNotReachHere();
        }
      }
    }
  }

  ~ZSetupArguments() {
    // Transfer result
    if (_ref != rax) {
      __ movq(_ref, rax);
    }
  }
};

#undef __
#define __ masm->

void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, ZLoadBarrierStubC2* stub) const {
  BLOCK_COMMENT("ZLoadBarrierStubC2");

  // Stub entry
  __ bind(*stub->entry());

  {
    ZSaveLiveRegisters save_live_registers(masm, stub);
    ZSetupArguments setup_arguments(masm, stub);
    __ call(RuntimeAddress(stub->slow_path()));
  }

  // Stub exit
  __ jmp(*stub->continuation());
}

#undef __

#endif // COMPILER2