< prev index next >

src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp

Print this page

  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include <sys/types.h>
  27 
  28 #include "precompiled.hpp"
  29 #include "jvm.h"
  30 #include "asm/assembler.hpp"
  31 #include "asm/assembler.inline.hpp"
  32 #include "ci/ciEnv.hpp"

  33 #include "gc/shared/barrierSet.hpp"
  34 #include "gc/shared/barrierSetAssembler.hpp"
  35 #include "gc/shared/cardTableBarrierSet.hpp"
  36 #include "gc/shared/cardTable.hpp"
  37 #include "gc/shared/collectedHeap.hpp"
  38 #include "gc/shared/tlab_globals.hpp"
  39 #include "interpreter/bytecodeHistogram.hpp"
  40 #include "interpreter/interpreter.hpp"
  41 #include "compiler/compileTask.hpp"
  42 #include "compiler/disassembler.hpp"
  43 #include "memory/resourceArea.hpp"
  44 #include "memory/universe.hpp"
  45 #include "nativeInst_aarch64.hpp"
  46 #include "oops/accessDecorators.hpp"
  47 #include "oops/compressedOops.inline.hpp"
  48 #include "oops/klass.inline.hpp"
  49 #include "runtime/icache.hpp"
  50 #include "runtime/interfaceSupport.inline.hpp"
  51 #include "runtime/jniHandles.inline.hpp"
  52 #include "runtime/sharedRuntime.hpp"

 279     uint32_t *insns = (uint32_t *)insn_addr;
 280     // Move wide constant: movz, movk, movk.  See movptr().
 281     assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
 282     assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
 283     return address(uint64_t(Instruction_aarch64::extract(insns[0], 20, 5))
 284                    + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
 285                    + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
 286   } else {
 287     ShouldNotReachHere();
 288   }
 289   return address(((uint64_t)insn_addr + (offset << 2)));
 290 }
 291 
 292 address MacroAssembler::target_addr_for_insn_or_null(address insn_addr, unsigned insn) {
 293   if (NativeInstruction::is_ldrw_to_zr(address(&insn))) {
 294     return 0;
 295   }
 296   return MacroAssembler::target_addr_for_insn(insn_addr, insn);
 297 }
 298 
// Emit a safepoint poll: load the thread-local polling word and branch to
// slow_path when the poll is armed.
//   acquire    - load the polling word with acquire semantics (ldar).
//   at_return  - the poll sits at a method return: compare the polling word
//                (the stack watermark) against the frame pointer (or sp when
//                in_nmethod) and branch when the frame lies above it.
//   in_nmethod - see the comment below; selects sp instead of rfp.
// Clobbers rscratch1.
void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod) {
  if (acquire) {
    lea(rscratch1, Address(rthread, JavaThread::polling_word_offset()));
    ldar(rscratch1, rscratch1);
  } else {
    ldr(rscratch1, Address(rthread, JavaThread::polling_word_offset()));
  }
  if (at_return) {
    // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore,
    // we may safely use the sp instead to perform the stack watermark check.
    cmp(in_nmethod ? sp : rfp, rscratch1);
    br(Assembler::HI, slow_path);
  } else {
    // Not at a return: a pending safepoint is signalled by the poll bit in
    // the polling word; branch to slow_path when that bit is set.
    tbnz(rscratch1, log2i_exact(SafepointMechanism::poll_bit()), slow_path);
  }
}
 315 
// Clear the last-Java-frame anchor in the current thread: always zero
// last_Java_sp and last_Java_pc, and zero last_Java_fp when clear_fp is set.
void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
  // we must set sp to zero to clear frame
  str(zr, Address(rthread, JavaThread::last_Java_sp_offset()));

  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    str(zr, Address(rthread, JavaThread::last_Java_fp_offset()));
  }

  // Always clear the pc because it could have been set by make_walkable()
  str(zr, Address(rthread, JavaThread::last_Java_pc_offset()));
}
 329 
 330 // Calls to C land
 331 //
 332 // When entering C land, the rfp, & resp of the last Java frame have to be recorded

2147     return;
2148   }
2149   if (CheckCompressedOops) {
2150     Label ok;
2151     push(1 << rscratch1->encoding(), sp); // cmpptr trashes rscratch1
2152     cmpptr(rheapbase, ExternalAddress((address)CompressedOops::ptrs_base_addr()));
2153     br(Assembler::EQ, ok);
2154     stop(msg);
2155     bind(ok);
2156     pop(1 << rscratch1->encoding(), sp);
2157   }
2158 #endif
2159 }
2160 #endif
2161 
2162 void MacroAssembler::resolve_jobject(Register value, Register thread, Register tmp) {
2163   Label done, not_weak;
2164   cbz(value, done);           // Use NULL as-is.
2165 
2166   STATIC_ASSERT(JNIHandles::weak_tag_mask == 1u);
2167   tbz(r0, 0, not_weak);    // Test for jweak tag.
2168 
2169   // Resolve jweak.
2170   access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, value,
2171                  Address(value, -JNIHandles::weak_tag_value), tmp, thread);
2172   verify_oop(value);
2173   b(done);
2174 
2175   bind(not_weak);
2176   // Resolve (untagged) jobject.
2177   access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, 0), tmp, thread);
2178   verify_oop(value);
2179   bind(done);
2180 }
2181 
// Halt execution with a message: emit a dcps1 debug trap (immediate 0xdeae)
// followed inline by the 64-bit address of the message string (presumably
// recovered by the trap handler -- confirm in the signal handling code).
void MacroAssembler::stop(const char* msg) {
  BLOCK_COMMENT(msg);
  dcps1(0xdeae);
  emit_int64((uintptr_t)msg);
}
2187 

5265   assert(line.index() == noreg, "index should be noreg");
5266   assert(line.offset() == 0, "offset should be 0");
5267   // would like to assert this
5268   // assert(line._ext.shift == 0, "shift should be zero");
5269   if (VM_Version::supports_dcpop()) {
5270     // writeback using clear virtual address to point of persistence
5271     dc(Assembler::CVAP, line.base());
5272   } else {
5273     // no need to generate anything as Unsafe.writebackMemory should
5274     // never invoke this stub
5275   }
5276 }
5277 
// Emit the synchronization half of a cache writeback: nothing before the
// writeback, a full AnyAny barrier after it.
void MacroAssembler::cache_wbsync(bool is_pre) {
  // we only need a barrier post sync
  if (!is_pre) {
    membar(Assembler::AnyAny);
  }
}
5284 
// Debug check that the SVE vector length still matches the value captured
// at JVM startup: sve_inc adds the per-vector B-lane count to a zeroed
// register, which is then compared against the initial length; mismatch
// stops the VM.  No-op when SVE is not in use.  Clobbers rscratch1.
void MacroAssembler::verify_sve_vector_length() {
  // Make sure that native code does not change SVE vector length.
  if (!UseSVE) return;
  Label verify_ok;
  movw(rscratch1, zr);
  sve_inc(rscratch1, B);  // rscratch1 += number of byte-sized SVE lanes
  subsw(zr, rscratch1, VM_Version::get_initial_sve_vector_length());
  br(EQ, verify_ok);
  stop("Error: SVE vector length has changed since jvm startup");
  bind(verify_ok);
}
5296 
// Debug check that the preserved predicate register (p7, aliased as ptrue)
// still has every element true: count its true B-sized elements, subtract
// one vector's worth of lanes with sve_dec, and expect zero.  No-op when
// SVE is not in use.  Clobbers rscratch1.
void MacroAssembler::verify_ptrue() {
  Label verify_ok;
  if (!UseSVE) {
    return;
  }
  sve_cntp(rscratch1, B, ptrue, ptrue); // get true elements count.
  sve_dec(rscratch1, B);
  cbz(rscratch1, verify_ok);
  stop("Error: the preserved predicate register (p7) elements are not all true");
  bind(verify_ok);
}
5308 
5309 void MacroAssembler::safepoint_isb() {
5310   isb();
5311 #ifndef PRODUCT

5431 void MacroAssembler::strip_return_address() {
5432   if (VM_Version::use_rop_protection()) {
5433     xpaclri();
5434   }
5435 }
5436 
5437 #ifndef PRODUCT
5438 // PAC failures can be difficult to debug. After an authentication failure, a segfault will only
5439 // occur when the pointer is used - ie when the program returns to the invalid LR. At this point
5440 // it is difficult to debug back to the callee function.
5441 // This function simply loads from the address in the given register.
5442 // Use directly after authentication to catch authentication failures.
5443 // Also use before signing to check that the pointer is valid and hasn't already been signed.
5444 //
5445 void MacroAssembler::check_return_address(Register return_reg) {
5446   if (VM_Version::use_rop_protection()) {
5447     ldr(zr, Address(return_reg));
5448   }
5449 }
5450 #endif

















































































































































































  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include <sys/types.h>
  27 
  28 #include "precompiled.hpp"
  29 #include "jvm.h"
  30 #include "asm/assembler.hpp"
  31 #include "asm/assembler.inline.hpp"
  32 #include "ci/ciEnv.hpp"
  33 #include "compiler/oopMap.hpp"
  34 #include "gc/shared/barrierSet.hpp"
  35 #include "gc/shared/barrierSetAssembler.hpp"
  36 #include "gc/shared/cardTableBarrierSet.hpp"
  37 #include "gc/shared/cardTable.hpp"
  38 #include "gc/shared/collectedHeap.hpp"
  39 #include "gc/shared/tlab_globals.hpp"
  40 #include "interpreter/bytecodeHistogram.hpp"
  41 #include "interpreter/interpreter.hpp"
  42 #include "compiler/compileTask.hpp"
  43 #include "compiler/disassembler.hpp"
  44 #include "memory/resourceArea.hpp"
  45 #include "memory/universe.hpp"
  46 #include "nativeInst_aarch64.hpp"
  47 #include "oops/accessDecorators.hpp"
  48 #include "oops/compressedOops.inline.hpp"
  49 #include "oops/klass.inline.hpp"
  50 #include "runtime/icache.hpp"
  51 #include "runtime/interfaceSupport.inline.hpp"
  52 #include "runtime/jniHandles.inline.hpp"
  53 #include "runtime/sharedRuntime.hpp"

 280     uint32_t *insns = (uint32_t *)insn_addr;
 281     // Move wide constant: movz, movk, movk.  See movptr().
 282     assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
 283     assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
 284     return address(uint64_t(Instruction_aarch64::extract(insns[0], 20, 5))
 285                    + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
 286                    + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
 287   } else {
 288     ShouldNotReachHere();
 289   }
 290   return address(((uint64_t)insn_addr + (offset << 2)));
 291 }
 292 
 293 address MacroAssembler::target_addr_for_insn_or_null(address insn_addr, unsigned insn) {
 294   if (NativeInstruction::is_ldrw_to_zr(address(&insn))) {
 295     return 0;
 296   }
 297   return MacroAssembler::target_addr_for_insn(insn_addr, insn);
 298 }
 299 
// Emit a safepoint poll: load the thread-local polling word into tmp and
// branch to slow_path when the poll is armed.
//   acquire    - load the polling word with acquire semantics (ldar).
//   at_return  - the poll sits at a method return: compare the polling word
//                (the stack watermark) against the frame pointer (or sp when
//                in_nmethod) and branch when the frame lies above it.
//   in_nmethod - see the comment below; selects sp instead of rfp.
//   tmp        - scratch register, clobbered.
void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod, Register tmp) {
  if (acquire) {
    lea(tmp, Address(rthread, JavaThread::polling_word_offset()));
    ldar(tmp, tmp);
  } else {
    ldr(tmp, Address(rthread, JavaThread::polling_word_offset()));
  }
  if (at_return) {
    // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore,
    // we may safely use the sp instead to perform the stack watermark check.
    cmp(in_nmethod ? sp : rfp, tmp);
    br(Assembler::HI, slow_path);
  } else {
    // Not at a return: a pending safepoint is signalled by the poll bit in
    // the polling word; branch to slow_path when that bit is set.
    tbnz(tmp, log2i_exact(SafepointMechanism::poll_bit()), slow_path);
  }
}
 316 
 317 void MacroAssembler::rt_call(address dest, Register tmp) {
 318   CodeBlob *cb = CodeCache::find_blob(dest);
 319   if (cb) {
 320     far_call(RuntimeAddress(dest));
 321   } else {
 322     lea(tmp, RuntimeAddress(dest));
 323     blr(tmp);
 324   }
 325 }
 326 
// Clear the last-Java-frame anchor in the current thread: always zero
// last_Java_sp and last_Java_pc, and zero last_Java_fp when clear_fp is set.
void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
  // we must set sp to zero to clear frame
  str(zr, Address(rthread, JavaThread::last_Java_sp_offset()));

  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    str(zr, Address(rthread, JavaThread::last_Java_fp_offset()));
  }

  // Always clear the pc because it could have been set by make_walkable()
  str(zr, Address(rthread, JavaThread::last_Java_pc_offset()));
}
 340 
 341 // Calls to C land
 342 //
 343 // When entering C land, the rfp, & resp of the last Java frame have to be recorded

2158     return;
2159   }
2160   if (CheckCompressedOops) {
2161     Label ok;
2162     push(1 << rscratch1->encoding(), sp); // cmpptr trashes rscratch1
2163     cmpptr(rheapbase, ExternalAddress((address)CompressedOops::ptrs_base_addr()));
2164     br(Assembler::EQ, ok);
2165     stop(msg);
2166     bind(ok);
2167     pop(1 << rscratch1->encoding(), sp);
2168   }
2169 #endif
2170 }
2171 #endif
2172 
// Resolve the jobject handle in "value" to the oop it refers to, in place.
// A NULL handle passes through unchanged.  A handle with the low (jweak)
// tag bit set is resolved through the GC's phantom-reference load barrier;
// an untagged handle with a plain IN_NATIVE load.  Clobbers value and tmp.
void MacroAssembler::resolve_jobject(Register value, Register thread, Register tmp) {
  Label done, not_weak;
  cbz(value, done);           // Use NULL as-is.

  STATIC_ASSERT(JNIHandles::weak_tag_mask == 1u);
  tbz(value, 0, not_weak);    // Test for jweak tag.

  // Resolve jweak: the tag is subtracted out of the address.
  access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, value,
                 Address(value, -JNIHandles::weak_tag_value), tmp, thread);
  verify_oop(value);
  b(done);

  bind(not_weak);
  // Resolve (untagged) jobject.
  access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, 0), tmp, thread);
  verify_oop(value);
  bind(done);
}
2192 
// Halt execution with a message: emit a dcps1 debug trap (immediate 0xdeae)
// followed inline by the 64-bit address of the message string (presumably
// recovered by the trap handler -- confirm in the signal handling code).
void MacroAssembler::stop(const char* msg) {
  BLOCK_COMMENT(msg);
  dcps1(0xdeae);
  emit_int64((uintptr_t)msg);
}
2198 

5276   assert(line.index() == noreg, "index should be noreg");
5277   assert(line.offset() == 0, "offset should be 0");
5278   // would like to assert this
5279   // assert(line._ext.shift == 0, "shift should be zero");
5280   if (VM_Version::supports_dcpop()) {
5281     // writeback using clear virtual address to point of persistence
5282     dc(Assembler::CVAP, line.base());
5283   } else {
5284     // no need to generate anything as Unsafe.writebackMemory should
5285     // never invoke this stub
5286   }
5287 }
5288 
5289 void MacroAssembler::cache_wbsync(bool is_pre) {
5290   // we only need a barrier post sync
5291   if (!is_pre) {
5292     membar(Assembler::AnyAny);
5293   }
5294 }
5295 
// Debug check that the SVE vector length still matches the value captured
// at JVM startup: sve_inc adds the per-vector B-lane count to a zeroed
// register, which is then compared against the initial length; mismatch
// stops the VM.  No-op when SVE is not in use.  Clobbers tmp.
void MacroAssembler::verify_sve_vector_length(Register tmp) {
  // Make sure that native code does not change SVE vector length.
  if (!UseSVE) return;
  Label verify_ok;
  movw(tmp, zr);
  sve_inc(tmp, B);  // tmp += number of byte-sized SVE lanes
  subsw(zr, tmp, VM_Version::get_initial_sve_vector_length());
  br(EQ, verify_ok);
  stop("Error: SVE vector length has changed since jvm startup");
  bind(verify_ok);
}
5307 
// Debug check that the preserved predicate register (p7, aliased as ptrue)
// still has every element true: count its true B-sized elements, subtract
// one vector's worth of lanes with sve_dec, and expect zero.  No-op when
// SVE is not in use.  Clobbers rscratch1.
void MacroAssembler::verify_ptrue() {
  Label verify_ok;
  if (!UseSVE) {
    return;
  }
  sve_cntp(rscratch1, B, ptrue, ptrue); // get true elements count.
  sve_dec(rscratch1, B);
  cbz(rscratch1, verify_ok);
  stop("Error: the preserved predicate register (p7) elements are not all true");
  bind(verify_ok);
}
5319 
5320 void MacroAssembler::safepoint_isb() {
5321   isb();
5322 #ifndef PRODUCT

5442 void MacroAssembler::strip_return_address() {
5443   if (VM_Version::use_rop_protection()) {
5444     xpaclri();
5445   }
5446 }
5447 
5448 #ifndef PRODUCT
5449 // PAC failures can be difficult to debug. After an authentication failure, a segfault will only
5450 // occur when the pointer is used - ie when the program returns to the invalid LR. At this point
5451 // it is difficult to debug back to the callee function.
5452 // This function simply loads from the address in the given register.
5453 // Use directly after authentication to catch authentication failures.
5454 // Also use before signing to check that the pointer is valid and hasn't already been signed.
5455 //
5456 void MacroAssembler::check_return_address(Register return_reg) {
5457   if (VM_Version::use_rop_protection()) {
5458     ldr(zr, Address(return_reg));
5459   }
5460 }
5461 #endif
5462 
5463 // The java_calling_convention describes stack locations as ideal slots on
5464 // a frame with no abi restrictions. Since we must observe abi restrictions
5465 // (like the placement of the register window) the slots must be biased by
5466 // the following value.
5467 static int reg2offset_in(VMReg r) {
5468   // Account for saved rfp and lr
5469   // This should really be in_preserve_stack_slots
5470   return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
5471 }
5472 
5473 static int reg2offset_out(VMReg r) {
5474   return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
5475 }
5476 
// On 64 bit we will store integer like items to the stack as
// 64 bits items (Aarch64 abi) even though java would only store
// 32bits for a parameter. On 32bit it will simply be 32 bits
// So this routine will do 32->32 on 32bit and 32->64 on 64bit
//
// Stack sources are read off rfp (incoming, caller frame); stack
// destinations are written off sp (outgoing area).  Clobbers tmp only on
// the stack-to-stack path.
void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst, Register tmp) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      ldr(tmp, Address(rfp, reg2offset_in(src.first())));
      str(tmp, Address(sp, reg2offset_out(dst.first())));
    } else {
      // stack to reg: ldrsw sign-extends the 32-bit slot to 64 bits.
      ldrsw(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Do we really have to sign extend???
    // __ movslq(src.first()->as_Register(), src.first()->as_Register());
    str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
  } else {
    // reg to reg: sxtw sign-extends the low 32 bits into the destination.
    if (dst.first() != src.first()) {
      sxtw(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}
5502 
// An oop arg. Must pass a handle not the oop itself
//
// Records the oop's location in the OopMap, places the oop in a stack slot
// (or notes its existing incoming slot), and passes the ADDRESS of that
// slot -- or NULL when the oop itself is NULL -- as the argument in dst.
// When is_receiver is set, *receiver_offset receives the byte offset of the
// receiver's slot.  May clobber rscratch1 and rscratch2.
void MacroAssembler::object_move(
                        OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {

  // must pass a handle. First figure out the location we use as a handle
  // (rscratch2 stands in when the destination is itself a stack slot).

  Register rHandle = dst.first()->is_stack() ? rscratch2 : dst.first()->as_Register();

  // See if oop is NULL if it is we need no handle

  if (src.first()->is_stack()) {

    // Oop is already on the stack as an argument
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }

    // Load the oop (to test for NULL) and take the slot's address as the handle.
    ldr(rscratch1, Address(rfp, reg2offset_in(src.first())));
    lea(rHandle, Address(rfp, reg2offset_in(src.first())));
    // conditionally move a NULL
    cmp(rscratch1, zr);
    csel(rHandle, zr, rHandle, Assembler::EQ);
  } else {

    // Oop is in an a register we must store it to the space we reserve
    // on the stack for oop_handles and pass a handle if oop is non-NULL

    // Registers j_rarg0..j_rarg7 map to handle slots 0..7.
    const Register rOop = src.first()->as_Register();
    int oop_slot;
    if (rOop == j_rarg0)
      oop_slot = 0;
    else if (rOop == j_rarg1)
      oop_slot = 1;
    else if (rOop == j_rarg2)
      oop_slot = 2;
    else if (rOop == j_rarg3)
      oop_slot = 3;
    else if (rOop == j_rarg4)
      oop_slot = 4;
    else if (rOop == j_rarg5)
      oop_slot = 5;
    else if (rOop == j_rarg6)
      oop_slot = 6;
    else {
      assert(rOop == j_rarg7, "wrong register");
      oop_slot = 7;
    }

    oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot*VMRegImpl::stack_slot_size;

    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    // Store oop in handle area, may be NULL
    str(rOop, Address(sp, offset));
    if (is_receiver) {
      *receiver_offset = offset;
    }

    cmp(rOop, zr);
    lea(rHandle, Address(sp, offset));
    // conditionally move a NULL
    csel(rHandle, zr, rHandle, Assembler::EQ);
  }

  // If arg is on the stack then place it otherwise it is already in correct reg.
  if (dst.first()->is_stack()) {
    str(rHandle, Address(sp, reg2offset_out(dst.first())));
  }
}
5580 
// A float arg may have to do float reg int reg conversion
//
// Moves a single-precision float argument between calling-convention
// locations.  Stack sources are read off rfp (incoming); stack
// destinations are written off sp (outgoing).  Stack-to-stack copies go
// through the integer register tmp with 32-bit loads/stores.
void MacroAssembler::float_move(VMRegPair src, VMRegPair dst, Register tmp) {
 if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      ldrw(tmp, Address(rfp, reg2offset_in(src.first())));
      strw(tmp, Address(sp, reg2offset_out(dst.first())));
    } else {
      ldrs(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (src.first() != dst.first()) {
    // Register source: move register-to-register when both are single
    // physical registers, otherwise spill to the destination stack slot.
    if (src.is_single_phys_reg() && dst.is_single_phys_reg())
      fmovs(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
    else
      strs(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first())));
  }
}
5597 
// A long move
//
// Moves a 64-bit integer argument between calling-convention locations.
// Stack sources are read off rfp (incoming); stack destinations are
// written off sp (outgoing).  Clobbers tmp only on the stack-to-stack path.
void MacroAssembler::long_move(VMRegPair src, VMRegPair dst, Register tmp) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      ldr(tmp, Address(rfp, reg2offset_in(src.first())));
      str(tmp, Address(sp, reg2offset_out(dst.first())));
    } else {
      // stack to reg
      ldr(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Do we really have to sign extend???
    // __ movslq(src.first()->as_Register(), src.first()->as_Register());
    str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
  } else {
    // reg to reg
    if (dst.first() != src.first()) {
      mov(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}
5620 
5621 
// A double move
//
// Moves a double-precision float argument between calling-convention
// locations.  Stack sources are read off rfp (incoming); stack
// destinations are written off sp (outgoing).  Stack-to-stack copies go
// through the integer register tmp with 64-bit loads/stores.
void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      ldr(tmp, Address(rfp, reg2offset_in(src.first())));
      str(tmp, Address(sp, reg2offset_out(dst.first())));
    } else {
      ldrd(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (src.first() != dst.first()) {
    // Register source: move register-to-register when both are single
    // physical registers, otherwise spill to the destination stack slot.
    if (src.is_single_phys_reg() && dst.is_single_phys_reg())
      fmovd(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
    else
      strd(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first())));
  }
}
< prev index next >