< prev index next >

src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp

Print this page

  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include <sys/types.h>
  27 
  28 #include "precompiled.hpp"
  29 #include "jvm.h"
  30 #include "asm/assembler.hpp"
  31 #include "asm/assembler.inline.hpp"

  32 #include "gc/shared/barrierSet.hpp"
  33 #include "gc/shared/barrierSetAssembler.hpp"
  34 #include "gc/shared/cardTableBarrierSet.hpp"
  35 #include "gc/shared/cardTable.hpp"
  36 #include "gc/shared/collectedHeap.hpp"
  37 #include "gc/shared/tlab_globals.hpp"
  38 #include "interpreter/bytecodeHistogram.hpp"
  39 #include "interpreter/interpreter.hpp"
  40 #include "compiler/compileTask.hpp"
  41 #include "compiler/disassembler.hpp"
  42 #include "memory/resourceArea.hpp"
  43 #include "memory/universe.hpp"
  44 #include "nativeInst_aarch64.hpp"
  45 #include "oops/accessDecorators.hpp"
  46 #include "oops/compressedOops.inline.hpp"
  47 #include "oops/klass.inline.hpp"
  48 #include "runtime/icache.hpp"
  49 #include "runtime/interfaceSupport.inline.hpp"
  50 #include "runtime/jniHandles.inline.hpp"
  51 #include "runtime/sharedRuntime.hpp"

 275     } else {
 276       ShouldNotReachHere();
 277     }
 278   } else if (Instruction_aarch64::extract(insn, 31, 23) == 0b110100101) {
 279     uint32_t *insns = (uint32_t *)insn_addr;
 280     // Move wide constant: movz, movk, movk.  See movptr().
 281     assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
 282     assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
 283     return address(uint64_t(Instruction_aarch64::extract(insns[0], 20, 5))
 284                    + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
 285                    + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
 286   } else if (Instruction_aarch64::extract(insn, 31, 22) == 0b1011100101 &&
 287              Instruction_aarch64::extract(insn, 4, 0) == 0b11111) {
 288     return 0;
 289   } else {
 290     ShouldNotReachHere();
 291   }
 292   return address(((uint64_t)insn_addr + (offset << 2)));
 293 }
 294 
// Emit a safepoint poll: load the thread-local polling word and branch to
// slow_path if a safepoint/handshake is pending.  When at_return is true the
// polling word doubles as a stack watermark and we compare the stack pointer
// (or rfp) against it; otherwise only the poll bit is tested.
// Clobbers rscratch1.
void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod) {
  if (acquire) {
    // Load-acquire of the polling word when ordering is required.
    lea(rscratch1, Address(rthread, JavaThread::polling_word_offset()));
    ldar(rscratch1, rscratch1);
  } else {
    ldr(rscratch1, Address(rthread, JavaThread::polling_word_offset()));
  }
  if (at_return) {
    // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore,
    // we may safely use the sp instead to perform the stack watermark check.
    cmp(in_nmethod ? sp : rfp, rscratch1);
    br(Assembler::HI, slow_path);
  } else {
    // Not at a method return: only the armed poll bit matters.
    tbnz(rscratch1, log2i_exact(SafepointMechanism::poll_bit()), slow_path);
  }
}
 311 
// Clear the last-Java-frame anchor in the current JavaThread so the stack is
// no longer considered walkable from this frame.  sp and pc are always
// cleared; fp only when clear_fp is set.
void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
  // we must set sp to zero to clear frame
  str(zr, Address(rthread, JavaThread::last_Java_sp_offset()));

  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    str(zr, Address(rthread, JavaThread::last_Java_fp_offset()));
  }

  // Always clear the pc because it could have been set by make_walkable()
  str(zr, Address(rthread, JavaThread::last_Java_pc_offset()));
}
 325 
 326 // Calls to C land
 327 //
 328 // When entering C land, the rfp, & resp of the last Java frame have to be recorded

2121     return;
2122   }
2123   if (CheckCompressedOops) {
2124     Label ok;
2125     push(1 << rscratch1->encoding(), sp); // cmpptr trashes rscratch1
2126     cmpptr(rheapbase, ExternalAddress((address)CompressedOops::ptrs_base_addr()));
2127     br(Assembler::EQ, ok);
2128     stop(msg);
2129     bind(ok);
2130     pop(1 << rscratch1->encoding(), sp);
2131   }
2132 #endif
2133 }
2134 #endif
2135 
2136 void MacroAssembler::resolve_jobject(Register value, Register thread, Register tmp) {
2137   Label done, not_weak;
2138   cbz(value, done);           // Use NULL as-is.
2139 
2140   STATIC_ASSERT(JNIHandles::weak_tag_mask == 1u);
2141   tbz(r0, 0, not_weak);    // Test for jweak tag.
2142 
2143   // Resolve jweak.
2144   access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, value,
2145                  Address(value, -JNIHandles::weak_tag_value), tmp, thread);
2146   verify_oop(value);
2147   b(done);
2148 
2149   bind(not_weak);
2150   // Resolve (untagged) jobject.
2151   access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, 0), tmp, thread);
2152   verify_oop(value);
2153   bind(done);
2154 }
2155 
// Emit code that halts the VM with message `msg` when executed: a DCPS1
// debug trap followed by the message pointer embedded in the instruction
// stream so the trap handler can retrieve it.
void MacroAssembler::stop(const char* msg) {
  BLOCK_COMMENT(msg);
  dcps1(0xdeae);                // debug trap; 0xdeae marks a MacroAssembler::stop
  emit_int64((uintptr_t)msg);   // message pointer follows the trap instruction
}
2161 

5174   assert(line.index() == noreg, "index should be noreg");
5175   assert(line.offset() == 0, "offset should be 0");
5176   // would like to assert this
5177   // assert(line._ext.shift == 0, "shift should be zero");
5178   if (VM_Version::features() & VM_Version::CPU_DCPOP) {
5179     // writeback using clear virtual address to point of persistence
5180     dc(Assembler::CVAP, line.base());
5181   } else {
5182     // no need to generate anything as Unsafe.writebackMemory should
5183     // never invoke this stub
5184   }
5185 }
5186 
5187 void MacroAssembler::cache_wbsync(bool is_pre) {
5188   // we only need a barrier post sync
5189   if (!is_pre) {
5190     membar(Assembler::AnyAny);
5191   }
5192 }
5193 
// Debug check that the SVE vector length still matches the value recorded at
// JVM startup (native code must not change it).  Clobbers rscratch1.
void MacroAssembler::verify_sve_vector_length() {
  // Make sure that native code does not change SVE vector length.
  if (!UseSVE) return;
  Label verify_ok;
  movw(rscratch1, zr);   // rscratch1 = 0
  sve_inc(rscratch1, B); // add the current SVE vector length in bytes
  subsw(zr, rscratch1, VM_Version::get_initial_sve_vector_length());
  br(EQ, verify_ok);
  stop("Error: SVE vector length has changed since jvm startup");
  bind(verify_ok);
}
5205 
5206 void MacroAssembler::verify_ptrue() {
5207   Label verify_ok;
5208   if (!UseSVE) {
5209     return;
5210   }
5211   sve_cntp(rscratch1, B, ptrue, ptrue); // get true elements count.
5212   sve_dec(rscratch1, B);
5213   cbz(rscratch1, verify_ok);
5214   stop("Error: the preserved predicate register (p7) elements are not all true");
5215   bind(verify_ok);
5216 }
5217 
5218 void MacroAssembler::safepoint_isb() {
5219   isb();
5220 #ifndef PRODUCT

5241 }
5242 #endif
5243 
5244 void MacroAssembler::spin_wait() {
5245   for (int i = 0; i < VM_Version::spin_wait_desc().inst_count(); ++i) {
5246     switch (VM_Version::spin_wait_desc().inst()) {
5247       case SpinWait::NOP:
5248         nop();
5249         break;
5250       case SpinWait::ISB:
5251         isb();
5252         break;
5253       case SpinWait::YIELD:
5254         yield();
5255         break;
5256       default:
5257         ShouldNotReachHere();
5258     }
5259   }
5260 }

















































































































































































  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include <sys/types.h>
  27 
  28 #include "precompiled.hpp"
  29 #include "jvm.h"
  30 #include "asm/assembler.hpp"
  31 #include "asm/assembler.inline.hpp"
  32 #include "compiler/oopMap.hpp"
  33 #include "gc/shared/barrierSet.hpp"
  34 #include "gc/shared/barrierSetAssembler.hpp"
  35 #include "gc/shared/cardTableBarrierSet.hpp"
  36 #include "gc/shared/cardTable.hpp"
  37 #include "gc/shared/collectedHeap.hpp"
  38 #include "gc/shared/tlab_globals.hpp"
  39 #include "interpreter/bytecodeHistogram.hpp"
  40 #include "interpreter/interpreter.hpp"
  41 #include "compiler/compileTask.hpp"
  42 #include "compiler/disassembler.hpp"
  43 #include "memory/resourceArea.hpp"
  44 #include "memory/universe.hpp"
  45 #include "nativeInst_aarch64.hpp"
  46 #include "oops/accessDecorators.hpp"
  47 #include "oops/compressedOops.inline.hpp"
  48 #include "oops/klass.inline.hpp"
  49 #include "runtime/icache.hpp"
  50 #include "runtime/interfaceSupport.inline.hpp"
  51 #include "runtime/jniHandles.inline.hpp"
  52 #include "runtime/sharedRuntime.hpp"

 276     } else {
 277       ShouldNotReachHere();
 278     }
 279   } else if (Instruction_aarch64::extract(insn, 31, 23) == 0b110100101) {
 280     uint32_t *insns = (uint32_t *)insn_addr;
 281     // Move wide constant: movz, movk, movk.  See movptr().
 282     assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
 283     assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
 284     return address(uint64_t(Instruction_aarch64::extract(insns[0], 20, 5))
 285                    + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
 286                    + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
 287   } else if (Instruction_aarch64::extract(insn, 31, 22) == 0b1011100101 &&
 288              Instruction_aarch64::extract(insn, 4, 0) == 0b11111) {
 289     return 0;
 290   } else {
 291     ShouldNotReachHere();
 292   }
 293   return address(((uint64_t)insn_addr + (offset << 2)));
 294 }
 295 
// Emit a safepoint poll: load the thread-local polling word into `tmp` and
// branch to slow_path if a safepoint/handshake is pending.  When at_return is
// true the polling word doubles as a stack watermark and we compare the stack
// pointer (or rfp) against it; otherwise only the poll bit is tested.
// Clobbers tmp.
void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod, Register tmp) {
  if (acquire) {
    // Load-acquire of the polling word when ordering is required.
    lea(tmp, Address(rthread, JavaThread::polling_word_offset()));
    ldar(tmp, tmp);
  } else {
    ldr(tmp, Address(rthread, JavaThread::polling_word_offset()));
  }
  if (at_return) {
    // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore,
    // we may safely use the sp instead to perform the stack watermark check.
    cmp(in_nmethod ? sp : rfp, tmp);
    br(Assembler::HI, slow_path);
  } else {
    // Not at a method return: only the armed poll bit matters.
    tbnz(tmp, log2i_exact(SafepointMechanism::poll_bit()), slow_path);
  }
}
 312 
 313 void MacroAssembler::rt_call(address dest, Register tmp) {
 314   CodeBlob *cb = CodeCache::find_blob(dest);
 315   if (cb) {
 316     far_call(RuntimeAddress(dest));
 317   } else {
 318     lea(tmp, RuntimeAddress(dest));
 319     blr(tmp);
 320   }
 321 }
 322 
// Clear the last-Java-frame anchor in the current JavaThread so the stack is
// no longer considered walkable from this frame.  sp and pc are always
// cleared; fp only when clear_fp is set.
void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
  // we must set sp to zero to clear frame
  str(zr, Address(rthread, JavaThread::last_Java_sp_offset()));

  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    str(zr, Address(rthread, JavaThread::last_Java_fp_offset()));
  }

  // Always clear the pc because it could have been set by make_walkable()
  str(zr, Address(rthread, JavaThread::last_Java_pc_offset()));
}
 336 
 337 // Calls to C land
 338 //
 339 // When entering C land, the rfp, & resp of the last Java frame have to be recorded

2132     return;
2133   }
2134   if (CheckCompressedOops) {
2135     Label ok;
2136     push(1 << rscratch1->encoding(), sp); // cmpptr trashes rscratch1
2137     cmpptr(rheapbase, ExternalAddress((address)CompressedOops::ptrs_base_addr()));
2138     br(Assembler::EQ, ok);
2139     stop(msg);
2140     bind(ok);
2141     pop(1 << rscratch1->encoding(), sp);
2142   }
2143 #endif
2144 }
2145 #endif
2146 
// Resolve the JNI handle in `value` to the raw oop it refers to, leaving the
// result in `value`.  A NULL handle resolves to NULL.  The low bit of the
// handle distinguishes jweak (tagged) from ordinary jobject handles; weak
// handles are loaded with a phantom-reference barrier.  `tmp` is a scratch
// register for the load barrier; `thread` is the current JavaThread.
void MacroAssembler::resolve_jobject(Register value, Register thread, Register tmp) {
  Label done, not_weak;
  cbz(value, done);           // Use NULL as-is.

  STATIC_ASSERT(JNIHandles::weak_tag_mask == 1u);
  tbz(value, 0, not_weak);    // Test for jweak tag.

  // Resolve jweak.
  access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, value,
                 Address(value, -JNIHandles::weak_tag_value), tmp, thread);
  verify_oop(value);
  b(done);

  bind(not_weak);
  // Resolve (untagged) jobject.
  access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, 0), tmp, thread);
  verify_oop(value);
  bind(done);
}
2166 
// Emit code that halts the VM with message `msg` when executed: a DCPS1
// debug trap followed by the message pointer embedded in the instruction
// stream so the trap handler can retrieve it.
void MacroAssembler::stop(const char* msg) {
  BLOCK_COMMENT(msg);
  dcps1(0xdeae);                // debug trap; 0xdeae marks a MacroAssembler::stop
  emit_int64((uintptr_t)msg);   // message pointer follows the trap instruction
}
2172 

5185   assert(line.index() == noreg, "index should be noreg");
5186   assert(line.offset() == 0, "offset should be 0");
5187   // would like to assert this
5188   // assert(line._ext.shift == 0, "shift should be zero");
5189   if (VM_Version::features() & VM_Version::CPU_DCPOP) {
5190     // writeback using clear virtual address to point of persistence
5191     dc(Assembler::CVAP, line.base());
5192   } else {
5193     // no need to generate anything as Unsafe.writebackMemory should
5194     // never invoke this stub
5195   }
5196 }
5197 
5198 void MacroAssembler::cache_wbsync(bool is_pre) {
5199   // we only need a barrier post sync
5200   if (!is_pre) {
5201     membar(Assembler::AnyAny);
5202   }
5203 }
5204 
// Debug check that the SVE vector length still matches the value recorded at
// JVM startup (native code must not change it).  Clobbers tmp.
void MacroAssembler::verify_sve_vector_length(Register tmp) {
  // Make sure that native code does not change SVE vector length.
  if (!UseSVE) return;
  Label verify_ok;
  movw(tmp, zr);   // tmp = 0
  sve_inc(tmp, B); // add the current SVE vector length in bytes
  subsw(zr, tmp, VM_Version::get_initial_sve_vector_length());
  br(EQ, verify_ok);
  stop("Error: SVE vector length has changed since jvm startup");
  bind(verify_ok);
}
5216 
5217 void MacroAssembler::verify_ptrue() {
5218   Label verify_ok;
5219   if (!UseSVE) {
5220     return;
5221   }
5222   sve_cntp(rscratch1, B, ptrue, ptrue); // get true elements count.
5223   sve_dec(rscratch1, B);
5224   cbz(rscratch1, verify_ok);
5225   stop("Error: the preserved predicate register (p7) elements are not all true");
5226   bind(verify_ok);
5227 }
5228 
5229 void MacroAssembler::safepoint_isb() {
5230   isb();
5231 #ifndef PRODUCT

5252 }
5253 #endif
5254 
5255 void MacroAssembler::spin_wait() {
5256   for (int i = 0; i < VM_Version::spin_wait_desc().inst_count(); ++i) {
5257     switch (VM_Version::spin_wait_desc().inst()) {
5258       case SpinWait::NOP:
5259         nop();
5260         break;
5261       case SpinWait::ISB:
5262         isb();
5263         break;
5264       case SpinWait::YIELD:
5265         yield();
5266         break;
5267       default:
5268         ShouldNotReachHere();
5269     }
5270   }
5271 }
5272 
5273 // The java_calling_convention describes stack locations as ideal slots on
5274 // a frame with no abi restrictions. Since we must observe abi restrictions
5275 // (like the placement of the register window) the slots must be biased by
5276 // the following value.
5277 static int reg2offset_in(VMReg r) {
5278   // Account for saved rfp and lr
5279   // This should really be in_preserve_stack_slots
5280   return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
5281 }
5282 
5283 static int reg2offset_out(VMReg r) {
5284   return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
5285 }
5286 
// On 64 bit we will store integer like items to the stack as
// 64 bits items (Aarch64 abi) even though java would only store
// 32bits for a parameter. On 32bit it will simply be 32 bits
// So this routine will do 32->32 on 32bit and 32->64 on 64bit
//
// Moves a 32-bit int argument from its Java location (src) to its native
// location (dst); `tmp` is used only for the stack-to-stack case.
void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst, Register tmp) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      ldr(tmp, Address(rfp, reg2offset_in(src.first())));
      str(tmp, Address(sp, reg2offset_out(dst.first())));
    } else {
      // stack to reg: ldrsw sign-extends the 32-bit slot to 64 bits
      ldrsw(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Do we really have to sign extend???
    // __ movslq(src.first()->as_Register(), src.first()->as_Register());
    str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
  } else {
    // reg to reg: sign-extend word to doubleword; skipped when already in place
    if (dst.first() != src.first()) {
      sxtw(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}
5312 
// An oop arg. Must pass a handle not the oop itself
//
// Converts an oop argument into a handle (pointer to a stack location holding
// the oop, or NULL when the oop is NULL) and records the oop's location in
// the OopMap.  If this argument is the receiver, its frame offset is stored
// through receiver_offset.  Clobbers rscratch1/rscratch2.
void MacroAssembler::object_move(
                        OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {

  // must pass a handle. First figure out the location we use as a handle

  // Stack destinations build the handle in rscratch2 and store it below;
  // register destinations build it directly in the target register.
  Register rHandle = dst.first()->is_stack() ? rscratch2 : dst.first()->as_Register();

  // See if oop is NULL if it is we need no handle

  if (src.first()->is_stack()) {

    // Oop is already on the stack as an argument
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }

    // Load the oop (to test for NULL) and the address of its slot (the handle).
    ldr(rscratch1, Address(rfp, reg2offset_in(src.first())));
    lea(rHandle, Address(rfp, reg2offset_in(src.first())));
    // conditionally move a NULL
    cmp(rscratch1, zr);
    csel(rHandle, zr, rHandle, Assembler::EQ);
  } else {

    // Oop is in an a register we must store it to the space we reserve
    // on the stack for oop_handles and pass a handle if oop is non-NULL

    const Register rOop = src.first()->as_Register();
    // Each java arg register gets its own handle slot in the reserved area.
    int oop_slot;
    if (rOop == j_rarg0)
      oop_slot = 0;
    else if (rOop == j_rarg1)
      oop_slot = 1;
    else if (rOop == j_rarg2)
      oop_slot = 2;
    else if (rOop == j_rarg3)
      oop_slot = 3;
    else if (rOop == j_rarg4)
      oop_slot = 4;
    else if (rOop == j_rarg5)
      oop_slot = 5;
    else if (rOop == j_rarg6)
      oop_slot = 6;
    else {
      assert(rOop == j_rarg7, "wrong register");
      oop_slot = 7;
    }

    oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot*VMRegImpl::stack_slot_size;

    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    // Store oop in handle area, may be NULL
    str(rOop, Address(sp, offset));
    if (is_receiver) {
      *receiver_offset = offset;
    }

    // Handle = address of the slot just written, or NULL if the oop is NULL.
    cmp(rOop, zr);
    lea(rHandle, Address(sp, offset));
    // conditionally move a NULL
    csel(rHandle, zr, rHandle, Assembler::EQ);
  }

  // If arg is on the stack then place it otherwise it is already in correct reg.
  if (dst.first()->is_stack()) {
    str(rHandle, Address(sp, reg2offset_out(dst.first())));
  }
}
5390 
5391 // A float arg may have to do float reg int reg conversion
5392 void MacroAssembler::float_move(VMRegPair src, VMRegPair dst, Register tmp) {
5393  if (src.first()->is_stack()) {
5394     if (dst.first()->is_stack()) {
5395       ldrw(tmp, Address(rfp, reg2offset_in(src.first())));
5396       strw(tmp, Address(sp, reg2offset_out(dst.first())));
5397     } else {
5398       ldrs(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first())));
5399     }
5400   } else if (src.first() != dst.first()) {
5401     if (src.is_single_phys_reg() && dst.is_single_phys_reg())
5402       fmovs(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
5403     else
5404       strs(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first())));
5405   }
5406 }
5407 
5408 // A long move
5409 void MacroAssembler::long_move(VMRegPair src, VMRegPair dst, Register tmp) {
5410   if (src.first()->is_stack()) {
5411     if (dst.first()->is_stack()) {
5412       // stack to stack
5413       ldr(tmp, Address(rfp, reg2offset_in(src.first())));
5414       str(tmp, Address(sp, reg2offset_out(dst.first())));
5415     } else {
5416       // stack to reg
5417       ldr(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
5418     }
5419   } else if (dst.first()->is_stack()) {
5420     // reg to stack
5421     // Do we really have to sign extend???
5422     // __ movslq(src.first()->as_Register(), src.first()->as_Register());
5423     str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
5424   } else {
5425     if (dst.first() != src.first()) {
5426       mov(dst.first()->as_Register(), src.first()->as_Register());
5427     }
5428   }
5429 }
5430 
5431 
5432 // A double move
5433 void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp) {
5434   if (src.first()->is_stack()) {
5435     if (dst.first()->is_stack()) {
5436       ldr(tmp, Address(rfp, reg2offset_in(src.first())));
5437       str(tmp, Address(sp, reg2offset_out(dst.first())));
5438     } else {
5439       ldrd(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first())));
5440     }
5441   } else if (src.first() != dst.first()) {
5442     if (src.is_single_phys_reg() && dst.is_single_phys_reg())
5443       fmovd(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
5444     else
5445       strd(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first())));
5446   }
5447 }
< prev index next >