
src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp

  29 #include "jvm.h"
  30 #include "asm/assembler.hpp"
  31 #include "asm/assembler.inline.hpp"
  32 #include "gc/shared/barrierSet.hpp"
  33 #include "gc/shared/cardTable.hpp"
  34 #include "gc/shared/barrierSetAssembler.hpp"
  35 #include "gc/shared/cardTableBarrierSet.hpp"
  36 #include "interpreter/interpreter.hpp"
  37 #include "compiler/disassembler.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "memory/universe.hpp"
  40 #include "nativeInst_aarch64.hpp"
  41 #include "oops/accessDecorators.hpp"
  42 #include "oops/compressedOops.inline.hpp"
  43 #include "oops/klass.inline.hpp"
  44 #include "runtime/biasedLocking.hpp"
  45 #include "runtime/icache.hpp"
  46 #include "runtime/interfaceSupport.inline.hpp"
  47 #include "runtime/jniHandles.inline.hpp"
  48 #include "runtime/sharedRuntime.hpp"

  49 #include "runtime/thread.hpp"
  50 #ifdef COMPILER1
  51 #include "c1/c1_LIRAssembler.hpp"
  52 #endif
  53 #ifdef COMPILER2
  54 #include "oops/oop.hpp"
  55 #include "opto/compile.hpp"
  56 #include "opto/intrinsicnode.hpp"
  57 #include "opto/node.hpp"
  58 #endif
  59 
  60 #ifdef PRODUCT
  61 #define BLOCK_COMMENT(str) /* nothing */
  62 #define STOP(error) stop(error)
  63 #else
  64 #define BLOCK_COMMENT(str) block_comment(str)
  65 #define STOP(error) block_comment(error); stop(error)
  66 #endif
  67 
  68 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")


1292 
1293   // Unspill the temp. registers:
1294   pop(pushed_registers, sp);
1295 
1296   br(Assembler::NE, *L_failure);
1297 
1298   // Success.  Cache the super we found and proceed in triumph.
1299   str(super_klass, super_cache_addr);
1300 
1301   if (L_success != &L_fallthrough) {
1302     b(*L_success);
1303   }
1304 
1305 #undef IS_A_TEMP
1306 
1307   bind(L_fallthrough);
1308 }
1309 
1310 
1311 void MacroAssembler::verify_oop(Register reg, const char* s) {
1312   if (!VerifyOops) return;
1313 
1314   // Pass register number to verify_oop_subroutine
1315   const char* b = NULL;
1316   {
1317     ResourceMark rm;
1318     stringStream ss;
1319     ss.print("verify_oop: %s: %s", reg->name(), s);
1320     b = code_string(ss.as_string());
1321   }
1322   BLOCK_COMMENT("verify_oop {");
1323 
1324   stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
1325   stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));
1326 
1327   mov(r0, reg);
1328   mov(rscratch1, (address)b);
1329 
1330   // call indirectly to solve generation ordering problem
1331   lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
1332   ldr(rscratch2, Address(rscratch2));
1333   blr(rscratch2);
1334 
1335   ldp(rscratch2, lr, Address(post(sp, 2 * wordSize)));
1336   ldp(r0, rscratch1, Address(post(sp, 2 * wordSize)));
1337 
1338   BLOCK_COMMENT("} verify_oop");
1339 }
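
The stp/ldp pairs above implement a register-pair push and pop via pre- and post-indexed addressing. A minimal standalone sketch of those two addressing modes, with sp modeled as a plain pointer (illustrative C++, not HotSpot code):

    #include <cstdint>

    // Pre-indexed store: the base is updated first, then the pair is stored
    // at the new address -- a 16-byte push, as in pre(sp, -2 * wordSize).
    void push_pair(uint64_t*& sp, uint64_t a, uint64_t b) {
      sp -= 2;
      sp[0] = a; sp[1] = b;   // stp
    }

    // Post-indexed load: the pair is read first, then the base is updated --
    // the matching pop, as in post(sp, 2 * wordSize).
    void pop_pair(uint64_t*& sp, uint64_t& a, uint64_t& b) {
      a = sp[0]; b = sp[1];   // ldp
      sp += 2;
    }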
1340 
1341 void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
1342   if (!VerifyOops) return;
1343 
1344   const char* b = NULL;
1345   {
1346     ResourceMark rm;
1347     stringStream ss;
1348     ss.print("verify_oop_addr: %s", s);
1349     b = code_string(ss.as_string());
1350   }
1351   BLOCK_COMMENT("verify_oop_addr {");
1352 
1353   stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
1354   stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));
1355 
1356   // addr may contain sp so we will have to adjust it based on the
1357   // pushes that we just did.
1358   if (addr.uses(sp)) {
1359     lea(r0, addr);
1360     ldr(r0, Address(r0, 4 * wordSize));
1361   } else {
1362     ldr(r0, addr);


1425 
1426 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
1427   pass_arg0(this, arg_0);
1428   call_VM_leaf_base(entry_point, 1);
1429 }
1430 
1431 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
1432   pass_arg0(this, arg_0);
1433   pass_arg1(this, arg_1);
1434   call_VM_leaf_base(entry_point, 2);
1435 }
1436 
1437 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0,
1438                                   Register arg_1, Register arg_2) {
1439   pass_arg0(this, arg_0);
1440   pass_arg1(this, arg_1);
1441   pass_arg2(this, arg_2);
1442   call_VM_leaf_base(entry_point, 3);
1443 }
1444 
1445 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
1446   pass_arg0(this, arg_0);
1447   MacroAssembler::call_VM_leaf_base(entry_point, 1);
1448 }
1449 
1450 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
1451 
1452   assert(arg_0 != c_rarg1, "smashed arg");
1453   pass_arg1(this, arg_1);
1454   pass_arg0(this, arg_0);
1455   MacroAssembler::call_VM_leaf_base(entry_point, 2);
1456 }
1457 
1458 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
1459   assert(arg_0 != c_rarg2, "smashed arg");
1460   assert(arg_1 != c_rarg2, "smashed arg");
1461   pass_arg2(this, arg_2);
1462   assert(arg_0 != c_rarg1, "smashed arg");
1463   pass_arg1(this, arg_1);
1464   pass_arg0(this, arg_0);


1474   assert(arg_1 != c_rarg2, "smashed arg");
1475   pass_arg2(this, arg_2);
1476   assert(arg_0 != c_rarg1, "smashed arg");
1477   pass_arg1(this, arg_1);
1478   pass_arg0(this, arg_0);
1479   MacroAssembler::call_VM_leaf_base(entry_point, 4);
1480 }
1481 
1482 void MacroAssembler::null_check(Register reg, int offset) {
1483   if (needs_explicit_null_check(offset)) {
1484     // provoke OS NULL exception if reg = NULL by
1485     // accessing M[reg] w/o changing any registers
1486     // NOTE: this is plenty to provoke a segv
1487     ldr(zr, Address(reg));
1488   } else {
1489     // nothing to do, (later) access of M[reg + offset]
1490     // will provoke OS NULL exception if reg = NULL
1491   }
1492 }
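
Whether the explicit ldr(zr, Address(reg)) is needed depends on whether a later access at reg + offset would still fault inside the protected page at address zero. A rough standalone sketch of that decision; the page size is an assumption here, and the real needs_explicit_null_check also has to handle compressed-oop offsets:

    #include <cstdint>

    constexpr intptr_t kPageSize = 4096;  // stand-in for os::vm_page_size()

    // A NULL reg plus a small non-negative offset still lands in the unmapped
    // page at 0, so the later access faults on its own; otherwise an explicit
    // probe of M[reg] must be emitted up front.
    bool needs_explicit_null_check_sketch(intptr_t offset) {
      return offset < 0 || offset >= kPageSize;
    }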
1493 
1494 // MacroAssembler protected routines needed to implement
1495 // public methods
1496 
1497 void MacroAssembler::mov(Register r, Address dest) {
1498   code_section()->relocate(pc(), dest.rspec());
1499   u_int64_t imm64 = (u_int64_t)dest.target();
1500   movptr(r, imm64);
1501 }
1502 
1503 // Move a constant pointer into r.  In AArch64 mode the virtual
1504 // address space is 48 bits in size, so we only need three
1505 // instructions to create a patchable instruction sequence that can
1506 // reach anywhere.
1507 void MacroAssembler::movptr(Register r, uintptr_t imm64) {
1508 #ifndef PRODUCT
1509   {
1510     char buffer[64];
1511     snprintf(buffer, sizeof(buffer), "0x%" PRIX64, imm64);
1512     block_comment(buffer);
1513   }
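
The three-instruction sequence the comment above promises is one movz plus two movk, each depositing a 16-bit halfword. A standalone sketch of the decomposition movptr performs (plain arithmetic modeling the emitted instructions, not HotSpot code):

    #include <cassert>
    #include <cstdint>

    // Model of movz/movk/movk for a 48-bit virtual address:
    //   movz r, imm & 0xffff
    //   movk r, (imm >> 16) & 0xffff, lsl 16
    //   movk r, (imm >> 32) & 0xffff, lsl 32
    uint64_t materialize48(uint64_t imm) {
      assert((imm >> 48) == 0 && "a 48-bit VA fits in three halfwords");
      uint64_t r = imm & 0xffff;              // movz
      r |= ((imm >> 16) & 0xffff) << 16;      // movk, lsl 16
      r |= ((imm >> 32) & 0xffff) << 32;      // movk, lsl 32
      return r;
    }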


3666     adr = Address(rscratch2);
3667     break;
3668   }
3669   ldr(rscratch1, adr);
3670   add(rscratch1, rscratch1, src);
3671   str(rscratch1, adr);
3672 }
3673 
3674 void MacroAssembler::cmpptr(Register src1, Address src2) {
3675   unsigned long offset;
3676   adrp(rscratch1, src2, offset);
3677   ldr(rscratch1, Address(rscratch1, offset));
3678   cmp(src1, rscratch1);
3679 }
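
cmpptr relies on adrp's split addressing: adrp reaches the 4KB-aligned page of the target PC-relatively, and the low 12 bits come back in 'offset' to be folded into the dependent load. A sketch of the split (illustrative only):

    #include <cstdint>

    // adrp materializes 'page'; the following ldr applies 'offset'.
    void split_for_adrp(uint64_t target, uint64_t& page, uint64_t& offset) {
      page   = target & ~uint64_t(0xfff);
      offset = target &  uint64_t(0xfff);
    }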
3680 
3681 void MacroAssembler::cmpoop(Register obj1, Register obj2) {
3682   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
3683   bs->obj_equals(this, obj1, obj2);
3684 }
3685 
3686 void MacroAssembler::load_klass(Register dst, Register src) {
3687   if (UseCompressedClassPointers) {
3688     ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
3689     decode_klass_not_null(dst);
3690   } else {
3691     ldr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
3692   }
3693 }
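
decode_klass_not_null undoes the compressed-class encoding: shift, then rebase. A sketch of the decode under the usual base/shift scheme (the concrete values are chosen at VM start-up):

    #include <cstdint>

    // narrow -> full Klass*: a zero base and zero shift make this a no-op,
    // which the fast paths in cmp_klass below exploit.
    uint64_t decode_klass(uint32_t narrow, uint64_t base, int shift) {
      return base + (uint64_t(narrow) << shift);
    }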
3694 
3695 // ((OopHandle)result).resolve();
3696 void MacroAssembler::resolve_oop_handle(Register result, Register tmp) {
3697   // OopHandle::resolve is an indirection.
3698   access_load_at(T_OBJECT, IN_NATIVE, result, Address(result, 0), tmp, noreg);
3699 }
3700 
3701 void MacroAssembler::load_mirror(Register dst, Register method, Register tmp) {
3702   const int mirror_offset = in_bytes(Klass::java_mirror_offset());
3703   ldr(dst, Address(rmethod, Method::const_offset()));
3704   ldr(dst, Address(dst, ConstMethod::constants_offset()));
3705   ldr(dst, Address(dst, ConstantPool::pool_holder_offset_in_bytes()));
3706   ldr(dst, Address(dst, mirror_offset));
3707   resolve_oop_handle(dst, tmp);
3708 }
3709 
3710 void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) {
3711   if (UseCompressedClassPointers) {
3712     ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
3713     if (CompressedKlassPointers::base() == NULL) {
3714       cmp(trial_klass, tmp, LSL, CompressedKlassPointers::shift());
3715       return;
3716     } else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
3717                && CompressedKlassPointers::shift() == 0) {
3718       // Only the bottom 32 bits matter
3719       cmpw(trial_klass, tmp);
3720       return;
3721     }
3722     decode_klass_not_null(tmp);
3723   } else {
3724     ldr(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
3725   }
3726   cmp(trial_klass, tmp);
3727 }
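
The second fast path above works because with shift == 0 the narrow klass is just (klass - base), and a base with zero low 32 bits cannot disturb the low half of that difference. A one-line sketch of the reasoning:

    #include <cstdint>

    // When (base & 0xffffffff) == 0 and shift == 0, the low 32 bits of the
    // encoded and unencoded pointers agree, so a cmpw suffices.
    uint32_t encode_shift0(uint64_t klass, uint64_t base) {
      return (uint32_t)(klass - base);  // == (uint32_t)klass for such bases
    }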
3728 
3729 void MacroAssembler::load_prototype_header(Register dst, Register src) {


4007   narrowKlass nk = CompressedKlassPointers::encode(k);
4008   movz(dst, (nk >> 16), 16);
4009   movk(dst, nk & 0xffff);
4010 }
4011 
4012 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators,
4013                                     Register dst, Address src,
4014                                     Register tmp1, Register thread_tmp) {
4015   BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
4016   decorators = AccessInternal::decorator_fixup(decorators);
4017   bool as_raw = (decorators & AS_RAW) != 0;
4018   if (as_raw) {
4019     bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
4020   } else {
4021     bs->load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
4022   }
4023 }
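
The AS_RAW branch uses a qualified call, bs->BarrierSetAssembler::load_at(...), which binds statically to the base-class code and so bypasses any GC-specific barrier override. A minimal standalone illustration of that C++ mechanism:

    #include <cstdio>

    struct BarrierSetAssembler {
      virtual void load_at() { std::puts("raw load, no barriers"); }
      virtual ~BarrierSetAssembler() {}
    };
    struct G1BarrierSetAssembler : BarrierSetAssembler {
      void load_at() override { std::puts("load with G1 barriers"); }
    };

    int main() {
      G1BarrierSetAssembler g1;
      BarrierSetAssembler* bs = &g1;
      bs->load_at();                       // virtual dispatch: GC-specific
      bs->BarrierSetAssembler::load_at();  // qualified: raw, as with AS_RAW
    }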
4024 
4025 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators,
4026                                      Address dst, Register src,
4027                                      Register tmp1, Register thread_tmp) {

4028   BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
4029   decorators = AccessInternal::decorator_fixup(decorators);
4030   bool as_raw = (decorators & AS_RAW) != 0;
4031   if (as_raw) {
4032     bs->BarrierSetAssembler::store_at(this, decorators, type, dst, src, tmp1, thread_tmp);
4033   } else {
4034     bs->store_at(this, decorators, type, dst, src, tmp1, thread_tmp);
4035   }
4036 }
4037 
4038 void MacroAssembler::resolve(DecoratorSet decorators, Register obj) {
4039   // Use stronger ACCESS_WRITE|ACCESS_READ by default.
4040   if ((decorators & (ACCESS_READ | ACCESS_WRITE)) == 0) {
4041     decorators |= ACCESS_READ | ACCESS_WRITE;
4042   }
4043   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
4044   return bs->resolve(this, decorators, obj);
4045 }
4046 
4047 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1,
4048                                    Register thread_tmp, DecoratorSet decorators) {
4049   access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp);
4050 }
4051 
4052 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1,
4053                                             Register thread_tmp, DecoratorSet decorators) {
4054   access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, thread_tmp);
4055 }
4056 
4057 void MacroAssembler::store_heap_oop(Address dst, Register src, Register tmp1,
4058                                     Register thread_tmp, DecoratorSet decorators) {
4059   access_store_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp);
4060 }
4061 
4062 // Used for storing NULLs.
4063 void MacroAssembler::store_heap_oop_null(Address dst) {
4064   access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg);
4065 }
4066 
4067 Address MacroAssembler::allocate_metadata_address(Metadata* obj) {
4068   assert(oop_recorder() != NULL, "this assembler needs a Recorder");
4069   int index = oop_recorder()->allocate_metadata_index(obj);
4070   RelocationHolder rspec = metadata_Relocation::spec(index);
4071   return Address((address)obj, rspec);
4072 }
4073 
4074 // Move an oop into a register.  immediate is true if we want
4075 // immediate instructions, i.e. we are not going to patch this
4076 // instruction while the code is being executed by another thread.  In
4077 // that case we can use move immediates rather than the constant pool.
4078 void MacroAssembler::movoop(Register dst, jobject obj, bool immediate) {
4079   int oop_index;
4080   if (obj == NULL) {
4081     oop_index = oop_recorder()->allocate_oop_index(obj);
4082   } else {
4083 #ifdef ASSERT
4084     {


5847 }
5848 
5849 // get_thread() can be called anywhere inside generated code so we
5850 // need to save whatever non-callee save context might get clobbered
5851 // by the call to JavaThread::aarch64_get_thread_helper() or, indeed,
5852 // the call setup code.
5853 //
5854 // aarch64_get_thread_helper() clobbers only r0, r1, and flags.
5855 //
5856 void MacroAssembler::get_thread(Register dst) {
5857   RegSet saved_regs = RegSet::range(r0, r1) + lr - dst;
5858   push(saved_regs, sp);
5859 
5860   mov(lr, CAST_FROM_FN_PTR(address, JavaThread::aarch64_get_thread_helper));
5861   blrt(lr, 1, 0, 1);
5862   if (dst != c_rarg0) {
5863     mov(dst, c_rarg0);
5864   }
5865 
5866   pop(saved_regs, sp);

5867 }
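
The RegSet expression RegSet::range(r0, r1) + lr - dst is set arithmetic over a register bitmask: take r0..r1, add lr, and drop the destination so the fetched value is not clobbered by the restore. A bitmask sketch, assuming the usual numbering where lr is r30:

    #include <cstdint>

    uint64_t range(int lo, int hi) {           // RegSet::range
      uint64_t s = 0;
      for (int i = lo; i <= hi; i++) s |= uint64_t(1) << i;
      return s;
    }

    int main() {
      const int lr = 30, dst = 0;              // e.g. get_thread(r0)
      uint64_t saved = (range(0, 1) | uint64_t(1) << lr) & ~(uint64_t(1) << dst);
      return saved == ((uint64_t(1) << 1) | (uint64_t(1) << lr)) ? 0 : 1;
    }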


  29 #include "jvm.h"
  30 #include "asm/assembler.hpp"
  31 #include "asm/assembler.inline.hpp"
  32 #include "gc/shared/barrierSet.hpp"
  33 #include "gc/shared/cardTable.hpp"
  34 #include "gc/shared/barrierSetAssembler.hpp"
  35 #include "gc/shared/cardTableBarrierSet.hpp"
  36 #include "interpreter/interpreter.hpp"
  37 #include "compiler/disassembler.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "memory/universe.hpp"
  40 #include "nativeInst_aarch64.hpp"
  41 #include "oops/accessDecorators.hpp"
  42 #include "oops/compressedOops.inline.hpp"
  43 #include "oops/klass.inline.hpp"
  44 #include "runtime/biasedLocking.hpp"
  45 #include "runtime/icache.hpp"
  46 #include "runtime/interfaceSupport.inline.hpp"
  47 #include "runtime/jniHandles.inline.hpp"
  48 #include "runtime/sharedRuntime.hpp"
  49 #include "runtime/signature_cc.hpp"
  50 #include "runtime/thread.hpp"
  51 #ifdef COMPILER1
  52 #include "c1/c1_LIRAssembler.hpp"
  53 #endif
  54 #ifdef COMPILER2
  55 #include "oops/oop.hpp"
  56 #include "opto/compile.hpp"
  57 #include "opto/intrinsicnode.hpp"
  58 #include "opto/node.hpp"
  59 #endif
  60 
  61 #ifdef PRODUCT
  62 #define BLOCK_COMMENT(str) /* nothing */
  63 #define STOP(error) stop(error)
  64 #else
  65 #define BLOCK_COMMENT(str) block_comment(str)
  66 #define STOP(error) block_comment(error); stop(error)
  67 #endif
  68 
  69 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")


1293 
1294   // Unspill the temp. registers:
1295   pop(pushed_registers, sp);
1296 
1297   br(Assembler::NE, *L_failure);
1298 
1299   // Success.  Cache the super we found and proceed in triumph.
1300   str(super_klass, super_cache_addr);
1301 
1302   if (L_success != &L_fallthrough) {
1303     b(*L_success);
1304   }
1305 
1306 #undef IS_A_TEMP
1307 
1308   bind(L_fallthrough);
1309 }
1310 
1311 
1312 void MacroAssembler::verify_oop(Register reg, const char* s) {
1313   if (!VerifyOops || VerifyAdapterSharing) {
1314   // The address of the code string below confuses VerifyAdapterSharing
1315     // because it may differ between otherwise equivalent adapters.
1316     return;
1317   }
1318 
1319   // Pass register number to verify_oop_subroutine
1320   const char* b = NULL;
1321   {
1322     ResourceMark rm;
1323     stringStream ss;
1324     ss.print("verify_oop: %s: %s", reg->name(), s);
1325     b = code_string(ss.as_string());
1326   }
1327   BLOCK_COMMENT("verify_oop {");
1328 
1329   stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
1330   stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));
1331 
1332   mov(r0, reg);
1333   mov(rscratch1, (address)b);
1334 
1335   // call indirectly to solve generation ordering problem
1336   lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
1337   ldr(rscratch2, Address(rscratch2));
1338   blr(rscratch2);
1339 
1340   ldp(rscratch2, lr, Address(post(sp, 2 * wordSize)));
1341   ldp(r0, rscratch1, Address(post(sp, 2 * wordSize)));
1342 
1343   BLOCK_COMMENT("} verify_oop");
1344 }
1345 
1346 void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
1347   if (!VerifyOops || VerifyAdapterSharing) {
1348   // The address of the code string below confuses VerifyAdapterSharing
1349     // because it may differ between otherwise equivalent adapters.
1350     return;
1351   }
1352 
1353   const char* b = NULL;
1354   {
1355     ResourceMark rm;
1356     stringStream ss;
1357     ss.print("verify_oop_addr: %s", s);
1358     b = code_string(ss.as_string());
1359   }
1360   BLOCK_COMMENT("verify_oop_addr {");
1361 
1362   stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
1363   stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));
1364 
1365   // addr may contain sp so we will have to adjust it based on the
1366   // pushes that we just did.
1367   if (addr.uses(sp)) {
1368     lea(r0, addr);
1369     ldr(r0, Address(r0, 4 * wordSize));
1370   } else {
1371     ldr(r0, addr);


1434 
1435 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
1436   pass_arg0(this, arg_0);
1437   call_VM_leaf_base(entry_point, 1);
1438 }
1439 
1440 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
1441   pass_arg0(this, arg_0);
1442   pass_arg1(this, arg_1);
1443   call_VM_leaf_base(entry_point, 2);
1444 }
1445 
1446 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0,
1447                                   Register arg_1, Register arg_2) {
1448   pass_arg0(this, arg_0);
1449   pass_arg1(this, arg_1);
1450   pass_arg2(this, arg_2);
1451   call_VM_leaf_base(entry_point, 3);
1452 }
1453 
1454 void MacroAssembler::super_call_VM_leaf(address entry_point) {
1455   MacroAssembler::call_VM_leaf_base(entry_point, 1);
1456 }
1457 
1458 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
1459   pass_arg0(this, arg_0);
1460   MacroAssembler::call_VM_leaf_base(entry_point, 1);
1461 }
1462 
1463 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
1464 
1465   assert(arg_0 != c_rarg1, "smashed arg");
1466   pass_arg1(this, arg_1);
1467   pass_arg0(this, arg_0);
1468   MacroAssembler::call_VM_leaf_base(entry_point, 2);
1469 }
1470 
1471 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
1472   assert(arg_0 != c_rarg2, "smashed arg");
1473   assert(arg_1 != c_rarg2, "smashed arg");
1474   pass_arg2(this, arg_2);
1475   assert(arg_0 != c_rarg1, "smashed arg");
1476   pass_arg1(this, arg_1);
1477   pass_arg0(this, arg_0);


1487   assert(arg_1 != c_rarg2, "smashed arg");
1488   pass_arg2(this, arg_2);
1489   assert(arg_0 != c_rarg1, "smashed arg");
1490   pass_arg1(this, arg_1);
1491   pass_arg0(this, arg_0);
1492   MacroAssembler::call_VM_leaf_base(entry_point, 4);
1493 }
1494 
1495 void MacroAssembler::null_check(Register reg, int offset) {
1496   if (needs_explicit_null_check(offset)) {
1497     // provoke OS NULL exception if reg = NULL by
1498     // accessing M[reg] w/o changing any registers
1499     // NOTE: this is plenty to provoke a segv
1500     ldr(zr, Address(reg));
1501   } else {
1502     // nothing to do, (later) access of M[reg + offset]
1503     // will provoke OS NULL exception if reg = NULL
1504   }
1505 }
1506 
1507 void MacroAssembler::test_klass_is_value(Register klass, Register temp_reg, Label& is_value) {
1508   ldrw(temp_reg, Address(klass, Klass::access_flags_offset()));
1509   andr(temp_reg, temp_reg, JVM_ACC_VALUE);
1510   cbnz(temp_reg, is_value); 
1511 }
1512 
1513 void MacroAssembler::test_field_is_flattenable(Register flags, Register temp_reg, Label& is_flattenable) {
1514   (void) temp_reg; // keep signature uniform with x86
1515   tbnz(flags, ConstantPoolCacheEntry::is_flattenable_field_shift, is_flattenable);
1516 }
1517 
1518 void MacroAssembler::test_field_is_not_flattenable(Register flags, Register temp_reg, Label& not_flattenable) {
1519   (void) temp_reg; // keep signature uniform with x86
1520   tbz(flags, ConstantPoolCacheEntry::is_flattenable_field_shift, not_flattenable);
1521 }
1522 
1523 void MacroAssembler::test_field_is_flattened(Register flags, Register temp_reg, Label& is_flattened) {
1524   (void) temp_reg; // keep signature uniform with x86
1525   tbnz(flags, ConstantPoolCacheEntry::is_flattened_field_shift, is_flattened);
1526 }
1527 
1528 void MacroAssembler::test_flattened_array_oop(Register oop, Register temp_reg, Label& is_flattened_array) {
1529   load_storage_props(temp_reg, oop);
1530   andr(temp_reg, temp_reg, ArrayStorageProperties::flattened_value);
1531   cbnz(temp_reg, is_flattened_array);
1532 }
1533 
1534 void MacroAssembler::test_null_free_array_oop(Register oop, Register temp_reg, Label& is_null_free_array) {
1535   load_storage_props(temp_reg, oop);
1536   andr(temp_reg, temp_reg, ArrayStorageProperties::null_free_value);
1537   cbnz(temp_reg, is_null_free_array);
1538 }
1539 
1540 // MacroAssembler protected routines needed to implement
1541 // public methods
1542 
1543 void MacroAssembler::mov(Register r, Address dest) {
1544   code_section()->relocate(pc(), dest.rspec());
1545   u_int64_t imm64 = (u_int64_t)dest.target();
1546   movptr(r, imm64);
1547 }
1548 
1549 // Move a constant pointer into r.  In AArch64 mode the virtual
1550 // address space is 48 bits in size, so we only need three
1551 // instructions to create a patchable instruction sequence that can
1552 // reach anywhere.
1553 void MacroAssembler::movptr(Register r, uintptr_t imm64) {
1554 #ifndef PRODUCT
1555   {
1556     char buffer[64];
1557     snprintf(buffer, sizeof(buffer), "0x%" PRIX64, imm64);
1558     block_comment(buffer);
1559   }


3712     adr = Address(rscratch2);
3713     break;
3714   }
3715   ldr(rscratch1, adr);
3716   add(rscratch1, rscratch1, src);
3717   str(rscratch1, adr);
3718 }
3719 
3720 void MacroAssembler::cmpptr(Register src1, Address src2) {
3721   unsigned long offset;
3722   adrp(rscratch1, src2, offset);
3723   ldr(rscratch1, Address(rscratch1, offset));
3724   cmp(src1, rscratch1);
3725 }
3726 
3727 void MacroAssembler::cmpoop(Register obj1, Register obj2) {
3728   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
3729   bs->obj_equals(this, obj1, obj2);
3730 }
3731 
3732 void MacroAssembler::load_metadata(Register dst, Register src) {
3733   if (UseCompressedClassPointers) {
3734     ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));

3735   } else {
3736     ldr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
3737   }
3738 }
3739 
3740 void MacroAssembler::load_klass(Register dst, Register src) {
3741   load_metadata(dst, src);
3742   if (UseCompressedClassPointers) {
3743     andr(dst, dst, oopDesc::compressed_klass_mask());
3744     decode_klass_not_null(dst);
3745   } else {
3746     ubfm(dst, dst, 0, 63 - oopDesc::storage_props_nof_bits);
3747   }
3748 }
3749 
3750 // ((OopHandle)result).resolve();
3751 void MacroAssembler::resolve_oop_handle(Register result, Register tmp) {
3752   // OopHandle::resolve is an indirection.
3753   access_load_at(T_OBJECT, IN_NATIVE, result, Address(result, 0), tmp, noreg);
3754 }
3755 
3756 void MacroAssembler::load_mirror(Register dst, Register method, Register tmp) {
3757   const int mirror_offset = in_bytes(Klass::java_mirror_offset());
3758   ldr(dst, Address(rmethod, Method::const_offset()));
3759   ldr(dst, Address(dst, ConstMethod::constants_offset()));
3760   ldr(dst, Address(dst, ConstantPool::pool_holder_offset_in_bytes()));
3761   ldr(dst, Address(dst, mirror_offset));
3762   resolve_oop_handle(dst, tmp);
3763 }
3764 
3765 void MacroAssembler::load_storage_props(Register dst, Register src) {
3766   load_metadata(dst, src);
3767   if (UseCompressedClassPointers) {
3768     asrw(dst, dst, oopDesc::narrow_storage_props_shift);
3769   } else {
3770     asr(dst, dst, oopDesc::wide_storage_props_shift);
3771   }
3772 }
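
load_klass and load_storage_props above split one header word two ways: the storage properties live in the top few bits, the (possibly compressed) klass in the rest. A sketch of that packing under assumed widths; the real constants are oopDesc's storage_props_nof_bits and the two shift values:

    #include <cstdint>

    constexpr int kPropsBits = 3;  // assumption, not the real constant

    // ubfm(dst, dst, 0, 63 - kPropsBits): keep the low bits (the Klass*).
    uint64_t klass_part(uint64_t word) {
      return word & ((uint64_t(1) << (64 - kPropsBits)) - 1);
    }

    // Shift by (64 - kPropsBits) to bring the property bits down (the emitted
    // code uses an arithmetic shift; a logical one is shown for simplicity).
    uint64_t storage_props(uint64_t word) {
      return word >> (64 - kPropsBits);
    }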
3773 
3774 void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) {
3775   if (UseCompressedClassPointers) {
3776     ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
3777     if (CompressedKlassPointers::base() == NULL) {
3778       cmp(trial_klass, tmp, LSL, CompressedKlassPointers::shift());
3779       return;
3780     } else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
3781                && CompressedKlassPointers::shift() == 0) {
3782       // Only the bottom 32 bits matter
3783       cmpw(trial_klass, tmp);
3784       return;
3785     }
3786     decode_klass_not_null(tmp);
3787   } else {
3788     ldr(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
3789   }
3790   cmp(trial_klass, tmp);
3791 }
3792 
3793 void MacroAssembler::load_prototype_header(Register dst, Register src) {


4071   narrowKlass nk = CompressedKlassPointers::encode(k);
4072   movz(dst, (nk >> 16), 16);
4073   movk(dst, nk & 0xffff);
4074 }
4075 
4076 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators,
4077                                     Register dst, Address src,
4078                                     Register tmp1, Register thread_tmp) {
4079   BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
4080   decorators = AccessInternal::decorator_fixup(decorators);
4081   bool as_raw = (decorators & AS_RAW) != 0;
4082   if (as_raw) {
4083     bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
4084   } else {
4085     bs->load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
4086   }
4087 }
4088 
4089 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators,
4090                                      Address dst, Register src,
4091                                      Register tmp1, Register thread_tmp, Register tmp3) {
4092 
4093   BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
4094   decorators = AccessInternal::decorator_fixup(decorators);
4095   bool as_raw = (decorators & AS_RAW) != 0;
4096   if (as_raw) {
4097     bs->BarrierSetAssembler::store_at(this, decorators, type, dst, src, tmp1, thread_tmp, tmp3);
4098   } else {
4099     bs->store_at(this, decorators, type, dst, src, tmp1, thread_tmp, tmp3);
4100   }
4101 }
4102 
4103 void MacroAssembler::resolve(DecoratorSet decorators, Register obj) {
4104   // Use stronger ACCESS_WRITE|ACCESS_READ by default.
4105   if ((decorators & (ACCESS_READ | ACCESS_WRITE)) == 0) {
4106     decorators |= ACCESS_READ | ACCESS_WRITE;
4107   }
4108   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
4109   return bs->resolve(this, decorators, obj);
4110 }
4111 
4112 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1,
4113                                    Register thread_tmp, DecoratorSet decorators) {
4114   access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp);
4115 }
4116 
4117 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1,
4118                                             Register thread_tmp, DecoratorSet decorators) {
4119   access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, thread_tmp);
4120 }
4121 
4122 void MacroAssembler::store_heap_oop(Address dst, Register src, Register tmp1,
4123                                     Register thread_tmp, Register tmp3, DecoratorSet decorators) {
4124   access_store_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp, tmp3);
4125 }
4126 
4127 // Used for storing NULLs.
4128 void MacroAssembler::store_heap_oop_null(Address dst) {
4129   access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
4130 }
4131 
4132 Address MacroAssembler::allocate_metadata_address(Metadata* obj) {
4133   assert(oop_recorder() != NULL, "this assembler needs a Recorder");
4134   int index = oop_recorder()->allocate_metadata_index(obj);
4135   RelocationHolder rspec = metadata_Relocation::spec(index);
4136   return Address((address)obj, rspec);
4137 }
4138 
4139 // Move an oop into a register.  immediate is true if we want
4140 // immediate instructions, i.e. we are not going to patch this
4141 // instruction while the code is being executed by another thread.  In
4142 // that case we can use move immediates rather than the constant pool.
4143 void MacroAssembler::movoop(Register dst, jobject obj, bool immediate) {
4144   int oop_index;
4145   if (obj == NULL) {
4146     oop_index = oop_recorder()->allocate_oop_index(obj);
4147   } else {
4148 #ifdef ASSERT
4149     {


5912 }
5913 
5914 // get_thread() can be called anywhere inside generated code so we
5915 // need to save whatever non-callee save context might get clobbered
5916 // by the call to JavaThread::aarch64_get_thread_helper() or, indeed,
5917 // the call setup code.
5918 //
5919 // aarch64_get_thread_helper() clobbers only r0, r1, and flags.
5920 //
5921 void MacroAssembler::get_thread(Register dst) {
5922   RegSet saved_regs = RegSet::range(r0, r1) + lr - dst;
5923   push(saved_regs, sp);
5924 
5925   mov(lr, CAST_FROM_FN_PTR(address, JavaThread::aarch64_get_thread_helper));
5926   blrt(lr, 1, 0, 1);
5927   if (dst != c_rarg0) {
5928     mov(dst, c_rarg0);
5929   }
5930 
5931   pop(saved_regs, sp);
5932 }
5933 
5934 // C2 compiled method's prolog code
5935 // Moved here from aarch64.ad to support the Valhalla code below
5936 void MacroAssembler::verified_entry(Compile* C, int sp_inc) {
5937 
5938   // n.b. frame size includes space for return pc and rfp
5939   const long framesize = C->frame_size_in_bytes();
5940   assert(framesize % (2 * wordSize) == 0, "must preserve 2 * wordSize alignment");
5941 
5942   // insert a nop at the start of the prolog so we can patch in a
5943   // branch if we need to invalidate the method later
5944   nop();
5945 
5946   int bangsize = C->bang_size_in_bytes();
5947   if (C->need_stack_bang(bangsize) && UseStackBanging)
5948     generate_stack_overflow_check(bangsize);
5949 
5950   build_frame(framesize);
5951 
5952   if (NotifySimulator) {
5953     notify(Assembler::method_entry);
5954   }
5955 
5956   if (VerifyStackAtCalls) {
5957     Unimplemented();
5958   }
5959 }
5960 
5961 int MacroAssembler::store_value_type_fields_to_buf(ciValueKlass* vk, bool from_interpreter) {
5962   // A value type might be returned. If fields are in registers we
5963   // need to allocate a value type instance and initialize it with
5964   // the value of the fields.
5965   Label skip;
5966   // We only need a new buffered value if a new one is not returned
5967   cmp(r0, (u1) 1);
5968   br(Assembler::EQ, skip);
5969   int call_offset = -1;
5970 
5971   Label slow_case;
5972 
5973   // Try to allocate a new buffered value (from the heap)
5974   if (UseTLAB) {
5975 
5976     if (vk != NULL) {
5977       // Called from C1, where the return type is statically known.
5978       mov(r1, (intptr_t)vk->get_ValueKlass());
5979       jint lh = vk->layout_helper();
5980       assert(lh != Klass::_lh_neutral_value, "inline class in return type must have been resolved");
5981       mov(r14, lh);
5982     } else {
5983       // Called from the interpreter: r0 contains ((the ValueKlass* of the return type) | 0x01)
5984       andr(r1, r0, -2);
5985       // get obj size
5986       ldrw(r14, Address(r1 /*klass*/, Klass::layout_helper_offset()));
5987     }
5988 
5989     ldr(r13, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
5990 
5991     // Check whether we have space in the TLAB:
5992     // r13 points at the newly allocated obj, r14 becomes the new TLAB top.
5993     lea(r14, Address(r13, r14));
5994     ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_end_offset())));
5995 
5996     cmp(r14, rscratch1);
5997     br(Assembler::GT, slow_case);
5998 
5999     // OK, we have room in the TLAB,
6000     // set the new TLAB top.
6001     str(r14, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
6002 
6003     // Set the new class as always locked.
6004     mov(rscratch1, (uint64_t) markOopDesc::always_locked_prototype());
6005     str(rscratch1, Address(r13, oopDesc::mark_offset_in_bytes()));
6006 
6007     store_klass_gap(r13, zr);  // zero klass gap for compressed oops
6008     if (vk == NULL) {
6009       // store_klass corrupts r1, so save it in r0 for later use (interpreter case only).
6010       mov(r0, r1);
6011     }
6012 
6013     store_klass(r13, r1);  // klass
6014 
6015       if (vk != NULL) {
6016         // FIXME -- do the packing in-line to avoid the runtime call
6017         mov(r0, r13);
6018         far_call(RuntimeAddress(vk->pack_handler())); // no need for call info as this will not safepoint.
6019       } else {
6020 
6021         // We have our new buffered value, initialize its fields with a
6022         // value class specific handler
6023         ldr(r1, Address(r0, InstanceKlass::adr_valueklass_fixed_block_offset()));
6024         ldr(r1, Address(r1, ValueKlass::pack_handler_offset()));
6025 
6026         // Mov new class to r0 and call pack_handler
6027         mov(r0, r13);
6028         blr(r1);
6029       }
6030       b(skip);
6031   }
6032 
6033   bind(slow_case);
6034   // We failed to allocate a new value, fall back to a runtime
6035   // call. Some oop field may be live in some registers but we can't
6036   // tell. That runtime call will take care of preserving them
6037   // across a GC if there's one.
6038 
6039 
6040   if (from_interpreter) {
6041     super_call_VM_leaf(StubRoutines::store_value_type_fields_to_buf());
6042   } else {
6043     ldr(rscratch1, RuntimeAddress(StubRoutines::store_value_type_fields_to_buf()));
6044     blr(rscratch1);
6045     call_offset = offset();
6046   }
6047 
6048   bind(skip);
6049   return call_offset;
6050 }
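
The UseTLAB fast path above is a classic bump-pointer allocation: new_top = top + size, compare against end, then commit or take the slow path. The same logic in standalone form:

    #include <cstddef>

    struct Tlab { char* top; char* end; };

    // Mirrors the ldr/lea/cmp/str sequence: returns the old top on success,
    // nullptr to send the caller to the slow case.
    void* tlab_allocate(Tlab& t, size_t size) {
      char* obj     = t.top;
      char* new_top = obj + size;
      if (new_top > t.end) return nullptr;  // br(Assembler::GT, slow_case)
      t.top = new_top;                      // publish the new TLAB top
      return obj;
    }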
6051 
6052 // Move a value between registers/stack slots and update the reg_state
6053 bool MacroAssembler::move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[], int ret_off, int extra_stack_offset) {
6054   if (reg_state[to->value()] == reg_written) {
6055     return true; // Already written
6056   }
6057 
6058   if (from != to && bt != T_VOID) {
6059     if (reg_state[to->value()] == reg_readonly) {
6060       return false; // Not yet writable
6061     }
6062     if (from->is_reg()) {
6063       if (to->is_reg()) {
6064         mov(to->as_Register(), from->as_Register());
6065       } else {
6066         int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + extra_stack_offset;
6067         Address to_addr = Address(sp, st_off);
6068         if (from->is_FloatRegister()) {
6069           if (bt == T_DOUBLE) {
6070              strd(from->as_FloatRegister(), to_addr);
6071           } else {
6072              assert(bt == T_FLOAT, "must be float");
6073              strs(from->as_FloatRegister(), to_addr);
6074           }
6075         } else {
6076           str(from->as_Register(), to_addr); 
6077         }
6078       }
6079     } else {
6080       Address from_addr = Address(sp, from->reg2stack() * VMRegImpl::stack_slot_size + extra_stack_offset);
6081       if (to->is_reg()) {
6082         if (to->is_FloatRegister()) {
6083           if (bt == T_DOUBLE) {
6084              ldrd(to->as_FloatRegister(), from_addr);
6085           } else {
6086             assert(bt == T_FLOAT, "must be float");
6087             ldrs(to->as_FloatRegister(), from_addr);
6088           }
6089         } else {
6090           ldr(to->as_Register(), from_addr); 
6091         }
6092       } else {
6093         int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + extra_stack_offset;
6094         ldr(rscratch1, from_addr); 
6095         str(rscratch1, Address(sp, st_off));
6096       }
6097     }
6098   }
6099 
6100   // Update register states
6101   reg_state[from->value()] = reg_writable;
6102   reg_state[to->value()] = reg_written;
6103   return true;
6104 }
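
move_helper drives reg_state[] as a three-state machine per location: reg_readonly (still holds an unread source), reg_writable, and reg_written. A move refuses a readonly destination, to be retried once that value has been consumed, and otherwise marks its source writable and its destination written. The transition in isolation:

    enum RegState { reg_readonly, reg_writable, reg_written };

    // Returns false when 'to' still holds unread data -- exactly the condition
    // move_helper reports so the caller can reorder the moves.
    bool try_move(RegState state[], int from, int to) {
      if (state[to] == reg_written)  return true;
      if (state[to] == reg_readonly) return false;
      state[from] = reg_writable;
      state[to]   = reg_written;
      return true;
    }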
6105 
6106 // Read all fields from a value type oop and store the values in registers/stack slots
6107 bool MacroAssembler::unpack_value_helper(const GrowableArray<SigEntry>* sig, int& sig_index, VMReg from, VMRegPair* regs_to,
6108                                          int& to_index, RegState reg_state[], int ret_off, int extra_stack_offset) {
6109   Register fromReg = from->is_reg() ? from->as_Register() : noreg;
6110   assert(sig->at(sig_index)._bt == T_VOID, "should be at end delimiter");
6111 
6112 
6113   int vt = 1;
6114   bool done = true;
6115   bool mark_done = true;
6116   do {
6117     sig_index--;
6118     BasicType bt = sig->at(sig_index)._bt;
6119     if (bt == T_VALUETYPE) {
6120       vt--;
6121     } else if (bt == T_VOID &&
6122                sig->at(sig_index-1)._bt != T_LONG &&
6123                sig->at(sig_index-1)._bt != T_DOUBLE) {
6124       vt++;
6125     } else if (SigEntry::is_reserved_entry(sig, sig_index)) {
6126       to_index--; // Ignore this
6127     } else {
6128       assert(to_index >= 0, "invalid to_index");
6129       VMRegPair pair_to = regs_to[to_index--];
6130       VMReg to = pair_to.first();
6131 
6132       if (bt == T_VOID) continue;
6133 
6134       int idx = (int) to->value();
6135       if (reg_state[idx] == reg_readonly) {
6136          if (idx != from->value()) {
6137            mark_done = false;
6138          }
6139          done = false;
6140          continue;
6141       } else if (reg_state[idx] == reg_written) {
6142         continue;
6143       } else {
6144         assert(reg_state[idx] == reg_writable, "must be writable");
6145         reg_state[idx] = reg_written;
6146       }
6147 
6148       if (fromReg == noreg) {
6149         int st_off = from->reg2stack() * VMRegImpl::stack_slot_size + extra_stack_offset;
6150         ldr(rscratch2, Address(sp, st_off)); 
6151         fromReg = rscratch2;
6152       }
6153 
6154       int off = sig->at(sig_index)._offset;
6155       assert(off > 0, "offset in object should be positive");
6156       bool is_oop = (bt == T_OBJECT || bt == T_ARRAY);
6157 
6158       Address fromAddr = Address(fromReg, off);
6159       bool is_signed = (bt != T_CHAR) && (bt != T_BOOLEAN);
6160 
6161       if (!to->is_FloatRegister()) {
6162 
6163         Register dst = to->is_stack() ? rscratch1 : to->as_Register();
6164 
6165         if (is_oop) {
6166           load_heap_oop(dst, fromAddr);
6167         } else {
6168           load_sized_value(dst, fromAddr, type2aelembytes(bt), is_signed);
6169         }
6170         if (to->is_stack()) {
6171           int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + extra_stack_offset;
6172           str(dst, Address(sp, st_off));
6173         }
6174       } else {
6175         if (bt == T_DOUBLE) {
6176           ldrd(to->as_FloatRegister(), fromAddr);
6177         } else {
6178           assert(bt == T_FLOAT, "must be float");
6179           ldrs(to->as_FloatRegister(), fromAddr);
6180         }
6181       }
6182 
6183     }
6184 
6185   } while (vt != 0);
6186 
6187   if (mark_done && reg_state[from->value()] != reg_written) {
6188     // This is okay because no one else will write to that slot
6189     reg_state[from->value()] = reg_writable;
6190   }
6191   return done;
6192 }
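
The do/while above walks the flattened signature backwards, balancing delimiters: T_VALUETYPE opens a scalarized group, a T_VOID closes one unless it is merely the high half of a long or double, and vt tracks the nesting depth. A compact model of that walk (simplified types, assuming a well-formed signature):

    #include <vector>

    enum Bt { VALUETYPE, VOIDMARK, LONG, DOUBLE };

    // From the index of a group's closing VOIDMARK, find its opening
    // VALUETYPE; closers raise the depth, openers lower it.
    int group_start(const std::vector<Bt>& sig, int end) {
      int vt = 1, i = end;
      do {
        i--;
        if (sig[i] == VALUETYPE) vt--;
        else if (sig[i] == VOIDMARK && sig[i - 1] != LONG && sig[i - 1] != DOUBLE) vt++;
      } while (vt != 0);
      return i;
    }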
6193 
6194 // Pack fields back into a value type oop
6195 bool MacroAssembler::pack_value_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
6196                                        VMReg to, VMRegPair* regs_from, int regs_from_count, int& from_index, RegState reg_state[],
6197                                        int ret_off, int extra_stack_offset) {
6198   assert(sig->at(sig_index)._bt == T_VALUETYPE, "should be at end delimiter");
6199   assert(to->is_valid(), "must be");
6200 
6201   if (reg_state[to->value()] == reg_written) {
6202     skip_unpacked_fields(sig, sig_index, regs_from, regs_from_count, from_index);
6203     return true; // Already written
6204   }
6205 
6206   Register val_array = r0;
6207   Register val_obj_tmp = r11;
6208   Register from_reg_tmp = r10;
6209   Register tmp1 = r14;
6210   Register tmp2 = r13;
6211   Register tmp3 = r1;
6212   Register val_obj = to->is_stack() ? val_obj_tmp : to->as_Register();
6213 
6214   if (reg_state[to->value()] == reg_readonly) {
6215     if (!is_reg_in_unpacked_fields(sig, sig_index, to, regs_from, regs_from_count, from_index)) {
6216       skip_unpacked_fields(sig, sig_index, regs_from, regs_from_count, from_index);
6217       return false; // Not yet writable
6218     }
6219     val_obj = val_obj_tmp;
6220   }
6221 
6222   int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + vtarg_index * type2aelembytes(T_VALUETYPE);
6223   load_heap_oop(val_obj, Address(val_array, index));
6224 
6225   ScalarizedValueArgsStream stream(sig, sig_index, regs_from, regs_from_count, from_index);
6226   VMRegPair from_pair;
6227   BasicType bt;
6228 
6229   while (stream.next(from_pair, bt)) {
6230     int off = sig->at(stream.sig_cc_index())._offset;
6231     assert(off > 0, "offset in object should be positive");
6232     bool is_oop = (bt == T_OBJECT || bt == T_ARRAY);
6233     size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize;
6234 
6235     VMReg from_r1 = from_pair.first();
6236     VMReg from_r2 = from_pair.second();
6237 
6238     // Pack the scalarized field into the value object.
6239     Address dst(val_obj, off);
6240 
6241     if (!from_r1->is_FloatRegister()) {
6242       Register from_reg;
6243       if (from_r1->is_stack()) {
6244         from_reg = from_reg_tmp;
6245         int ld_off = from_r1->reg2stack() * VMRegImpl::stack_slot_size + extra_stack_offset;
6246         load_sized_value(from_reg, Address(sp, ld_off), size_in_bytes, /* is_signed */ false);
6247       } else {
6248         from_reg = from_r1->as_Register();
6249       }
6250 
6251       if (is_oop) {
6252         DecoratorSet decorators = IN_HEAP | ACCESS_WRITE;
6253         store_heap_oop(dst, from_reg, tmp1, tmp2, tmp3, decorators);
6254       } else {
6255         store_sized_value(dst, from_reg, size_in_bytes);
6256       }
6257     } else { 
6258       if (from_r2->is_valid()) {
6259         strd(from_r1->as_FloatRegister(), dst);
6260       } else {
6261         strs(from_r1->as_FloatRegister(), dst);
6262       }
6263     }
6264 
6265     reg_state[from_r1->value()] = reg_writable;
6266   }
6267   sig_index = stream.sig_cc_index();
6268   from_index = stream.regs_cc_index();
6269 
6270   assert(reg_state[to->value()] == reg_writable, "must have already been read");
6271   bool success = move_helper(val_obj->as_VMReg(), to, T_OBJECT, reg_state, ret_off, extra_stack_offset);
6272   assert(success, "to register must be writeable");
6273 
6274   return true;
6275 }
6276 
6277 // Unpack all value type arguments passed as oops
6278 void MacroAssembler::unpack_value_args(Compile* C, bool receiver_only) {
6279   int sp_inc = unpack_value_args_common(C, receiver_only);
6280   // Emit code for verified entry and save increment for stack repair on return
6281   verified_entry(C, sp_inc);
6282 }
6283 
6284 int MacroAssembler::shuffle_value_args(bool is_packing, bool receiver_only, int extra_stack_offset,
6285                                        BasicType* sig_bt, const GrowableArray<SigEntry>* sig_cc,
6286                                        int args_passed, int args_on_stack, VMRegPair* regs,            // from
6287                                        int args_passed_to, int args_on_stack_to, VMRegPair* regs_to) { // to
6288   // Check if we need to extend the stack for packing/unpacking
6289   int sp_inc = (args_on_stack_to - args_on_stack) * VMRegImpl::stack_slot_size;
6290   if (sp_inc > 0) {
6291     sp_inc = align_up(sp_inc, StackAlignmentInBytes);
6292     if (!is_packing) {
6293       // Save the return address, adjust the stack (make sure it is properly
6294       // 16-byte aligned) and copy the return address to the new top of the stack.
6295       // (Note: C1 does this in C1_MacroAssembler::scalarized_entry).
6296     // FIXME: we don't need to preserve the return address on aarch64
6297       pop(rscratch1);
6298       sub(sp, sp, sp_inc); 
6299       push(rscratch1);
6300     }
6301   } else {
6302     // The scalarized calling convention needs less stack space than the unscalarized one.
6303     // No need to extend the stack, the caller will take care of these adjustments.
6304     sp_inc = 0;
6305   }
6306 
6307   int ret_off; // make sure we don't overwrite the return address
6308   if (is_packing) {
6309     // For C1 code, the VVEP doesn't have reserved slots, so we store the return address at
6310     // sp[0] during shuffling.
6311     ret_off = 0;
6312   } else {
6313     // C2 code ensures that sp_inc is a reserved slot.
6314     ret_off = sp_inc;
6315   }
6316 
6317   return shuffle_value_args_common(is_packing, receiver_only, extra_stack_offset,
6318                                    sig_bt, sig_cc,
6319                                    args_passed, args_on_stack, regs,
6320                                    args_passed_to, args_on_stack_to, regs_to,
6321                                    sp_inc, ret_off);
6322 }
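
sp_inc is rounded up to StackAlignmentInBytes (16 on AArch64) before the stack is grown. The standard power-of-two rounding behind align_up:

    #include <cstdint>

    // align_up for a power-of-two alignment a.
    constexpr uint64_t align_up(uint64_t x, uint64_t a) {
      return (x + a - 1) & ~(a - 1);
    }

    static_assert(align_up(8, 16) == 16 && align_up(16, 16) == 16 &&
                  align_up(24, 16) == 32, "16-byte stack alignment");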
6323 
6324 VMReg MacroAssembler::spill_reg_for(VMReg reg) {
6325   return (reg->is_FloatRegister()) ? v0->as_VMReg() : r14->as_VMReg();
6326 }