src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp

  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "asm/assembler.hpp"
  29 #include "c1/c1_CodeStubs.hpp"
  30 #include "c1/c1_Compilation.hpp"
  31 #include "c1/c1_LIRAssembler.hpp"
  32 #include "c1/c1_MacroAssembler.hpp"
  33 #include "c1/c1_Runtime1.hpp"
  34 #include "c1/c1_ValueStack.hpp"
  35 #include "ci/ciArrayKlass.hpp"

  36 #include "ci/ciInstance.hpp"
  37 #include "code/compiledIC.hpp"
  38 #include "gc/shared/collectedHeap.hpp"
  39 #include "gc/shared/gc_globals.hpp"
  40 #include "nativeInst_aarch64.hpp"
  41 #include "oops/objArrayKlass.hpp"

  42 #include "runtime/frame.inline.hpp"
  43 #include "runtime/sharedRuntime.hpp"
  44 #include "runtime/stubRoutines.hpp"
  45 #include "utilities/powerOfTwo.hpp"
  46 #include "vmreg_aarch64.inline.hpp"
  47 
  48 
  49 #ifndef PRODUCT
  50 #define COMMENT(x)   do { __ block_comment(x); } while (0)
  51 #else
  52 #define COMMENT(x)
  53 #endif
  54 
  55 NEEDS_CLEANUP // remove these definitions?
  56 const Register IC_Klass    = rscratch2;   // where the IC klass is cached
  57 const Register SYNC_header = r0;   // synchronization header
  58 const Register SHIFT_count = r0;   // where count for shift operations must be
  59 
  60 #define __ _masm->
  61 

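A note on the "__" shorthand defined above: HotSpot redefines __ per file so that instruction-emitting code reads like an assembly listing while remaining ordinary C++ calls on _masm. A minimal standalone sketch of the same pattern, using a hypothetical Emitter class rather than the real MacroAssembler API:

    #include <cstdio>

    struct Emitter {                      // stand-in for C1_MacroAssembler
      void nop() { std::puts("nop"); }
      void ret() { std::puts("ret lr"); }
    };

    #define __ em->

    static void emit_stub(Emitter* em) {
      __ nop();                           // expands to em->nop()
      __ ret();                           // expands to em->ret()
    }

    #undef __

    int main() { Emitter e; emit_stub(&e); return 0; }
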
 438   MonitorExitStub* stub = NULL;
 439   if (method()->is_synchronized()) {
 440     monitor_address(0, FrameMap::r0_opr);
 441     stub = new MonitorExitStub(FrameMap::r0_opr, true, 0);
 442     __ unlock_object(r5, r4, r0, *stub->entry());
 443     __ bind(*stub->continuation());
 444   }
 445 
 446   if (compilation()->env()->dtrace_method_probes()) {
 447     __ mov(c_rarg0, rthread);
 448     __ mov_metadata(c_rarg1, method()->constant_encoding());
 449     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);
 450   }
 451 
 452   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 453     __ mov(r0, r19);  // Restore the exception
 454   }
 455 
 456   // remove the activation and dispatch to the unwind handler
 457   __ block_comment("remove_frame and dispatch to the unwind handler");
 458   __ remove_frame(initial_frame_size_in_bytes());
 459   __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
 460 
 461   // Emit the slow path assembly
 462   if (stub != NULL) {
 463     stub->emit_code(this);
 464   }
 465 
 466   return offset;
 467 }
 468 
 469 
 470 int LIR_Assembler::emit_deopt_handler() {
 471   // if the last instruction is a call (typically to do a throw which
 472   // is coming at the end after block reordering) the return address
 473   // must still point into the code area in order to avoid assertion
 474   // failures when searching for the corresponding bci => add a nop
 475   // (was bug 5/14/1999 - gri)
 476   __ nop();
 477 
 478   // generate code for exception handler

 489   __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
 490   guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
 491   __ end_a_stub();
 492 
 493   return offset;
 494 }
 495 
 496 void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
 497   _masm->code_section()->relocate(adr, relocInfo::poll_type);
 498   int pc_offset = code_offset();
 499   flush_debug_info(pc_offset);
 500   info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
 501   if (info->exception_handlers() != NULL) {
 502     compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
 503   }
 504 }
 505 
 506 void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
 507   assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0,");
 508 
 509   // Pop the stack before the safepoint code
 510   __ remove_frame(initial_frame_size_in_bytes());
 511 
 512   if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
 513     __ reserved_stack_check();
 514   }
 515 
 516   code_stub->set_safepoint_offset(__ offset());
 517   __ relocate(relocInfo::poll_return_type);
 518   __ safepoint_poll(*code_stub->entry(), true /* at_return */, false /* acquire */, true /* in_nmethod */);
 519   __ ret(lr);
 520 }
 521 
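return_op pops the frame first, then polls: the load emitted by safepoint_poll targets the polling page and carries a poll_return_type relocation so the VM can find and arm it. When a safepoint is pending, the page is made unreadable and the poll load traps into the VM's signal handler. A Linux-only sketch of that arm-and-trap mechanism (simplified for a demo: siglongjmp out of a SIGSEGV handler, no error checking):

    #include <setjmp.h>
    #include <signal.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static sigjmp_buf jb;
    static void on_fault(int sig) { (void)sig; siglongjmp(jb, 1); }

    int main() {
      long psz = sysconf(_SC_PAGESIZE);
      char* poll_page = (char*)mmap(NULL, psz, PROT_READ,
                                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      signal(SIGSEGV, on_fault);

      volatile char c = *poll_page;          // disarmed: poll falls through
      (void)c;
      puts("poll 1: no safepoint pending");

      mprotect(poll_page, psz, PROT_NONE);   // the VM arms the page
      if (sigsetjmp(jb, 1) == 0) {
        volatile char d = *poll_page;        // armed: the load faults
        (void)d;
      } else {
        puts("poll 2: faulted; thread would stop at the safepoint");
      }
      return 0;
    }
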
 522 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
 523   guarantee(info != NULL, "Shouldn't be NULL");
 524   __ get_polling_page(rscratch1, relocInfo::poll_type);
 525   add_debug_info_for_branch(info);  // This isn't just debug info:
 526                                     // it's the oop map
 527   __ read_polling_page(rscratch1, relocInfo::poll_type);
 528   return __ offset();
 529 }
 530 
 531 
 532 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
 533   if (from_reg == r31_sp)
 534     from_reg = sp;
 535   if (to_reg == r31_sp)
 536     to_reg = sp;
 537   __ mov(to_reg, from_reg);
 538 }
 539 
 540 void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }
 541 

 547 
 548   switch (c->type()) {
 549     case T_INT: {
 550       assert(patch_code == lir_patch_none, "no patching handled here");
 551       __ movw(dest->as_register(), c->as_jint());
 552       break;
 553     }
 554 
 555     case T_ADDRESS: {
 556       assert(patch_code == lir_patch_none, "no patching handled here");
 557       __ mov(dest->as_register(), c->as_jint());
 558       break;
 559     }
 560 
 561     case T_LONG: {
 562       assert(patch_code == lir_patch_none, "no patching handled here");
 563       __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
 564       break;
 565     }
 566 

 567     case T_OBJECT: {
 568         if (patch_code == lir_patch_none) {
 569           jobject2reg(c->as_jobject(), dest->as_register());
 570         } else {
 571           jobject2reg_with_patching(dest->as_register(), info);
 572         }
 573       break;
 574     }
 575 
 576     case T_METADATA: {
 577       if (patch_code != lir_patch_none) {
 578         klass2reg_with_patching(dest->as_register(), info);
 579       } else {
 580         __ mov_metadata(dest->as_register(), c->as_metadata());
 581       }
 582       break;
 583     }
 584 
 585     case T_FLOAT: {
 586       if (__ operand_valid_for_float_immediate(c->as_jfloat())) {
 587         __ fmovs(dest->as_float_reg(), (c->as_jfloat()));
 588       } else {
 589         __ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));
 590         __ ldrs(dest->as_float_reg(), Address(rscratch1));
 591       }

 593     }
 594 
 595     case T_DOUBLE: {
 596       if (__ operand_valid_for_float_immediate(c->as_jdouble())) {
 597         __ fmovd(dest->as_double_reg(), (c->as_jdouble()));
 598       } else {
 599         __ adr(rscratch1, InternalAddress(double_constant(c->as_jdouble())));
 600         __ ldrd(dest->as_double_reg(), Address(rscratch1));
 601       }
 602       break;
 603     }
 604 
 605     default:
 606       ShouldNotReachHere();
 607   }
 608 }
 609 
 610 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
 611   LIR_Const* c = src->as_constant_ptr();
 612   switch (c->type()) {

 613   case T_OBJECT:
 614     {
 615       if (! c->as_jobject())
 616         __ str(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
 617       else {
 618         const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
 619         reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
 620       }
 621     }
 622     break;
 623   case T_ADDRESS:
 624     {
 625       const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
 626       reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
 627     }
 628   case T_INT:
 629   case T_FLOAT:
 630     {
 631       Register reg = zr;
 632       if (c->as_jint_bits() == 0)

 659 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
 660   assert(src->is_constant(), "should not call otherwise");
 661   LIR_Const* c = src->as_constant_ptr();
 662   LIR_Address* to_addr = dest->as_address_ptr();
 663 
 664   void (Assembler::* insn)(Register Rt, const Address &adr);
 665 
 666   switch (type) {
 667   case T_ADDRESS:
 668     assert(c->as_jint() == 0, "should be");
 669     insn = &Assembler::str;
 670     break;
 671   case T_LONG:
 672     assert(c->as_jlong() == 0, "should be");
 673     insn = &Assembler::str;
 674     break;
 675   case T_INT:
 676     assert(c->as_jint() == 0, "should be");
 677     insn = &Assembler::strw;
 678     break;

 679   case T_OBJECT:
 680   case T_ARRAY:
 681     assert(c->as_jobject() == 0, "should be");
 682     if (UseCompressedOops && !wide) {
 683       insn = &Assembler::strw;
 684     } else {
 685       insn = &Assembler::str;
 686     }
 687     break;
 688   case T_CHAR:
 689   case T_SHORT:
 690     assert(c->as_jint() == 0, "should be");
 691     insn = &Assembler::strh;
 692     break;
 693   case T_BOOLEAN:
 694   case T_BYTE:
 695     assert(c->as_jint() == 0, "should be");
 696     insn = &Assembler::strb;
 697     break;
 698   default:
 699     ShouldNotReachHere();
 700     insn = &Assembler::str;  // unreachable
 701   }
 702 
 703   if (info) add_debug_info_for_null_check_here(info);
 704   (_masm->*insn)(zr, as_Address(to_addr, rscratch1));
 705 }
 706 
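const2mem above selects the width-appropriate store exactly once through a pointer to member function, then issues it with (_masm->*insn)(zr, ...), so the null-check bookkeeping and address formation are written in a single place. A self-contained sketch of the idiom, with a hypothetical Asm class rather than the real Assembler API:

    #include <cstdio>

    struct Asm {
      void strb(int adr) { std::printf("strb zr, [sp,#%d]\n", adr); }
      void strh(int adr) { std::printf("strh zr, [sp,#%d]\n", adr); }
      void strw(int adr) { std::printf("strw zr, [sp,#%d]\n", adr); }
      void str (int adr) { std::printf("str  zr, [sp,#%d]\n", adr); }
    };

    enum BasicType { T_BYTE, T_SHORT, T_INT, T_LONG };

    static void store_zero(Asm* masm, BasicType t, int adr) {
      void (Asm::*insn)(int);              // select the opcode once
      switch (t) {
        case T_BYTE:  insn = &Asm::strb; break;
        case T_SHORT: insn = &Asm::strh; break;
        case T_INT:   insn = &Asm::strw; break;
        default:      insn = &Asm::str;  break;
      }
      (masm->*insn)(adr);                  // emit it in one place
    }

    int main() { Asm a; store_zero(&a, T_SHORT, 16); return 0; }
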
 707 void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
 708   assert(src->is_register(), "should not call otherwise");
 709   assert(dest->is_register(), "should not call otherwise");
 710 
 711   // move between cpu-registers
 712   if (dest->is_single_cpu()) {
 713     if (src->type() == T_LONG) {
 714       // Can do LONG -> OBJECT
 715       move_regs(src->as_register_lo(), dest->as_register());
 716       return;
 717     }
 718     assert(src->is_single_cpu(), "must match");
 719     if (src->type() == T_OBJECT) {
 720       __ verify_oop(src->as_register());
 721     }
 722     move_regs(src->as_register(), dest->as_register());
 723 
 724   } else if (dest->is_double_cpu()) {
 725     if (is_reference_type(src->type())) {
 726       // Surprising to me but we can see move of a long to t_object
 727       __ verify_oop(src->as_register());
 728       move_regs(src->as_register(), dest->as_register_lo());
 729       return;
 730     }
 731     assert(src->is_double_cpu(), "must match");
 732     Register f_lo = src->as_register_lo();
 733     Register f_hi = src->as_register_hi();
 734     Register t_lo = dest->as_register_lo();
 735     Register t_hi = dest->as_register_hi();
 736     assert(f_hi == f_lo, "must be same");
 737     assert(t_hi == t_lo, "must be same");
 738     move_regs(f_lo, t_lo);
 739 

 799 
 800     if (UseCompressedOops && !wide) {
 801       __ encode_heap_oop(compressed_src, src->as_register());
 802     } else {
 803       compressed_src = src->as_register();
 804     }
 805   }
 806 
 807   int null_check_here = code_offset();
 808   switch (type) {
 809     case T_FLOAT: {
 810       __ strs(src->as_float_reg(), as_Address(to_addr));
 811       break;
 812     }
 813 
 814     case T_DOUBLE: {
 815       __ strd(src->as_double_reg(), as_Address(to_addr));
 816       break;
 817     }
 818 

 819     case T_ARRAY:   // fall through
 820     case T_OBJECT:  // fall through
 821       if (UseCompressedOops && !wide) {
 822         __ strw(compressed_src, as_Address(to_addr, rscratch2));
 823       } else {
 824          __ str(compressed_src, as_Address(to_addr));
 825       }
 826       break;
 827     case T_METADATA:
 828       // We get here to store a method pointer to the stack to pass to
 829       // a dtrace runtime call. This can't work on 64 bit with
 830       // compressed klass ptrs: T_METADATA can be a compressed klass
 831       // ptr or a 64 bit method pointer.
 832       ShouldNotReachHere();
 833       __ str(src->as_register(), as_Address(to_addr));
 834       break;
 835     case T_ADDRESS:
 836       __ str(src->as_register(), as_Address(to_addr));
 837       break;
 838     case T_INT:

 928   add_call_info_here(info);
 929 }
 930 
 931 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
 932 
 933   LIR_Opr temp;
 934   if (type == T_LONG || type == T_DOUBLE)
 935     temp = FrameMap::rscratch1_long_opr;
 936   else
 937     temp = FrameMap::rscratch1_opr;
 938 
 939   stack2reg(src, temp, src->type());
 940   reg2stack(temp, dest, dest->type(), false);
 941 }
 942 
 943 
 944 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
 945   LIR_Address* addr = src->as_address_ptr();
 946   LIR_Address* from_addr = src->as_address_ptr();
 947 
 948   if (addr->base()->type() == T_OBJECT) {
 949     __ verify_oop(addr->base()->as_pointer_register());
 950   }
 951 
 952   if (patch_code != lir_patch_none) {
 953     deoptimize_trap(info);
 954     return;
 955   }
 956 
 957   if (info != NULL) {
 958     add_debug_info_for_null_check_here(info);
 959   }
 960   int null_check_here = code_offset();
 961   switch (type) {
 962     case T_FLOAT: {
 963       __ ldrs(dest->as_float_reg(), as_Address(from_addr));
 964       break;
 965     }
 966 
 967     case T_DOUBLE: {
 968       __ ldrd(dest->as_double_reg(), as_Address(from_addr));
 969       break;
 970     }
 971 

 972     case T_ARRAY:   // fall through
 973     case T_OBJECT:  // fall through
 974       if (UseCompressedOops && !wide) {
 975         __ ldrw(dest->as_register(), as_Address(from_addr));
 976       } else {
 977          __ ldr(dest->as_register(), as_Address(from_addr));
 978       }
 979       break;
 980     case T_METADATA:
 981       // We get here to store a method pointer to the stack to pass to
 982       // a dtrace runtime call. This can't work on 64 bit with
 983       // compressed klass ptrs: T_METADATA can be a compressed klass
 984       // ptr or a 64 bit method pointer.
 985       ShouldNotReachHere();
 986       __ ldr(dest->as_register(), as_Address(from_addr));
 987       break;
 988     case T_ADDRESS:
 989       // FIXME: OMG this is a horrible kludge.  Any offset from an
 990       // address that matches klass_offset_in_bytes() will be loaded
 991       // as a word, not a long.

1022     default:
1023       ShouldNotReachHere();
1024   }
1025 
1026   if (is_reference_type(type)) {
1027     if (UseCompressedOops && !wide) {
1028       __ decode_heap_oop(dest->as_register());
1029     }
1030 
1031     if (!UseZGC) {
1032       // Load barrier has not yet been applied, so ZGC can't verify the oop here
1033       __ verify_oop(dest->as_register());
1034     }
1035   } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
1036     if (UseCompressedClassPointers) {
1037       __ decode_klass_not_null(dest->as_register());
1038     }
1039   }
1040 }
1041 
1042 
1043 int LIR_Assembler::array_element_size(BasicType type) const {
1044   int elem_size = type2aelembytes(type);
1045   return exact_log2(elem_size);
1046 }
1047 
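array_element_size converts the element byte size into a shift amount; every BasicType's element size is a power of two, so array indexing can use (index << scale) instead of a multiply. A small sketch of the mapping, with a hand-rolled stand-in for exact_log2:

    #include <assert.h>

    static int exact_log2_sketch(int x) {
      assert(x > 0 && (x & (x - 1)) == 0);   // must be a power of two
      int n = 0;
      while ((1 << n) != x) n++;
      return n;
    }

    int main() {
      // byte -> 0, short/char -> 1, int/float -> 2, long/double -> 3
      assert(exact_log2_sketch(1) == 0);
      assert(exact_log2_sketch(2) == 1);
      assert(exact_log2_sketch(4) == 2);
      assert(exact_log2_sketch(8) == 3);
      // element address = base + header_size + ((long)index << scale)
      return 0;
    }
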
1048 
1049 void LIR_Assembler::emit_op3(LIR_Op3* op) {
1050   switch (op->code()) {
1051   case lir_idiv:
1052   case lir_irem:
1053     arithmetic_idiv(op->code(),
1054                     op->in_opr1(),
1055                     op->in_opr2(),
1056                     op->in_opr3(),
1057                     op->result_opr(),
1058                     op->info());
1059     break;
1060   case lir_fmad:
1061     __ fmaddd(op->result_opr()->as_double_reg(),

1213     __ ldrb(rscratch1, Address(op->klass()->as_register(),
1214                                InstanceKlass::init_state_offset()));
1215     __ cmpw(rscratch1, InstanceKlass::fully_initialized);
1216     add_debug_info_for_null_check_here(op->stub()->info());
1217     __ br(Assembler::NE, *op->stub()->entry());
1218   }
1219   __ allocate_object(op->obj()->as_register(),
1220                      op->tmp1()->as_register(),
1221                      op->tmp2()->as_register(),
1222                      op->header_size(),
1223                      op->object_size(),
1224                      op->klass()->as_register(),
1225                      *op->stub()->entry());
1226   __ bind(*op->stub()->continuation());
1227 }
1228 
1229 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1230   Register len =  op->len()->as_register();
1231   __ uxtw(len, len);
1232 
1233   if (UseSlowPath ||
1234       (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1235       (!UseFastNewTypeArray   && !is_reference_type(op->type()))) {
1236     __ b(*op->stub()->entry());
1237   } else {
1238     Register tmp1 = op->tmp1()->as_register();
1239     Register tmp2 = op->tmp2()->as_register();
1240     Register tmp3 = op->tmp3()->as_register();
1241     if (len == tmp1) {
1242       tmp1 = tmp3;
1243     } else if (len == tmp2) {
1244       tmp2 = tmp3;
1245     } else if (len == tmp3) {
1246       // everything is ok
1247     } else {
1248       __ mov(tmp3, len);
1249     }
1250     __ allocate_array(op->obj()->as_register(),
1251                       len,
1252                       tmp1,
1253                       tmp2,

1319     assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1320   }
1321   Label profile_cast_success, profile_cast_failure;
1322   Label *success_target = should_profile ? &profile_cast_success : success;
1323   Label *failure_target = should_profile ? &profile_cast_failure : failure;
1324 
1325   if (obj == k_RInfo) {
1326     k_RInfo = dst;
1327   } else if (obj == klass_RInfo) {
1328     klass_RInfo = dst;
1329   }
1330   if (k->is_loaded() && !UseCompressedClassPointers) {
1331     select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1332   } else {
1333     Rtmp1 = op->tmp3()->as_register();
1334     select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1335   }
1336 
1337   assert_different_registers(obj, k_RInfo, klass_RInfo);
1338 

1339     if (should_profile) {
1340       Label not_null;
1341       __ cbnz(obj, not_null);
1342       // Object is null; update MDO and exit
1343       Register mdo  = klass_RInfo;
1344       __ mov_metadata(mdo, md->constant_encoding());
1345       Address data_addr
1346         = __ form_address(rscratch2, mdo,
1347                           md->byte_offset_of_slot(data, DataLayout::flags_offset()),
1348                           0);
1349       __ ldrb(rscratch1, data_addr);
1350       __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
1351       __ strb(rscratch1, data_addr);
1352       __ b(*obj_is_null);
1353       __ bind(not_null);
1354     } else {
1355       __ cbz(obj, *obj_is_null);
1356     }

1357 
1358   if (!k->is_loaded()) {
1359     klass2reg_with_patching(k_RInfo, op->info_for_patch());
1360   } else {
1361     __ mov_metadata(k_RInfo, k->constant_encoding());
1362   }
1363   __ verify_oop(obj);
1364 
1365   if (op->fast_check()) {
1366     // get object class
1367     // not a safepoint as obj null check happens earlier
1368     __ load_klass(rscratch1, obj);
1369     __ cmp( rscratch1, k_RInfo);
1370 
1371     __ br(Assembler::NE, *failure_target);
1372     // successful cast, fall through to profile or jump
1373   } else {
1374     // get object class
1375     // not a safepoint as obj null check happens earlier
1376     __ load_klass(klass_RInfo, obj);

1525     __ bind(success);
1526     if (dst != obj) {
1527       __ mov(dst, obj);
1528     }
1529   } else if (code == lir_instanceof) {
1530     Register obj = op->object()->as_register();
1531     Register dst = op->result_opr()->as_register();
1532     Label success, failure, done;
1533     emit_typecheck_helper(op, &success, &failure, &failure);
1534     __ bind(failure);
1535     __ mov(dst, zr);
1536     __ b(done);
1537     __ bind(success);
1538     __ mov(dst, 1);
1539     __ bind(done);
1540   } else {
1541     ShouldNotReachHere();
1542   }
1543 }
1544 
1545 void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
1546   __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1547   __ cset(rscratch1, Assembler::NE);
1548   __ membar(__ AnyAny);
1549 }
1550 
1551 void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
1552   __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1553   __ cset(rscratch1, Assembler::NE);
1554   __ membar(__ AnyAny);
1555 }
1556 
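casw/casl are the LIR compare-and-swap primitives: cmpxchg leaves the outcome in the condition flags, cset materializes failure as 0/1 in rscratch1, and the trailing AnyAny membar makes the whole operation a full fence. Roughly the following C++ semantics (a sketch of the contract, not the emitted instructions):

    #include <atomic>
    #include <cstdio>

    // Returns 0 on success, 1 on failure, matching cset(rscratch1, NE).
    static int cas_word(std::atomic<int>& addr, int cmpval, int newval) {
      bool ok = addr.compare_exchange_strong(cmpval, newval,
                                             std::memory_order_seq_cst);
      return ok ? 0 : 1;
    }

    int main() {
      std::atomic<int> v{42};
      std::printf("cas 42->7: %d (v=%d)\n", cas_word(v, 42, 7), v.load());
      std::printf("cas 42->9: %d (v=%d)\n", cas_word(v, 42, 9), v.load());
      return 0;
    }
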
1557 
1558 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1559   assert(VM_Version::supports_cx8(), "wrong machine");
1560   Register addr;
1561   if (op->addr()->is_register()) {
1562     addr = as_reg(op->addr());
1563   } else {
1564     assert(op->addr()->is_address(), "what else?");

1964     }
1965 
1966     if (opr2->is_constant()) {
1967       bool is_32bit = false; // width of register operand
1968       jlong imm;
1969 
1970       switch(opr2->type()) {
1971       case T_INT:
1972         imm = opr2->as_constant_ptr()->as_jint();
1973         is_32bit = true;
1974         break;
1975       case T_LONG:
1976         imm = opr2->as_constant_ptr()->as_jlong();
1977         break;
1978       case T_ADDRESS:
1979         imm = opr2->as_constant_ptr()->as_jint();
1980         break;
1981       case T_METADATA:
1982         imm = (intptr_t)(opr2->as_constant_ptr()->as_metadata());
1983         break;

1984       case T_OBJECT:
1985       case T_ARRAY:
1986         jobject2reg(opr2->as_constant_ptr()->as_jobject(), rscratch1);
1987         __ cmpoop(reg1, rscratch1);
1988         return;
1989       default:
1990         ShouldNotReachHere();
1991         imm = 0;  // unreachable
1992         break;
1993       }
1994 
1995       if (Assembler::operand_valid_for_add_sub_immediate(imm)) {
1996         if (is_32bit)
1997           __ cmpw(reg1, imm);
1998         else
1999           __ subs(zr, reg1, imm);
2000         return;
2001       } else {
2002         __ mov(rscratch1, imm);
2003         if (is_32bit)

2038     __ cmp(left->as_register_lo(), right->as_register_lo());
2039     __ mov(dst->as_register(), (uint64_t)-1L);
2040     __ br(Assembler::LT, done);
2041     __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
2042     __ bind(done);
2043   } else {
2044     ShouldNotReachHere();
2045   }
2046 }
2047 
2048 
2049 void LIR_Assembler::align_call(LIR_Code code) {  }
2050 
2051 
2052 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2053   address call = __ trampoline_call(Address(op->addr(), rtype));
2054   if (call == NULL) {
2055     bailout("trampoline stub overflow");
2056     return;
2057   }
2058   add_call_info(code_offset(), op->info());
2059 }
2060 
2061 
2062 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2063   address call = __ ic_call(op->addr());
2064   if (call == NULL) {
2065     bailout("trampoline stub overflow");
2066     return;
2067   }
2068   add_call_info(code_offset(), op->info());
2069 }
2070 
2071 void LIR_Assembler::emit_static_call_stub() {
2072   address call_pc = __ pc();
2073   address stub = __ start_a_stub(call_stub_size());
2074   if (stub == NULL) {
2075     bailout("static call stub overflow");
2076     return;
2077   }
2078 
2079   int start = __ offset();
2080 
2081   __ relocate(static_stub_Relocation::spec(call_pc));
2082   __ emit_static_call_stub();
2083 
2084   assert(__ offset() - start + CompiledStaticCall::to_trampoline_stub_size()
2085         <= call_stub_size(), "stub too big");
2086   __ end_a_stub();
2087 }
2088 

2130   __ b(_unwind_handler_entry);
2131 }
2132 
2133 
2134 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
2135   Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
2136   Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
2137 
2138   switch (left->type()) {
2139     case T_INT: {
2140       switch (code) {
2141       case lir_shl:  __ lslvw (dreg, lreg, count->as_register()); break;
2142       case lir_shr:  __ asrvw (dreg, lreg, count->as_register()); break;
2143       case lir_ushr: __ lsrvw (dreg, lreg, count->as_register()); break;
2144       default:
2145         ShouldNotReachHere();
2146         break;
2147       }
2148       break;
2149     case T_LONG:

2150     case T_ADDRESS:
2151     case T_OBJECT:
2152       switch (code) {
2153       case lir_shl:  __ lslv (dreg, lreg, count->as_register()); break;
2154       case lir_shr:  __ asrv (dreg, lreg, count->as_register()); break;
2155       case lir_ushr: __ lsrv (dreg, lreg, count->as_register()); break;
2156       default:
2157         ShouldNotReachHere();
2158         break;
2159       }
2160       break;
2161     default:
2162       ShouldNotReachHere();
2163       break;
2164     }
2165   }
2166 }
2167 
2168 
2169 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
2170   Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
2171   Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
2172 
2173   switch (left->type()) {
2174     case T_INT: {
2175       switch (code) {
2176       case lir_shl:  __ lslw (dreg, lreg, count); break;
2177       case lir_shr:  __ asrw (dreg, lreg, count); break;
2178       case lir_ushr: __ lsrw (dreg, lreg, count); break;
2179       default:
2180         ShouldNotReachHere();
2181         break;
2182       }
2183       break;
2184     case T_LONG:
2185     case T_ADDRESS:

2186     case T_OBJECT:
2187       switch (code) {
2188       case lir_shl:  __ lsl (dreg, lreg, count); break;
2189       case lir_shr:  __ asr (dreg, lreg, count); break;
2190       case lir_ushr: __ lsr (dreg, lreg, count); break;
2191       default:
2192         ShouldNotReachHere();
2193         break;
2194       }
2195       break;
2196     default:
2197       ShouldNotReachHere();
2198       break;
2199     }
2200   }
2201 }
2202 
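Both shift_op variants pick w-form shifts for T_INT and x-forms for T_LONG/T_ADDRESS/T_OBJECT. The AArch64 variable-shift instructions mask the count to the operand width (31 for w-forms, 63 for x-forms), which is exactly the masking Java's <<, >> and >>> require, so no extra masking is emitted. A sketch with the mask written out (C++ shifts by >= the operand width are undefined, so the mask is mandatory here):

    #include <assert.h>
    #include <stdint.h>

    static int32_t jshl_int (int32_t x, int count) { return x << (count & 31); }
    static int64_t jshl_long(int64_t x, int count) { return x << (count & 63); }

    int main() {
      assert(jshl_int(1, 33) == 2);                  // 33 & 31 == 1 (lslvw)
      assert(jshl_long(1, 33) == (int64_t)1 << 33);  // full count (lslv)
      assert(jshl_int(1, 32) == 1);                  // int shift by 32 is 0
      return 0;
    }
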
2203 
2204 void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
2205   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");

2210 
2211 
2212 void LIR_Assembler::store_parameter(jint c,     int offset_from_rsp_in_words) {
2213   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2214   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2215   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2216   __ mov (rscratch1, c);
2217   __ str (rscratch1, Address(sp, offset_from_rsp_in_bytes));
2218 }
2219 
2220 
2221 void LIR_Assembler::store_parameter(jobject o,  int offset_from_rsp_in_words) {
2222   ShouldNotReachHere();
2223   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2224   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2225   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2226   __ lea(rscratch1, __ constant_oop_address(o));
2227   __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
2228 }
2229 
2230 
2231 // This code replaces a call to arraycopy; no exception may
2232 // be thrown in this code, they must be thrown in the System.arraycopy
2233 // activation frame; we could save some checks if this would not be the case
2234 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2235   ciArrayKlass* default_type = op->expected_type();
2236   Register src = op->src()->as_register();
2237   Register dst = op->dst()->as_register();
2238   Register src_pos = op->src_pos()->as_register();
2239   Register dst_pos = op->dst_pos()->as_register();
2240   Register length  = op->length()->as_register();
2241   Register tmp = op->tmp()->as_register();
2242 
2243   CodeStub* stub = op->stub();
2244   int flags = op->flags();
2245   BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
2246   if (is_reference_type(basic_type)) basic_type = T_OBJECT;
2247 
2248   // if we don't know anything, just go through the generic arraycopy
2249   if (default_type == NULL // || basic_type == T_OBJECT
2250       ) {
2251     Label done;
2252     assert(src == r1 && src_pos == r2, "mismatch in calling convention");
2253 
2254     // Save the arguments in case the generic arraycopy fails and we
2255     // have to fall back to the JNI stub
2256     __ stp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2257     __ stp(length,  src_pos, Address(sp, 2*BytesPerWord));
2258     __ str(src,              Address(sp, 4*BytesPerWord));
2259 
2260     address copyfunc_addr = StubRoutines::generic_arraycopy();
2261     assert(copyfunc_addr != NULL, "generic arraycopy stub required");
2262 
2263     // The arguments are in java calling convention so we shift them
2264     // to C convention
2265     assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
2266     __ mov(c_rarg0, j_rarg0);
2267     assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);

2281     __ cbz(r0, *stub->continuation());
2282 
2283     // Reload values from the stack so they are where the stub
2284     // expects them.
2285     __ ldp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2286     __ ldp(length,  src_pos, Address(sp, 2*BytesPerWord));
2287     __ ldr(src,              Address(sp, 4*BytesPerWord));
2288 
2289     // r0 is -1^K where K == partial copied count
2290     __ eonw(rscratch1, r0, zr);
2291     // adjust length down and src/end pos up by partial copied count
2292     __ subw(length, length, rscratch1);
2293     __ addw(src_pos, src_pos, rscratch1);
2294     __ addw(dst_pos, dst_pos, rscratch1);
2295     __ b(*stub->entry());
2296 
2297     __ bind(*stub->continuation());
2298     return;
2299   }
2300 
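The partial-copy recovery above relies on the generic arraycopy stub's return convention: 0 means everything was copied, otherwise r0 holds ~K where K elements were already copied. eonw(rscratch1, r0, zr) computes r0 ^ ~0, i.e. ~r0, recovering K before the length and positions are adjusted for the retry. A worked sketch of that arithmetic:

    #include <assert.h>
    #include <stdint.h>

    int main() {
      int32_t K = 5;                 // elements the stub managed to copy
      int32_t r0 = -1 ^ K;           // stub result: -1^K == ~K
      int32_t copied = r0 ^ ~0;      // eonw(rscratch1, r0, zr): x ^ ~0 == ~x
      assert(copied == K);

      int32_t length = 10, src_pos = 0, dst_pos = 0;
      length  -= copied;             // the retry copies only the remainder
      src_pos += copied;
      dst_pos += copied;
      assert(length == 5 && src_pos == 5 && dst_pos == 5);
      return 0;
    }
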
2301   assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
2302 
2303   int elem_size = type2aelembytes(basic_type);
2304   int scale = exact_log2(elem_size);
2305 
2306   Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
2307   Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
2308   Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
2309   Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());
2310 
2311   // test for NULL
2312   if (flags & LIR_OpArrayCopy::src_null_check) {
2313     __ cbz(src, *stub->entry());
2314   }
2315   if (flags & LIR_OpArrayCopy::dst_null_check) {
2316     __ cbz(dst, *stub->entry());
2317   }
2318 
2319   // If the compiler was not able to prove that exact type of the source or the destination
2320   // of the arraycopy is an array type, check at runtime if the source or the destination is

2835         // first time here. Set profile type.
2836         __ str(tmp, mdo_addr);
2837       } else {
2838         assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
2839                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
2840 
2841         __ ldr(tmp, mdo_addr);
2842         __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2843 
2844         __ orr(tmp, tmp, TypeEntries::type_unknown);
2845         __ str(tmp, mdo_addr);
2846         // FIXME: Write barrier needed here?
2847       }
2848     }
2849 
2850     __ bind(next);
2851   }
2852   COMMENT("} emit_profile_type");
2853 }
2854 
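The tail of emit_profile_type above is a saturating update: tbnz tests the type_unknown bit and skips the store if the cell is already saturated, otherwise orr sets the bit while preserving the klass bits. A sketch of that update (the flag value is illustrative, not the real TypeEntries encoding):

    #include <assert.h>
    #include <stdint.h>

    static const uintptr_t type_unknown = 0x4;   // illustrative flag bit

    static void record_unknown(uintptr_t* cell) {
      if (*cell & type_unknown) return;   // tbnz: already unknown, skip
      *cell |= type_unknown;              // orr: saturate, keep klass bits
    }

    int main() {
      uintptr_t cell = 0x1000;            // pretend klass pointer
      record_unknown(&cell);
      assert(cell == 0x1004);
      record_unknown(&cell);              // idempotent once saturated
      assert(cell == 0x1004);
      return 0;
    }
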
2855 
2856 void LIR_Assembler::align_backward_branch_target() {
2857 }
2858 
2859 
2860 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
2861   // tmp must be unused
2862   assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
2863 
2864   if (left->is_single_cpu()) {
2865     assert(dest->is_single_cpu(), "expect single result reg");
2866     __ negw(dest->as_register(), left->as_register());
2867   } else if (left->is_double_cpu()) {
2868     assert(dest->is_double_cpu(), "expect double result reg");
2869     __ neg(dest->as_register_lo(), left->as_register_lo());
2870   } else if (left->is_single_fpu()) {
2871     assert(dest->is_single_fpu(), "expect single float result reg");
2872     __ fnegs(dest->as_float_reg(), left->as_float_reg());
2873   } else {
2874     assert(left->is_double_fpu(), "expect double float operand reg");

2974 void LIR_Assembler::membar_loadload() {
2975   __ membar(Assembler::LoadLoad);
2976 }
2977 
2978 void LIR_Assembler::membar_storestore() {
2979   __ membar(MacroAssembler::StoreStore);
2980 }
2981 
2982 void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }
2983 
2984 void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }
2985 
2986 void LIR_Assembler::on_spin_wait() {
2987   Unimplemented();
2988 }
2989 
2990 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
2991   __ mov(result_reg->as_register(), rthread);
2992 }
2993 
2994 
2995 void LIR_Assembler::peephole(LIR_List *lir) {
2996 #if 0
2997   if (tableswitch_count >= max_tableswitches)
2998     return;
2999 
3000   /*
3001     This finite-state automaton recognizes sequences of compare-and-
3002     branch instructions.  We will turn them into a tableswitch.  You
3003     could argue that C1 really shouldn't be doing this sort of
3004     optimization, but without it the code is really horrible.
3005   */
3006 
3007   enum { start_s, cmp1_s, beq_s, cmp_s } state;
3008   int first_key, last_key = -2147483648;
3009   int next_key = 0;
3010   int start_insn = -1;
3011   int last_insn = -1;
3012   Register reg = noreg;
3013   LIR_Opr reg_opr;

3121 #endif
3122 }
3123 
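The disabled peephole above sketches a finite-state automaton that would recognize chains of compare-and-branch instructions on consecutive keys and re-emit them as a single tableswitch. At the source level, the transformation it aims for looks like this:

    #include <cstdio>

    static const char* chain(int k) {     // the shape C1 emits naively
      if (k == 0) return "a";
      if (k == 1) return "b";
      if (k == 2) return "c";
      return "default";
    }

    static const char* table(int k) {     // the tableswitch shape
      static const char* t[] = { "a", "b", "c" };
      return (unsigned)k < 3 ? t[k] : "default";  // one compare, one load
    }

    int main() {
      for (int k = -1; k < 4; k++)
        std::printf("%d: %s %s\n", k, chain(k), table(k));
      return 0;
    }
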
3124 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp_op) {
3125   Address addr = as_Address(src->as_address_ptr());
3126   BasicType type = src->type();
3127   bool is_oop = is_reference_type(type);
3128 
3129   void (MacroAssembler::* add)(Register prev, RegisterOrConstant incr, Register addr);
3130   void (MacroAssembler::* xchg)(Register prev, Register newv, Register addr);
3131 
3132   switch(type) {
3133   case T_INT:
3134     xchg = &MacroAssembler::atomic_xchgalw;
3135     add = &MacroAssembler::atomic_addalw;
3136     break;
3137   case T_LONG:
3138     xchg = &MacroAssembler::atomic_xchgal;
3139     add = &MacroAssembler::atomic_addal;
3140     break;

3141   case T_OBJECT:
3142   case T_ARRAY:
3143     if (UseCompressedOops) {
3144       xchg = &MacroAssembler::atomic_xchgalw;
3145       add = &MacroAssembler::atomic_addalw;
3146     } else {
3147       xchg = &MacroAssembler::atomic_xchgal;
3148       add = &MacroAssembler::atomic_addal;
3149     }
3150     break;
3151   default:
3152     ShouldNotReachHere();
3153     xchg = &MacroAssembler::atomic_xchgal;
3154     add = &MacroAssembler::atomic_addal; // unreachable
3155   }
3156 
3157   switch (code) {
3158   case lir_xadd:
3159     {
3160       RegisterOrConstant inc;

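atomic_op uses the same pointer-to-member dispatch as const2mem, selecting word or doubleword forms of atomic add and exchange (narrow forms for oops when compressed oops are enabled). The two LIR operations expressed with C++ atomics, as a semantic sketch only:

    #include <atomic>
    #include <cstdio>

    int main() {
      std::atomic<long> cell{10};
      // lir_xadd: fetch-and-add, result is the previous value
      long prev = cell.fetch_add(5, std::memory_order_acq_rel);
      std::printf("xadd: prev=%ld now=%ld\n", prev, cell.load());
      // lir_xchg: unconditional exchange, result is the previous value
      prev = cell.exchange(42, std::memory_order_acq_rel);
      std::printf("xchg: prev=%ld now=%ld\n", prev, cell.load());
      return 0;
    }
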
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "asm/assembler.hpp"
  29 #include "c1/c1_CodeStubs.hpp"
  30 #include "c1/c1_Compilation.hpp"
  31 #include "c1/c1_LIRAssembler.hpp"
  32 #include "c1/c1_MacroAssembler.hpp"
  33 #include "c1/c1_Runtime1.hpp"
  34 #include "c1/c1_ValueStack.hpp"
  35 #include "ci/ciArrayKlass.hpp"
  36 #include "ci/ciInlineKlass.hpp"
  37 #include "ci/ciInstance.hpp"
  38 #include "code/compiledIC.hpp"
  39 #include "gc/shared/collectedHeap.hpp"
  40 #include "gc/shared/gc_globals.hpp"
  41 #include "nativeInst_aarch64.hpp"
  42 #include "oops/objArrayKlass.hpp"
  43 #include "oops/oop.inline.hpp"
  44 #include "runtime/frame.inline.hpp"
  45 #include "runtime/sharedRuntime.hpp"
  46 #include "runtime/stubRoutines.hpp"
  47 #include "utilities/powerOfTwo.hpp"
  48 #include "vmreg_aarch64.inline.hpp"
  49 
  50 
  51 #ifndef PRODUCT
  52 #define COMMENT(x)   do { __ block_comment(x); } while (0)
  53 #else
  54 #define COMMENT(x)
  55 #endif
  56 
  57 NEEDS_CLEANUP // remove these definitions?
  58 const Register IC_Klass    = rscratch2;   // where the IC klass is cached
  59 const Register SYNC_header = r0;   // synchronization header
  60 const Register SHIFT_count = r0;   // where count for shift operations must be
  61 
  62 #define __ _masm->
  63 

 440   MonitorExitStub* stub = NULL;
 441   if (method()->is_synchronized()) {
 442     monitor_address(0, FrameMap::r0_opr);
 443     stub = new MonitorExitStub(FrameMap::r0_opr, true, 0);
 444     __ unlock_object(r5, r4, r0, *stub->entry());
 445     __ bind(*stub->continuation());
 446   }
 447 
 448   if (compilation()->env()->dtrace_method_probes()) {
 449     __ mov(c_rarg0, rthread);
 450     __ mov_metadata(c_rarg1, method()->constant_encoding());
 451     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);
 452   }
 453 
 454   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 455     __ mov(r0, r19);  // Restore the exception
 456   }
 457 
 458   // remove the activation and dispatch to the unwind handler
 459   __ block_comment("remove_frame and dispatch to the unwind handler");
 460   __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
 461   __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
 462 
 463   // Emit the slow path assembly
 464   if (stub != NULL) {
 465     stub->emit_code(this);
 466   }
 467 
 468   return offset;
 469 }
 470 
 471 
 472 int LIR_Assembler::emit_deopt_handler() {
 473   // if the last instruction is a call (typically to do a throw which
 474   // is coming at the end after block reordering) the return address
 475   // must still point into the code area in order to avoid assertion
 476   // failures when searching for the corresponding bci => add a nop
 477   // (was bug 5/14/1999 - gri)
 478   __ nop();
 479 
 480   // generate code for exception handler

 491   __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
 492   guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
 493   __ end_a_stub();
 494 
 495   return offset;
 496 }
 497 
 498 void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
 499   _masm->code_section()->relocate(adr, relocInfo::poll_type);
 500   int pc_offset = code_offset();
 501   flush_debug_info(pc_offset);
 502   info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
 503   if (info->exception_handlers() != NULL) {
 504     compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
 505   }
 506 }
 507 
 508 void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
 509   assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0,");
 510 
 511   ciMethod* method = compilation()->method();
 512   if (InlineTypeReturnedAsFields && method->signature()->returns_null_free_inline_type()) {
 513     ciInlineKlass* vk = method->return_type()->as_inline_klass();
 514     if (vk->can_be_returned_as_fields()) {
 515       address unpack_handler = vk->unpack_handler();
 516       assert(unpack_handler != NULL, "must be");
 517       __ far_call(RuntimeAddress(unpack_handler));
 518       // At this point, r0 points to the value object (for interpreter or C1 caller).
 519       // The fields of the object are copied into registers (for C2 caller).
 520     }
 521   }
 522 
 523   // Pop the stack before the safepoint code
 524   __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
 525 
 526   if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
 527     __ reserved_stack_check();
 528   }
 529 
 530   code_stub->set_safepoint_offset(__ offset());
 531   __ relocate(relocInfo::poll_return_type);
 532   __ safepoint_poll(*code_stub->entry(), true /* at_return */, false /* acquire */, true /* in_nmethod */);
 533   __ ret(lr);
 534 }
 535 
 536 int LIR_Assembler::store_inline_type_fields_to_buf(ciInlineKlass* vk) {
 537   return (__ store_inline_type_fields_to_buf(vk, false));
 538 }
 539 
 540 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
 541   guarantee(info != NULL, "Shouldn't be NULL");
 542   __ get_polling_page(rscratch1, relocInfo::poll_type);
 543   add_debug_info_for_branch(info);  // This isn't just debug info:
 544                                     // it's the oop map
 545   __ read_polling_page(rscratch1, relocInfo::poll_type);
 546   return __ offset();
 547 }
 548 
 549 
 550 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
 551   if (from_reg == r31_sp)
 552     from_reg = sp;
 553   if (to_reg == r31_sp)
 554     to_reg = sp;
 555   __ mov(to_reg, from_reg);
 556 }
 557 
 558 void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }
 559 

 565 
 566   switch (c->type()) {
 567     case T_INT: {
 568       assert(patch_code == lir_patch_none, "no patching handled here");
 569       __ movw(dest->as_register(), c->as_jint());
 570       break;
 571     }
 572 
 573     case T_ADDRESS: {
 574       assert(patch_code == lir_patch_none, "no patching handled here");
 575       __ mov(dest->as_register(), c->as_jint());
 576       break;
 577     }
 578 
 579     case T_LONG: {
 580       assert(patch_code == lir_patch_none, "no patching handled here");
 581       __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
 582       break;
 583     }
 584 
 585     case T_INLINE_TYPE:
 586     case T_OBJECT: {
 587         if (patch_code != lir_patch_none) {
 588           jobject2reg_with_patching(dest->as_register(), info);
 589         } else {
 590           jobject2reg(c->as_jobject(), dest->as_register());
 591         }
 592       break;
 593     }
 594 
 595     case T_METADATA: {
 596       if (patch_code != lir_patch_none) {
 597         klass2reg_with_patching(dest->as_register(), info);
 598       } else {
 599         __ mov_metadata(dest->as_register(), c->as_metadata());
 600       }
 601       break;
 602     }
 603 
 604     case T_FLOAT: {
 605       if (__ operand_valid_for_float_immediate(c->as_jfloat())) {
 606         __ fmovs(dest->as_float_reg(), (c->as_jfloat()));
 607       } else {
 608         __ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));
 609         __ ldrs(dest->as_float_reg(), Address(rscratch1));
 610       }

 612     }
 613 
 614     case T_DOUBLE: {
 615       if (__ operand_valid_for_float_immediate(c->as_jdouble())) {
 616         __ fmovd(dest->as_double_reg(), (c->as_jdouble()));
 617       } else {
 618         __ adr(rscratch1, InternalAddress(double_constant(c->as_jdouble())));
 619         __ ldrd(dest->as_double_reg(), Address(rscratch1));
 620       }
 621       break;
 622     }
 623 
 624     default:
 625       ShouldNotReachHere();
 626   }
 627 }
 628 
 629 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
 630   LIR_Const* c = src->as_constant_ptr();
 631   switch (c->type()) {
 632   case T_INLINE_TYPE:
 633   case T_OBJECT:
 634     {
 635       if (! c->as_jobject())
 636         __ str(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
 637       else {
 638         const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
 639         reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
 640       }
 641     }
 642     break;
 643   case T_ADDRESS:
 644     {
 645       const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
 646       reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
 647     }
 648   case T_INT:
 649   case T_FLOAT:
 650     {
 651       Register reg = zr;
 652       if (c->as_jint_bits() == 0)

 679 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
 680   assert(src->is_constant(), "should not call otherwise");
 681   LIR_Const* c = src->as_constant_ptr();
 682   LIR_Address* to_addr = dest->as_address_ptr();
 683 
 684   void (Assembler::* insn)(Register Rt, const Address &adr);
 685 
 686   switch (type) {
 687   case T_ADDRESS:
 688     assert(c->as_jint() == 0, "should be");
 689     insn = &Assembler::str;
 690     break;
 691   case T_LONG:
 692     assert(c->as_jlong() == 0, "should be");
 693     insn = &Assembler::str;
 694     break;
 695   case T_INT:
 696     assert(c->as_jint() == 0, "should be");
 697     insn = &Assembler::strw;
 698     break;
 699   case T_INLINE_TYPE:
 700   case T_OBJECT:
 701   case T_ARRAY:
 702     // Non-null case is not handled on aarch64 but handled on x86
 703     // FIXME: do we need to add it here?
 704     assert(c->as_jobject() == 0, "should be");
 705     if (UseCompressedOops && !wide) {
 706       insn = &Assembler::strw;
 707     } else {
 708       insn = &Assembler::str;
 709     }
 710     break;
 711   case T_CHAR:
 712   case T_SHORT:
 713     assert(c->as_jint() == 0, "should be");
 714     insn = &Assembler::strh;
 715     break;
 716   case T_BOOLEAN:
 717   case T_BYTE:
 718     assert(c->as_jint() == 0, "should be");
 719     insn = &Assembler::strb;
 720     break;
 721   default:
 722     ShouldNotReachHere();
 723     insn = &Assembler::str;  // unreachable
 724   }
 725 
 726   if (info) add_debug_info_for_null_check_here(info);
 727   (_masm->*insn)(zr, as_Address(to_addr, rscratch1));
 728 }
 729 
 730 void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
 731   assert(src->is_register(), "should not call otherwise");
 732   assert(dest->is_register(), "should not call otherwise");
 733 
 734   // move between cpu-registers
 735   if (dest->is_single_cpu()) {
 736     if (src->type() == T_LONG) {
 737       // Can do LONG -> OBJECT
 738       move_regs(src->as_register_lo(), dest->as_register());
 739       return;
 740     }
 741     assert(src->is_single_cpu(), "must match");
 742     if (src->type() == T_OBJECT || src->type() == T_INLINE_TYPE) {
 743       __ verify_oop(src->as_register());
 744     }
 745     move_regs(src->as_register(), dest->as_register());
 746 
 747   } else if (dest->is_double_cpu()) {
 748     if (is_reference_type(src->type())) {
 749       // Surprising to me but we can see move of a long to t_object
 750       __ verify_oop(src->as_register());
 751       move_regs(src->as_register(), dest->as_register_lo());
 752       return;
 753     }
 754     assert(src->is_double_cpu(), "must match");
 755     Register f_lo = src->as_register_lo();
 756     Register f_hi = src->as_register_hi();
 757     Register t_lo = dest->as_register_lo();
 758     Register t_hi = dest->as_register_hi();
 759     assert(f_hi == f_lo, "must be same");
 760     assert(t_hi == t_lo, "must be same");
 761     move_regs(f_lo, t_lo);
 762 

 822 
 823     if (UseCompressedOops && !wide) {
 824       __ encode_heap_oop(compressed_src, src->as_register());
 825     } else {
 826       compressed_src = src->as_register();
 827     }
 828   }
 829 
 830   int null_check_here = code_offset();
 831   switch (type) {
 832     case T_FLOAT: {
 833       __ strs(src->as_float_reg(), as_Address(to_addr));
 834       break;
 835     }
 836 
 837     case T_DOUBLE: {
 838       __ strd(src->as_double_reg(), as_Address(to_addr));
 839       break;
 840     }
 841 
 842     case T_INLINE_TYPE: // fall through
 843     case T_ARRAY:   // fall through
 844     case T_OBJECT:  // fall through
 845       if (UseCompressedOops && !wide) {
 846         __ strw(compressed_src, as_Address(to_addr, rscratch2));
 847       } else {
 848          __ str(compressed_src, as_Address(to_addr));
 849       }
 850       break;
 851     case T_METADATA:
 852       // We get here to store a method pointer to the stack to pass to
 853       // a dtrace runtime call. This can't work on 64 bit with
 854       // compressed klass ptrs: T_METADATA can be a compressed klass
 855       // ptr or a 64 bit method pointer.
 856       ShouldNotReachHere();
 857       __ str(src->as_register(), as_Address(to_addr));
 858       break;
 859     case T_ADDRESS:
 860       __ str(src->as_register(), as_Address(to_addr));
 861       break;
 862     case T_INT:

 952   add_call_info_here(info);
 953 }
 954 
 955 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
 956 
 957   LIR_Opr temp;
 958   if (type == T_LONG || type == T_DOUBLE)
 959     temp = FrameMap::rscratch1_long_opr;
 960   else
 961     temp = FrameMap::rscratch1_opr;
 962 
 963   stack2reg(src, temp, src->type());
 964   reg2stack(temp, dest, dest->type(), false);
 965 }
 966 
 967 
 968 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
 969   LIR_Address* addr = src->as_address_ptr();
 970   LIR_Address* from_addr = src->as_address_ptr();
 971 
 972   if (addr->base()->type() == T_OBJECT || addr->base()->type() == T_INLINE_TYPE) {
 973     __ verify_oop(addr->base()->as_pointer_register());
 974   }
 975 
 976   if (patch_code != lir_patch_none) {
 977     deoptimize_trap(info);
 978     return;
 979   }
 980 
 981   if (info != NULL) {
 982     add_debug_info_for_null_check_here(info);
 983   }
 984   int null_check_here = code_offset();
 985   switch (type) {
 986     case T_FLOAT: {
 987       __ ldrs(dest->as_float_reg(), as_Address(from_addr));
 988       break;
 989     }
 990 
 991     case T_DOUBLE: {
 992       __ ldrd(dest->as_double_reg(), as_Address(from_addr));
 993       break;
 994     }
 995 
 996     case T_INLINE_TYPE: // fall through
 997     case T_ARRAY:   // fall through
 998     case T_OBJECT:  // fall through
 999       if (UseCompressedOops && !wide) {
1000         __ ldrw(dest->as_register(), as_Address(from_addr));
1001       } else {
1002          __ ldr(dest->as_register(), as_Address(from_addr));
1003       }
1004       break;
1005     case T_METADATA:
1006       // We get here to store a method pointer to the stack to pass to
1007       // a dtrace runtime call. This can't work on 64 bit with
1008       // compressed klass ptrs: T_METADATA can be a compressed klass
1009       // ptr or a 64 bit method pointer.
1010       ShouldNotReachHere();
1011       __ ldr(dest->as_register(), as_Address(from_addr));
1012       break;
1013     case T_ADDRESS:
1014       // FIXME: OMG this is a horrible kludge.  Any offset from an
1015       // address that matches klass_offset_in_bytes() will be loaded
1016       // as a word, not a long.

1047     default:
1048       ShouldNotReachHere();
1049   }
1050 
1051   if (is_reference_type(type)) {
1052     if (UseCompressedOops && !wide) {
1053       __ decode_heap_oop(dest->as_register());
1054     }
1055 
1056     if (!UseZGC) {
1057       // Load barrier has not yet been applied, so ZGC can't verify the oop here
1058       __ verify_oop(dest->as_register());
1059     }
1060   } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
1061     if (UseCompressedClassPointers) {
1062       __ decode_klass_not_null(dest->as_register());
1063     }
1064   }
1065 }
1066 
1067 void LIR_Assembler::move(LIR_Opr src, LIR_Opr dst) {
1068   assert(dst->is_cpu_register(), "must be");
1069   assert(dst->type() == src->type(), "must be");
1070 
1071   if (src->is_cpu_register()) {
1072     reg2reg(src, dst);
1073   } else if (src->is_stack()) {
1074     stack2reg(src, dst, dst->type());
1075   } else if (src->is_constant()) {
1076     const2reg(src, dst, lir_patch_none, NULL);
1077   } else {
1078     ShouldNotReachHere();
1079   }
1080 }
1081 
1082 int LIR_Assembler::array_element_size(BasicType type) const {
1083   int elem_size = type2aelembytes(type);
1084   return exact_log2(elem_size);
1085 }
1086 
1087 
1088 void LIR_Assembler::emit_op3(LIR_Op3* op) {
1089   switch (op->code()) {
1090   case lir_idiv:
1091   case lir_irem:
1092     arithmetic_idiv(op->code(),
1093                     op->in_opr1(),
1094                     op->in_opr2(),
1095                     op->in_opr3(),
1096                     op->result_opr(),
1097                     op->info());
1098     break;
1099   case lir_fmad:
1100     __ fmaddd(op->result_opr()->as_double_reg(),

1252     __ ldrb(rscratch1, Address(op->klass()->as_register(),
1253                                InstanceKlass::init_state_offset()));
1254     __ cmpw(rscratch1, InstanceKlass::fully_initialized);
1255     add_debug_info_for_null_check_here(op->stub()->info());
1256     __ br(Assembler::NE, *op->stub()->entry());
1257   }
1258   __ allocate_object(op->obj()->as_register(),
1259                      op->tmp1()->as_register(),
1260                      op->tmp2()->as_register(),
1261                      op->header_size(),
1262                      op->object_size(),
1263                      op->klass()->as_register(),
1264                      *op->stub()->entry());
1265   __ bind(*op->stub()->continuation());
1266 }
1267 
1268 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1269   Register len =  op->len()->as_register();
1270   __ uxtw(len, len);
1271 
1272   if (UseSlowPath || op->type() == T_INLINE_TYPE ||
1273       (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1274       (!UseFastNewTypeArray   && !is_reference_type(op->type()))) {
1275     __ b(*op->stub()->entry());
1276   } else {
1277     Register tmp1 = op->tmp1()->as_register();
1278     Register tmp2 = op->tmp2()->as_register();
1279     Register tmp3 = op->tmp3()->as_register();
1280     if (len == tmp1) {
1281       tmp1 = tmp3;
1282     } else if (len == tmp2) {
1283       tmp2 = tmp3;
1284     } else if (len == tmp3) {
1285       // everything is ok
1286     } else {
1287       __ mov(tmp3, len);
1288     }
1289     __ allocate_array(op->obj()->as_register(),
1290                       len,
1291                       tmp1,
1292                       tmp2,

1358     assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1359   }
1360   Label profile_cast_success, profile_cast_failure;
1361   Label *success_target = should_profile ? &profile_cast_success : success;
1362   Label *failure_target = should_profile ? &profile_cast_failure : failure;
1363 
1364   if (obj == k_RInfo) {
1365     k_RInfo = dst;
1366   } else if (obj == klass_RInfo) {
1367     klass_RInfo = dst;
1368   }
1369   if (k->is_loaded() && !UseCompressedClassPointers) {
1370     select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1371   } else {
1372     Rtmp1 = op->tmp3()->as_register();
1373     select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1374   }
1375 
1376   assert_different_registers(obj, k_RInfo, klass_RInfo);
1377 
1378   if (op->need_null_check()) {
1379     if (should_profile) {
1380       Label not_null;
1381       __ cbnz(obj, not_null);
1382       // Object is null; update MDO and exit
1383       Register mdo  = klass_RInfo;
1384       __ mov_metadata(mdo, md->constant_encoding());
1385       Address data_addr
1386         = __ form_address(rscratch2, mdo,
1387                           md->byte_offset_of_slot(data, DataLayout::flags_offset()),
1388                           0);
1389       __ ldrb(rscratch1, data_addr);
1390       __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
1391       __ strb(rscratch1, data_addr);
1392       __ b(*obj_is_null);
1393       __ bind(not_null);
1394     } else {
1395       __ cbz(obj, *obj_is_null);
1396     }
1397   }
1398 
1399   if (!k->is_loaded()) {
1400     klass2reg_with_patching(k_RInfo, op->info_for_patch());
1401   } else {
1402     __ mov_metadata(k_RInfo, k->constant_encoding());
1403   }
1404   __ verify_oop(obj);
1405 
1406   if (op->fast_check()) {
1407     // get object class
1408     // not a safepoint as obj null check happens earlier
1409     __ load_klass(rscratch1, obj);
1410     __ cmp(rscratch1, k_RInfo);
1411 
1412     __ br(Assembler::NE, *failure_target);
1413     // successful cast, fall through to profile or jump
1414   } else {
1415     // get object class
1416     // not a safepoint as obj null check happens earlier
1417     __ load_klass(klass_RInfo, obj);

1566     __ bind(success);
1567     if (dst != obj) {
1568       __ mov(dst, obj);
1569     }
1570   } else if (code == lir_instanceof) {
1571     Register obj = op->object()->as_register();
1572     Register dst = op->result_opr()->as_register();
1573     Label success, failure, done;
1574     emit_typecheck_helper(op, &success, &failure, &failure);
1575     __ bind(failure);
1576     __ mov(dst, zr);
1577     __ b(done);
1578     __ bind(success);
1579     __ mov(dst, 1);
1580     __ bind(done);
1581   } else {
1582     ShouldNotReachHere();
1583   }
1584 }
1585 
1586 void LIR_Assembler::emit_opFlattenedArrayCheck(LIR_OpFlattenedArrayCheck* op) {
1587   // We are loading/storing from/to an array that *may* be flattened (the
1588   // declared type is Object[], abstract[], interface[] or VT.ref[]).
1589   // If this array is flattened, take the slow path.
1590 
1591   Register klass = op->tmp()->as_register();
1592   if (UseArrayMarkWordCheck) {
1593     __ test_flattened_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
1594   } else {
1595     __ load_klass(klass, op->array()->as_register());
1596     __ ldrw(klass, Address(klass, Klass::layout_helper_offset()));
1597     __ tst(klass, Klass::_lh_array_tag_vt_value_bit_inplace);
1598     __ br(Assembler::NE, *op->stub()->entry());
1599   }
1600   if (!op->value()->is_illegal()) {
1601     // The array is not flattened, but it might be null-free. If we are storing
1602     // a null into a null-free array, take the slow path (which will throw NPE).
1603     Label skip;
1604     __ cbnz(op->value()->as_register(), skip);
1605     if (UseArrayMarkWordCheck) {
1606       __ test_null_free_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
1607     } else {
1608       __ tst(klass, Klass::_lh_null_free_bit_inplace);
1609       __ br(Assembler::NE, *op->stub()->entry());
1610     }
1611     __ bind(skip);
1612   }
1613 }
1614 
1615 void LIR_Assembler::emit_opNullFreeArrayCheck(LIR_OpNullFreeArrayCheck* op) {
1616   // We are storing into an array that *may* be null-free (the declared type is
1617   // Object[], abstract[], interface[] or VT.ref[]).
1618   if (UseArrayMarkWordCheck) {
1619     Label test_mark_word;
1620     Register tmp = op->tmp()->as_register();
1621     __ ldr(tmp, Address(op->array()->as_register(), oopDesc::mark_offset_in_bytes()));
1622     __ tst(tmp, markWord::unlocked_value);
1623     __ br(Assembler::NE, test_mark_word);
1624     __ load_prototype_header(tmp, op->array()->as_register());
1625     __ bind(test_mark_word);
1626     __ tst(tmp, markWord::null_free_array_bit_in_place);
1627   } else {
1628     Register klass = op->tmp()->as_register();
1629     __ load_klass(klass, op->array()->as_register());
1630     __ ldr(klass, Address(klass, Klass::layout_helper_offset()));
1631     __ tst(klass, Klass::_lh_null_free_bit_inplace);
1632   }
1633 }
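// Note: both paths above end in a tst, so this op only sets the condition
// flags for the null-free bit; the branch on the result is presumably
// emitted separately by the LIR branch that follows this op.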
1634 
1635 void LIR_Assembler::emit_opSubstitutabilityCheck(LIR_OpSubstitutabilityCheck* op) {
1636   Label L_oops_equal;
1637   Label L_oops_not_equal;
1638   Label L_end;
1639 
1640   Register left  = op->left()->as_register();
1641   Register right = op->right()->as_register();
1642 
1643   __ cmp(left, right);
1644   __ br(Assembler::EQ, L_oops_equal);
1645 
1646   // (1) Null check -- if one of the operands is null, the other must not be null (because
1647   //     the two references are not equal), so they are not substitutable.
1648   //     FIXME: do the null check only if the operand is nullable.
1649   {
1650     __ cbz(left, L_oops_not_equal);
1651     __ cbz(right, L_oops_not_equal);
1652   }
1653 
1654   ciKlass* left_klass = op->left_klass();
1655   ciKlass* right_klass = op->right_klass();
1656 
1657   // (2) Inline type check -- if either of the operands is not an inline type,
1658   //     they are not substitutable. We do this only if we are not sure that the
1659   //     operands are inline types.
1660   if ((left_klass == NULL || right_klass == NULL) || // The klass is still unloaded, or came from a Phi node.
1661       !left_klass->is_inlinetype() || !right_klass->is_inlinetype()) {
1662     Register tmp1  = op->tmp1()->as_register();
1663     __ mov(tmp1, markWord::inline_type_pattern);
1664     __ ldr(rscratch1, Address(left, oopDesc::mark_offset_in_bytes()));
1665     __ andr(tmp1, tmp1, rscratch1);
1666     __ ldr(rscratch1, Address(right, oopDesc::mark_offset_in_bytes()));
1667     __ andr(tmp1, tmp1, rscratch1);
1668     __ cmp(tmp1, (u1)markWord::inline_type_pattern);
1669     __ br(Assembler::NE, L_oops_not_equal);
1670   }
1671 
1672   // (3) Same klass check: if the operands are of different klasses, they are not substitutable.
1673   if (left_klass != NULL && left_klass->is_inlinetype() && left_klass == right_klass) {
1674     // No need to load klass -- the operands are statically known to be the same inline klass.
1675     __ b(*op->stub()->entry());
1676   } else {
1677     Register left_klass_op = op->left_klass_op()->as_register();
1678     Register right_klass_op = op->right_klass_op()->as_register();
1679 
1680     if (UseCompressedClassPointers) {
1681       __ ldrw(left_klass_op,  Address(left,  oopDesc::klass_offset_in_bytes()));
1682       __ ldrw(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
1683       __ cmpw(left_klass_op, right_klass_op);
1684     } else {
1685       __ ldr(left_klass_op,  Address(left,  oopDesc::klass_offset_in_bytes()));
1686       __ ldr(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
1687       __ cmp(left_klass_op, right_klass_op);
1688     }
1689 
1690     __ br(Assembler::EQ, *op->stub()->entry()); // same klass -> do slow check
1691     // fall through to L_oops_not_equal
1692   }
1693 
1694   __ bind(L_oops_not_equal);
1695   move(op->not_equal_result(), op->result_opr());
1696   __ b(L_end);
1697 
1698   __ bind(L_oops_equal);
1699   move(op->equal_result(), op->result_opr());
1700   __ b(L_end);
1701 
1702   // We've returned from the stub. R0 contains 0x0 IFF the two
1703   // operands are not substitutable. (Don't compare against 0x1 in case the
1704   // C compiler is naughty)
1705   __ bind(*op->stub()->continuation());
1706   __ cbz(r0, L_oops_not_equal); // (call_stub() == 0x0) -> not_equal
1707   move(op->equal_result(), op->result_opr()); // (call_stub() != 0x0) -> equal
1708   // fall-through
1709   __ bind(L_end);
1710 }
1711 
1712 
1713 void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
1714   __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1715   __ cset(rscratch1, Assembler::NE);
1716   __ membar(__ AnyAny);
1717 }
1718 
1719 void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
1720   __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1721   __ cset(rscratch1, Assembler::NE);
1722   __ membar(__ AnyAny);
1723 }
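// Both CAS helpers above emit an acquire/release compare-and-swap and then a
// cset on NE, leaving 0 in rscratch1 on success and 1 on failure (cmpxchg
// sets EQ when the exchange succeeds). The trailing AnyAny barrier appears
// to be there to give the operation full two-way fence semantics, as
// expected of Java compareAndSet-style intrinsics.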
1724 
1725 
1726 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1727   assert(VM_Version::supports_cx8(), "wrong machine");
1728   Register addr;
1729   if (op->addr()->is_register()) {
1730     addr = as_reg(op->addr());
1731   } else {
1732     assert(op->addr()->is_address(), "what else?");

2132     }
2133 
2134     if (opr2->is_constant()) {
2135       bool is_32bit = false; // true iff the register operand is 32 bits wide
2136       jlong imm;
2137 
2138       switch(opr2->type()) {
2139       case T_INT:
2140         imm = opr2->as_constant_ptr()->as_jint();
2141         is_32bit = true;
2142         break;
2143       case T_LONG:
2144         imm = opr2->as_constant_ptr()->as_jlong();
2145         break;
2146       case T_ADDRESS:
2147         imm = opr2->as_constant_ptr()->as_jint();
2148         break;
2149       case T_METADATA:
2150         imm = (intptr_t)(opr2->as_constant_ptr()->as_metadata());
2151         break;
2152       case T_INLINE_TYPE:
2153       case T_OBJECT:
2154       case T_ARRAY:
2155         jobject2reg(opr2->as_constant_ptr()->as_jobject(), rscratch1);
2156         __ cmpoop(reg1, rscratch1);
2157         return;
2158       default:
2159         ShouldNotReachHere();
2160         imm = 0;  // unreachable
2161         break;
2162       }
2163 
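      // AArch64 add/sub immediates are 12 bits (optionally shifted left by
      // 12), so a constant that fits is compared against directly below;
      // anything else is first materialized in rscratch1.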
2164       if (Assembler::operand_valid_for_add_sub_immediate(imm)) {
2165         if (is_32bit)
2166           __ cmpw(reg1, imm);
2167         else
2168           __ subs(zr, reg1, imm);
2169         return;
2170       } else {
2171         __ mov(rscratch1, imm);
2172         if (is_32bit)

2207     __ cmp(left->as_register_lo(), right->as_register_lo());
2208     __ mov(dst->as_register(), (uint64_t)-1L);           // assume left < right
2209     __ br(Assembler::LT, done);                          // dst stays -1 if less
2210     __ csinc(dst->as_register(), zr, zr, Assembler::EQ); // dst = 0 if equal, else 1
2211     __ bind(done);
2212   } else {
2213     ShouldNotReachHere();
2214   }
2215 }
2216 
2217 
2218 void LIR_Assembler::align_call(LIR_Code code) {  }
2219 
2220 
2221 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2222   address call = __ trampoline_call(Address(op->addr(), rtype));
2223   if (call == NULL) {
2224     bailout("trampoline stub overflow");
2225     return;
2226   }
2227   add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
2228 }
2229 
2230 
2231 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2232   address call = __ ic_call(op->addr());
2233   if (call == NULL) {
2234     bailout("trampoline stub overflow");
2235     return;
2236   }
2237   add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
2238 }
2239 
2240 void LIR_Assembler::emit_static_call_stub() {
2241   address call_pc = __ pc();
2242   address stub = __ start_a_stub(call_stub_size());
2243   if (stub == NULL) {
2244     bailout("static call stub overflow");
2245     return;
2246   }
2247 
2248   int start = __ offset();
2249 
2250   __ relocate(static_stub_Relocation::spec(call_pc));
2251   __ emit_static_call_stub();
2252 
2253   assert(__ offset() - start + CompiledStaticCall::to_trampoline_stub_size()
2254         <= call_stub_size(), "stub too big");
2255   __ end_a_stub();
2256 }
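// The stub emitted above is a fixed-layout sequence (roughly: a metadata
// load of the callee plus an indirect branch) that gets patched when the
// static call is resolved; the assert checks that the stub, together with
// its trampoline, fits in the space reserved by call_stub_size().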
2257 

2299   __ b(_unwind_handler_entry);
2300 }
2301 
2302 
2303 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
2304   Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
2305   Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
2306 
2307   switch (left->type()) {
2308     case T_INT: {
2309       switch (code) {
2310       case lir_shl:  __ lslvw (dreg, lreg, count->as_register()); break;
2311       case lir_shr:  __ asrvw (dreg, lreg, count->as_register()); break;
2312       case lir_ushr: __ lsrvw (dreg, lreg, count->as_register()); break;
2313       default:
2314         ShouldNotReachHere();
2315         break;
2316       }
2317       break;
2318     case T_LONG:
2319     case T_INLINE_TYPE:
2320     case T_ADDRESS:
2321     case T_OBJECT:
2322       switch (code) {
2323       case lir_shl:  __ lslv (dreg, lreg, count->as_register()); break;
2324       case lir_shr:  __ asrv (dreg, lreg, count->as_register()); break;
2325       case lir_ushr: __ lsrv (dreg, lreg, count->as_register()); break;
2326       default:
2327         ShouldNotReachHere();
2328         break;
2329       }
2330       break;
2331     default:
2332       ShouldNotReachHere();
2333       break;
2334     }
2335   }
2336 }
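// No masking of the count register is needed above: AArch64 variable shifts
// (lslv/asrv/lsrv and their w-forms) use the count modulo the register
// width, which matches Java's shift semantics.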
2337 
2338 
2339 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
2340   Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
2341   Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
2342 
2343   switch (left->type()) {
2344     case T_INT: {
2345       switch (code) {
2346       case lir_shl:  __ lslw (dreg, lreg, count); break;
2347       case lir_shr:  __ asrw (dreg, lreg, count); break;
2348       case lir_ushr: __ lsrw (dreg, lreg, count); break;
2349       default:
2350         ShouldNotReachHere();
2351         break;
2352       }
2353       break;
2354     case T_LONG:
2355     case T_ADDRESS:
2356     case T_INLINE_TYPE:
2357     case T_OBJECT:
2358       switch (code) {
2359       case lir_shl:  __ lsl (dreg, lreg, count); break;
2360       case lir_shr:  __ asr (dreg, lreg, count); break;
2361       case lir_ushr: __ lsr (dreg, lreg, count); break;
2362       default:
2363         ShouldNotReachHere();
2364         break;
2365       }
2366       break;
2367     default:
2368       ShouldNotReachHere();
2369       break;
2370     }
2371   }
2372 }
2373 
2374 
2375 void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
2376   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");

2381 
2382 
2383 void LIR_Assembler::store_parameter(jint c,     int offset_from_rsp_in_words) {
2384   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2385   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2386   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2387   __ mov (rscratch1, c);
2388   __ str (rscratch1, Address(sp, offset_from_rsp_in_bytes));
2389 }
2390 
2391 
2392 void LIR_Assembler::store_parameter(jobject o,  int offset_from_rsp_in_words) {
2393   ShouldNotReachHere();
2394   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2395   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2396   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2397   __ lea(rscratch1, __ constant_oop_address(o));
2398   __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
2399 }
2400 
2401 void LIR_Assembler::arraycopy_inlinetype_check(Register obj, Register tmp, CodeStub* slow_path, bool is_dest, bool null_check) {
2402   if (null_check) {
2403     __ cbz(obj, *slow_path->entry());
2404   }
2405   if (UseArrayMarkWordCheck) {
2406     if (is_dest) {
2407       __ test_null_free_array_oop(obj, tmp, *slow_path->entry());
2408     } else {
2409       __ test_flattened_array_oop(obj, tmp, *slow_path->entry());
2410     }
2411   } else {
2412     __ load_klass(tmp, obj);
2413     __ ldr(tmp, Address(tmp, Klass::layout_helper_offset()));
2414     if (is_dest) {
2415       // Take the slow path if it's a null_free destination array, in case the source array contains NULLs.
2416       __ tst(tmp, Klass::_lh_null_free_bit_inplace);
2417     } else {
2418       __ tst(tmp, Klass::_lh_array_tag_vt_value_bit_inplace);
2419     }
2420     __ br(Assembler::NE, *slow_path->entry());
2421   }
2422 }
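// In short: a copy is routed to the slow path when the source might be a
// flattened array, or when the destination might be null-free (since the
// source could contain nulls that a null-free array must reject).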
2423 
2424 // This code replaces a call to arraycopy; no exceptions may
2425 // be thrown in this code. They must be thrown in the System.arraycopy
2426 // activation frame; we could save some checks if this were not the case.
2427 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2428   ciArrayKlass* default_type = op->expected_type();
2429   Register src = op->src()->as_register();
2430   Register dst = op->dst()->as_register();
2431   Register src_pos = op->src_pos()->as_register();
2432   Register dst_pos = op->dst_pos()->as_register();
2433   Register length  = op->length()->as_register();
2434   Register tmp = op->tmp()->as_register();
2435 
2436   CodeStub* stub = op->stub();
2437   int flags = op->flags();
2438   BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
2439   if (is_reference_type(basic_type)) basic_type = T_OBJECT;
2440 
2441   if (flags & LIR_OpArrayCopy::always_slow_path) {
2442     __ b(*stub->entry());
2443     __ bind(*stub->continuation());
2444     return;
2445   }
2446 
2447   // if we don't know anything, just go through the generic arraycopy
2448   if (default_type == NULL // || basic_type == T_OBJECT
2449       ) {
2450     Label done;
2451     assert(src == r1 && src_pos == r2, "mismatch in calling convention");
2452 
2453     // Save the arguments in case the generic arraycopy fails and we
2454     // have to fall back to the JNI stub
2455     __ stp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2456     __ stp(length,  src_pos, Address(sp, 2*BytesPerWord));
2457     __ str(src,              Address(sp, 4*BytesPerWord));
2458 
2459     address copyfunc_addr = StubRoutines::generic_arraycopy();
2460     assert(copyfunc_addr != NULL, "generic arraycopy stub required");
2461 
2462     // The arguments are in the Java calling convention, so we shift them
2463     // to the C convention
2464     assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
2465     __ mov(c_rarg0, j_rarg0);
2466     assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);

2480     __ cbz(r0, *stub->continuation());  // r0 == 0 means the whole array was copied
2481 
2482     // Reload values from the stack so they are where the stub
2483     // expects them.
2484     __ ldp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2485     __ ldp(length,  src_pos, Address(sp, 2*BytesPerWord));
2486     __ ldr(src,              Address(sp, 4*BytesPerWord));
2487 
2488     // r0 is -1^K, i.e. ~K, where K == count of elements already copied
2489     __ eonw(rscratch1, r0, zr);  // rscratch1 = ~r0 = K
2490     // adjust length down and src/dst pos up by the partially copied count
2491     __ subw(length, length, rscratch1);
2492     __ addw(src_pos, src_pos, rscratch1);
2493     __ addw(dst_pos, dst_pos, rscratch1);
2494     __ b(*stub->entry());
2495 
2496     __ bind(*stub->continuation());
2497     return;
2498   }
2499 
2500   // Handle inline type arrays
2501   if (flags & LIR_OpArrayCopy::src_inlinetype_check) {
2502     arraycopy_inlinetype_check(src, tmp, stub, false, (flags & LIR_OpArrayCopy::src_null_check));
2503   }
2504 
2505   if (flags & LIR_OpArrayCopy::dst_inlinetype_check) {
2506     arraycopy_inlinetype_check(dst, tmp, stub, true, (flags & LIR_OpArrayCopy::dst_null_check));
2507   }
2508 
2509   assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
2510 
2511   int elem_size = type2aelembytes(basic_type);
2512   int scale = exact_log2(elem_size);
2513 
2514   Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
2515   Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
2516   Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
2517   Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());
2518 
2519   // test for NULL
2520   if (flags & LIR_OpArrayCopy::src_null_check) {
2521     __ cbz(src, *stub->entry());
2522   }
2523   if (flags & LIR_OpArrayCopy::dst_null_check) {
2524     __ cbz(dst, *stub->entry());
2525   }
2526 
2527   // If the compiler was not able to prove that exact type of the source or the destination
2528   // of the arraycopy is an array type, check at runtime if the source or the destination is

3043         // first time here. Set profile type.
3044         __ str(tmp, mdo_addr);
3045       } else {
3046         assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
3047                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
3048 
3049         __ ldr(tmp, mdo_addr);
3050         __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
3051 
3052         __ orr(tmp, tmp, TypeEntries::type_unknown);
3053         __ str(tmp, mdo_addr);
3054         // FIXME: Write barrier needed here?
3055       }
3056     }
3057 
3058     __ bind(next);
3059   }
3060   COMMENT("} emit_profile_type");
3061 }
3062 
3063 void LIR_Assembler::emit_profile_inline_type(LIR_OpProfileInlineType* op) {
3064   Register obj = op->obj()->as_register();
3065   Register tmp = op->tmp()->as_pointer_register();
3066   Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
3067   bool not_null = op->not_null();
3068   int flag = op->flag();
3069 
3070   Label not_inline_type;
3071   if (!not_null) {
3072     __ cbz(obj, not_inline_type);
3073   }
3074 
3075   __ test_oop_is_not_inline_type(obj, tmp, not_inline_type);
3076 
3077   __ ldrb(rscratch1, mdo_addr);
3078   __ orr(rscratch1, rscratch1, flag);
3079   __ strb(rscratch1, mdo_addr);
3080 
3081   __ bind(not_inline_type);
3082 }
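// The ldrb/orr/strb sequence above is a plain (non-atomic) read-modify-write
// of the MDO flag byte; that appears to be acceptable because profile
// updates are allowed to be racy.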
3083 
3084 void LIR_Assembler::align_backward_branch_target() {
3085 }
3086 
3087 
3088 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
3089   // tmp must be unused
3090   assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
3091 
3092   if (left->is_single_cpu()) {
3093     assert(dest->is_single_cpu(), "expect single result reg");
3094     __ negw(dest->as_register(), left->as_register());
3095   } else if (left->is_double_cpu()) {
3096     assert(dest->is_double_cpu(), "expect double result reg");
3097     __ neg(dest->as_register_lo(), left->as_register_lo());
3098   } else if (left->is_single_fpu()) {
3099     assert(dest->is_single_fpu(), "expect single float result reg");
3100     __ fnegs(dest->as_float_reg(), left->as_float_reg());
3101   } else {
3102     assert(left->is_double_fpu(), "expect double float operand reg");

3202 void LIR_Assembler::membar_loadload() {
3203   __ membar(Assembler::LoadLoad);
3204 }
3205 
3206 void LIR_Assembler::membar_storestore() {
3207   __ membar(MacroAssembler::StoreStore);
3208 }
3209 
3210 void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }
3211 
3212 void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }
3213 
3214 void LIR_Assembler::on_spin_wait() {
3215   Unimplemented();
3216 }
3217 
3218 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
3219   __ mov(result_reg->as_register(), rthread);
3220 }
3221 
3222 void LIR_Assembler::check_orig_pc() {
3223   __ ldr(rscratch2, frame_map()->address_for_orig_pc_addr());
3224   __ cmp(rscratch2, (u1)NULL_WORD);
3225 }
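// This only sets the condition flags by comparing the frame's saved
// original PC slot against NULL_WORD; a caller presumably branches on the
// result to detect whether a deoptimized PC has been recorded.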
3226 
3227 void LIR_Assembler::peephole(LIR_List *lir) {
3228 #if 0
3229   if (tableswitch_count >= max_tableswitches)
3230     return;
3231 
3232   /*
3233     This finite-state automaton recognizes sequences of compare-and-
3234     branch instructions.  We will turn them into a tableswitch.  You
3235     could argue that C1 really shouldn't be doing this sort of
3236     optimization, but without it the code is really horrible.
3237   */
3238 
3239   enum { start_s, cmp1_s, beq_s, cmp_s } state;
3240   int first_key, last_key = -2147483648;
3241   int next_key = 0;
3242   int start_insn = -1;
3243   int last_insn = -1;
3244   Register reg = noreg;
3245   LIR_Opr reg_opr;

3353 #endif
3354 }
3355 
3356 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp_op) {
3357   Address addr = as_Address(src->as_address_ptr());
3358   BasicType type = src->type();
3359   bool is_oop = is_reference_type(type);
3360 
3361   void (MacroAssembler::* add)(Register prev, RegisterOrConstant incr, Register addr);
3362   void (MacroAssembler::* xchg)(Register prev, Register newv, Register addr);
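  // The member-function pointers above let a single code path below issue
  // either the 32-bit (...w) or the 64-bit atomic primitive: 32-bit for
  // T_INT and compressed oops, 64-bit for T_LONG and uncompressed oops.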
3363 
3364   switch(type) {
3365   case T_INT:
3366     xchg = &MacroAssembler::atomic_xchgalw;
3367     add = &MacroAssembler::atomic_addalw;
3368     break;
3369   case T_LONG:
3370     xchg = &MacroAssembler::atomic_xchgal;
3371     add = &MacroAssembler::atomic_addal;
3372     break;
3373   case T_INLINE_TYPE:
3374   case T_OBJECT:
3375   case T_ARRAY:
3376     if (UseCompressedOops) {
3377       xchg = &MacroAssembler::atomic_xchgalw;
3378       add = &MacroAssembler::atomic_addalw;
3379     } else {
3380       xchg = &MacroAssembler::atomic_xchgal;
3381       add = &MacroAssembler::atomic_addal;
3382     }
3383     break;
3384   default:
3385     ShouldNotReachHere();
3386     xchg = &MacroAssembler::atomic_xchgal;
3387     add = &MacroAssembler::atomic_addal; // unreachable
3388   }
3389 
3390   switch (code) {
3391   case lir_xadd:
3392     {
3393       RegisterOrConstant inc;